text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Model zoo."""
import os
import pycls.core.builders as builders
import pycls.core.checkpoint as cp
from pycls.core.config import cfg, load_cfg, reset_cfg
from pycls.core.io import cache_url
# URL prefix for pretrained models
_URL_WEIGHTS = "https://dl.fbaipublicfiles.com/pycls"
# URL prefix for model config files
_URL_CONFIGS = "https://raw.githubusercontent.com/facebookresearch/pycls/main/configs"
# Model weights download cache directory
_DOWNLOAD_CACHE = "/tmp/pycls-download-cache"
# Predefined model config files
_MODEL_ZOO_CONFIGS = {
"RegNetX-200MF": "dds_baselines/regnetx/RegNetX-200MF_dds_8gpu.yaml",
"RegNetX-400MF": "dds_baselines/regnetx/RegNetX-400MF_dds_8gpu.yaml",
"RegNetX-600MF": "dds_baselines/regnetx/RegNetX-600MF_dds_8gpu.yaml",
"RegNetX-800MF": "dds_baselines/regnetx/RegNetX-800MF_dds_8gpu.yaml",
"RegNetX-1.6GF": "dds_baselines/regnetx/RegNetX-1.6GF_dds_8gpu.yaml",
"RegNetX-3.2GF": "dds_baselines/regnetx/RegNetX-3.2GF_dds_8gpu.yaml",
"RegNetX-4.0GF": "dds_baselines/regnetx/RegNetX-4.0GF_dds_8gpu.yaml",
"RegNetX-6.4GF": "dds_baselines/regnetx/RegNetX-6.4GF_dds_8gpu.yaml",
"RegNetX-8.0GF": "dds_baselines/regnetx/RegNetX-8.0GF_dds_8gpu.yaml",
"RegNetX-12GF": "dds_baselines/regnetx/RegNetX-12GF_dds_8gpu.yaml",
"RegNetX-16GF": "dds_baselines/regnetx/RegNetX-16GF_dds_8gpu.yaml",
"RegNetX-32GF": "dds_baselines/regnetx/RegNetX-32GF_dds_8gpu.yaml",
"RegNetY-200MF": "dds_baselines/regnety/RegNetY-200MF_dds_8gpu.yaml",
"RegNetY-400MF": "dds_baselines/regnety/RegNetY-400MF_dds_8gpu.yaml",
"RegNetY-600MF": "dds_baselines/regnety/RegNetY-600MF_dds_8gpu.yaml",
"RegNetY-800MF": "dds_baselines/regnety/RegNetY-800MF_dds_8gpu.yaml",
"RegNetY-1.6GF": "dds_baselines/regnety/RegNetY-1.6GF_dds_8gpu.yaml",
"RegNetY-3.2GF": "dds_baselines/regnety/RegNetY-3.2GF_dds_8gpu.yaml",
"RegNetY-4.0GF": "dds_baselines/regnety/RegNetY-4.0GF_dds_8gpu.yaml",
"RegNetY-6.4GF": "dds_baselines/regnety/RegNetY-6.4GF_dds_8gpu.yaml",
"RegNetY-8.0GF": "dds_baselines/regnety/RegNetY-8.0GF_dds_8gpu.yaml",
"RegNetY-12GF": "dds_baselines/regnety/RegNetY-12GF_dds_8gpu.yaml",
"RegNetY-16GF": "dds_baselines/regnety/RegNetY-16GF_dds_8gpu.yaml",
"RegNetY-32GF": "dds_baselines/regnety/RegNetY-32GF_dds_8gpu.yaml",
"ResNet-50": "dds_baselines/resnet/R-50-1x64d_dds_8gpu.yaml",
"ResNet-101": "dds_baselines/resnet/R-101-1x64d_dds_8gpu.yaml",
"ResNet-152": "dds_baselines/resnet/R-152-1x64d_dds_8gpu.yaml",
"ResNeXt-50": "dds_baselines/resnext/X-50-32x4d_dds_8gpu.yaml",
"ResNeXt-101": "dds_baselines/resnext/X-101-32x4d_dds_8gpu.yaml",
"ResNeXt-152": "dds_baselines/resnext/X-152-32x4d_dds_8gpu.yaml",
"EfficientNet-B0": "dds_baselines/effnet/EN-B0_dds_8gpu.yaml",
"EfficientNet-B1": "dds_baselines/effnet/EN-B1_dds_8gpu.yaml",
"EfficientNet-B2": "dds_baselines/effnet/EN-B2_dds_8gpu.yaml",
"EfficientNet-B3": "dds_baselines/effnet/EN-B3_dds_8gpu.yaml",
"EfficientNet-B4": "dds_baselines/effnet/EN-B4_dds_8gpu.yaml",
"EfficientNet-B5": "dds_baselines/effnet/EN-B5_dds_8gpu.yaml",
}
# Predefined model weight files
_MODEL_ZOO_WEIGHTS = {
"RegNetX-200MF": "dds_baselines/160905981/RegNetX-200MF_dds_8gpu.pyth",
"RegNetX-400MF": "dds_baselines/160905967/RegNetX-400MF_dds_8gpu.pyth",
"RegNetX-600MF": "dds_baselines/160906442/RegNetX-600MF_dds_8gpu.pyth",
"RegNetX-800MF": "dds_baselines/160906036/RegNetX-800MF_dds_8gpu.pyth",
"RegNetX-1.6GF": "dds_baselines/160990626/RegNetX-1.6GF_dds_8gpu.pyth",
"RegNetX-3.2GF": "dds_baselines/160906139/RegNetX-3.2GF_dds_8gpu.pyth",
"RegNetX-4.0GF": "dds_baselines/160906383/RegNetX-4.0GF_dds_8gpu.pyth",
"RegNetX-6.4GF": "dds_baselines/161116590/RegNetX-6.4GF_dds_8gpu.pyth",
"RegNetX-8.0GF": "dds_baselines/161107726/RegNetX-8.0GF_dds_8gpu.pyth",
"RegNetX-12GF": "dds_baselines/160906020/RegNetX-12GF_dds_8gpu.pyth",
"RegNetX-16GF": "dds_baselines/158460855/RegNetX-16GF_dds_8gpu.pyth",
"RegNetX-32GF": "dds_baselines/158188473/RegNetX-32GF_dds_8gpu.pyth",
"RegNetY-200MF": "dds_baselines/176245422/RegNetY-200MF_dds_8gpu.pyth",
"RegNetY-400MF": "dds_baselines/160906449/RegNetY-400MF_dds_8gpu.pyth",
"RegNetY-600MF": "dds_baselines/160981443/RegNetY-600MF_dds_8gpu.pyth",
"RegNetY-800MF": "dds_baselines/160906567/RegNetY-800MF_dds_8gpu.pyth",
"RegNetY-1.6GF": "dds_baselines/160906681/RegNetY-1.6GF_dds_8gpu.pyth",
"RegNetY-3.2GF": "dds_baselines/160906834/RegNetY-3.2GF_dds_8gpu.pyth",
"RegNetY-4.0GF": "dds_baselines/160906838/RegNetY-4.0GF_dds_8gpu.pyth",
"RegNetY-6.4GF": "dds_baselines/160907112/RegNetY-6.4GF_dds_8gpu.pyth",
"RegNetY-8.0GF": "dds_baselines/161160905/RegNetY-8.0GF_dds_8gpu.pyth",
"RegNetY-12GF": "dds_baselines/160907100/RegNetY-12GF_dds_8gpu.pyth",
"RegNetY-16GF": "dds_baselines/161303400/RegNetY-16GF_dds_8gpu.pyth",
"RegNetY-32GF": "dds_baselines/161277763/RegNetY-32GF_dds_8gpu.pyth",
"ResNet-50": "dds_baselines/161235311/R-50-1x64d_dds_8gpu.pyth",
"ResNet-101": "dds_baselines/161167170/R-101-1x64d_dds_8gpu.pyth",
"ResNet-152": "dds_baselines/161167467/R-152-1x64d_dds_8gpu.pyth",
"ResNeXt-50": "dds_baselines/161167411/X-50-32x4d_dds_8gpu.pyth",
"ResNeXt-101": "dds_baselines/161167590/X-101-32x4d_dds_8gpu.pyth",
"ResNeXt-152": "dds_baselines/162471172/X-152-32x4d_dds_8gpu.pyth",
"EfficientNet-B0": "dds_baselines/161305613/EN-B0_dds_8gpu.pyth",
"EfficientNet-B1": "dds_baselines/161304979/EN-B1_dds_8gpu.pyth",
"EfficientNet-B2": "dds_baselines/161305015/EN-B2_dds_8gpu.pyth",
"EfficientNet-B3": "dds_baselines/161305060/EN-B3_dds_8gpu.pyth",
"EfficientNet-B4": "dds_baselines/161305098/EN-B4_dds_8gpu.pyth",
"EfficientNet-B5": "dds_baselines/161305138/EN-B5_dds_8gpu.pyth",
}
def get_model_list():
    """Get list of all valid models in model zoo.

    Returns:
        list[str]: Names of all models with pretrained weights available.
    """
    # Return a real list (as the docstring promises), not a dict keys view;
    # iteration order matches the declaration order of _MODEL_ZOO_WEIGHTS.
    return list(_MODEL_ZOO_WEIGHTS)
def get_config_file(name):
    """Get file with model config (downloads if necessary).

    Args:
        name: Model zoo key, e.g. "RegNetX-200MF".

    Returns:
        Local filesystem path of the cached YAML config file.

    Raises:
        AssertionError: If *name* is not a known model zoo entry.
    """
    # Explicit raise (same exception type as the old `assert`) so the
    # validation survives `python -O`, which strips assert statements.
    if name not in _MODEL_ZOO_CONFIGS:
        raise AssertionError("Model {} not found in the model zoo.".format(name))
    # URLs always use "/" separators; os.path.join would emit "\" on Windows.
    config_url = "/".join([_URL_CONFIGS, _MODEL_ZOO_CONFIGS[name]])
    return cache_url(config_url, _DOWNLOAD_CACHE, _URL_CONFIGS)
def get_weights_file(name):
    """Get file with model weights (downloads if necessary).

    Args:
        name: Model zoo key, e.g. "RegNetX-200MF".

    Returns:
        Local filesystem path of the cached weights file.

    Raises:
        AssertionError: If *name* is not a known model zoo entry.
    """
    # Explicit raise (same exception type as the old `assert`) so the
    # validation survives `python -O`, which strips assert statements.
    if name not in _MODEL_ZOO_WEIGHTS:
        raise AssertionError("Model {} not found in the model zoo.".format(name))
    # URLs always use "/" separators; os.path.join would emit "\" on Windows.
    weights_url = "/".join([_URL_WEIGHTS, _MODEL_ZOO_WEIGHTS[name]])
    return cache_url(weights_url, _DOWNLOAD_CACHE, _URL_WEIGHTS)
def get_model_info(name):
    """Return model info (useful for debugging).

    Args:
        name: Model zoo key, e.g. "RegNetX-200MF".

    Returns:
        Tuple of (relative config path, relative weights path, model id,
        full config URL, full weights URL).
    """
    config_url = _MODEL_ZOO_CONFIGS[name]
    weight_url = _MODEL_ZOO_WEIGHTS[name]
    # Relative weight paths look like "dds_baselines/<id>/<file>.pyth",
    # so the second path component is the numeric model id.
    model_id = weight_url.split("/")[1]
    # URLs always use "/" separators; os.path.join would emit "\" on Windows.
    config_url_full = "/".join([_URL_CONFIGS, config_url])
    weight_url_full = "/".join([_URL_WEIGHTS, weight_url])
    return config_url, weight_url, model_id, config_url_full, weight_url_full
def build_model(name, pretrained=False, cfg_list=()):
    """Constructs a predefined model (note: loads global config as well)."""
    # Reset the global config, load the model's YAML, apply user overrides.
    reset_cfg()
    load_cfg(get_config_file(name))
    cfg.merge_from_list(cfg_list)
    # Build the network from the now-populated global config.
    model = builders.build_model()
    # Optionally initialize it from the pretrained zoo checkpoint.
    if pretrained:
        cp.load_checkpoint(get_weights_file(name), model)
    return model
def regnetx(name, pretrained=False, cfg_list=()):
    """Constructs a RegNetX model (note: loads global config as well)."""
    # Accept both "RegNetX-800MF" and the shorthand "800MF".
    prefix = "RegNetX-"
    full_name = name if prefix in name else prefix + name
    return build_model(full_name, pretrained, cfg_list)
def regnety(name, pretrained=False, cfg_list=()):
    """Constructs a RegNetY model (note: loads global config as well)."""
    # Accept both "RegNetY-800MF" and the shorthand "800MF".
    prefix = "RegNetY-"
    full_name = name if prefix in name else prefix + name
    return build_model(full_name, pretrained, cfg_list)
def resnet(name, pretrained=False, cfg_list=()):
    """Constructs a ResNet model (note: loads global config as well)."""
    # Accept both "ResNet-50" and the shorthand "50".
    prefix = "ResNet-"
    full_name = name if prefix in name else prefix + name
    return build_model(full_name, pretrained, cfg_list)
def resnext(name, pretrained=False, cfg_list=()):
    """Constructs a ResNeXt model (note: loads global config as well)."""
    # Accept both "ResNeXt-50" and the shorthand "50".
    prefix = "ResNeXt-"
    full_name = name if prefix in name else prefix + name
    return build_model(full_name, pretrained, cfg_list)
def effnet(name, pretrained=False, cfg_list=()):
    """Constructs an EfficientNet model (note: loads global config as well)."""
    # Accept both "EfficientNet-B0" and the shorthand "B0".
    prefix = "EfficientNet-"
    full_name = name if prefix in name else prefix + name
    return build_model(full_name, pretrained, cfg_list)
|
#!/usr/bin/env python3
"""
Models related to voting history of North Carolina voters.
"""
from django.db import models
from django.contrib.postgres.functions import RandomUUID
from .voter import Voter
class Election(models.Model):
    """
    An election in which a voter may have voted.
    """
    # NOTE(review): RandomUUID() is a database function expression, and as a
    # Python-side `default` it is instantiated once at class-definition time
    # rather than invoked per row. The conventional pattern is a callable
    # such as `default=uuid.uuid4` — confirm distinct ids are generated per
    # inserted row before relying on this.
    id = models.UUIDField(
        primary_key=True,
        default=RandomUUID(),
        editable=False,
        help_text='Randomly generated universal unique identifier.',
    )
    # Date the election was held; unique together with `name`.
    date = models.DateField()
    name = models.CharField(max_length=100)

    class Meta:
        unique_together = (("date", "name"),)
        # Newest elections first, then alphabetical by name.
        ordering = ['-date', 'name']

    def __str__(self):
        return f'{self.name} ({self.date})'
class ElectionNameTranslator(models.Model):
    """
    Presumably maps a raw election name as it appears in the source data
    (`raw_name`) to a canonical name (`clean_name`) for a given election
    date — confirm against the import pipeline that populates this table.
    """
    date = models.DateField()
    clean_name = models.CharField(max_length=100)
    raw_name = models.CharField(max_length=100)
class VoterHistory(models.Model):
    """
    The registration and voting status of a given voter in each election.
    """
    # The voter who cast the ballot; set to NULL (history kept) if the
    # Voter row is deleted.
    voter = models.ForeignKey(
        'Voter',
        on_delete=models.SET_NULL,
        related_name="histories",
        null=True,
    )
    # Registration number string, up to 12 chars; may be blank.
    voter_reg_num = models.CharField(max_length=12, blank=True)
    # Deleting an Election cascades to its history rows.
    election = models.ForeignKey(
        'Election',
        on_delete=models.CASCADE,
        help_text='Refers to the election in which the vote was cast.'
    )
    # Free-form method string (presumably absentee / in-person codes from
    # the NC source data — confirm against the loader).
    voting_method = models.CharField(max_length=32, blank=True)
    # Three-character party code of the ballot; blank when not applicable.
    voted_party_cd = models.CharField(max_length=3, blank=True)
    # County of registration (indexed) vs. county where the vote was cast.
    county_id = models.SmallIntegerField(db_index=True)
    voted_county_id = models.SmallIntegerField()
|
# Lab assignment: compute max{min(a, b), min(c, d)} from four user inputs.
print("Сніцаренко Анна Сергіївна \nКМ-91 \nЛабораторна робота №1 \nЗнайти max {min (a, b), min (c, d)} \n")

# Read the four operands with the same prompts as before.
inputs = []
for prompt in ("Введіть значення a", "Введіть значення b", "Введіть значення c", "Введіть значення d"):
    inputs.append(float(input(prompt)))
a, b, c, d = inputs

print(max(min(a, b), min(c, d)))
from nose.tools import assert_equals, with_setup
def setup():
    """Prepare module-level fixtures before the test runs."""
    # TODO: add the remaining global vars the test will need.
    global global_var
    global_var = "is this a global?"
def teardown():
    """Clean up after the test."""
    # TODO: add any cleanup needed when the test exits.
    return None
@with_setup(setup, teardown)
def test():
    """testing Authorize.Net"""
    # TODO: write the actual test. It can rely on `global_var`, which the
    # setup() fixture assigns before this body runs.
    pass
|
from datetime import datetime
from regobj import HKEY_LOCAL_MACHINE, HKEY_USERS, HKEY_CURRENT_USER
from commit import Commit
# Registry roots to snapshot.
# NOTE(review): the second assignment immediately overrides the first, so
# only HKEY_CURRENT_USER is ever scanned — this looks like a temporary
# debug/speed override of the full (HKLM, HKU) set; confirm which is intended.
KEYS = (HKEY_LOCAL_MACHINE, HKEY_USERS)
KEYS = (HKEY_CURRENT_USER, )
class Snapshot():
    """A point-in-time capture of all registry values under the KEYS roots."""

    def __init__(self, initial=False):
        # Name the snapshot after its creation time.
        self.name = str(datetime.now())
        self.values = {}
        # An "initial" snapshot stays empty; otherwise walk every root.
        if not initial:
            for root in KEYS:
                self.collect_values(root)

    def collect_values(self, root, path=''):
        """Recursively record (type, data) for every value below *root*."""
        for value in root.values():
            full_name = '{path}\\{root_name}\\{name}'.format(path=path, root_name=root.name, name=value.name)
            self.values[full_name] = value.type, value.data
        # Extend the path with this key's name before descending.
        subkey_path = '{path}\\{root_name}'.format(path=path, root_name=root.name) if path else root.name
        for child in root.subkeys():
            self.collect_values(child, subkey_path)

    def compare_to(self, other):
        """Diff this snapshot against *other* and return the resulting Commit."""
        mine = set(self.values)
        theirs = set(other.values)
        created = [(key, self.values[key]) for key in mine - theirs]
        deleted = [(key, other.values[key]) for key in theirs - mine]
        modified = [(key, self.values[key], other.values[key])
                    for key in mine & theirs
                    if self.values[key] != other.values[key]]
        return Commit(created, modified, deleted)
if __name__ == '__main__':
    # Time how long a full registry scan takes.
    before = datetime.now()
    s1 = Snapshot()
    print(len(s1.values))
    print(datetime.now() - before)
    # Pause so the user can change the registry before the second scan.
    p = input('change anything')
    s2 = Snapshot()
    # NOTE(review): the older snapshot (s1) is `self` and the newer (s2) is
    # `other`, so created/deleted may read as inverted relative to the user's
    # edits — confirm the intended direction of the diff.
    print(s1.compare_to(s2))
from PyQt5 import QtWidgets, QtGui, QtCore
from core.Utils.BasePanel import BasePanel
from view.UiMainPanel import UiMainPanel
import openpyxl
from core.Utils.ExcelPanel import *
import os
from src.datalink.DataLinkPanel import DataLinkPanel
from core.Manage.SignalManage import *
import json
import time
class MainPanel(QtWidgets.QMainWindow, UiMainPanel, BasePanel):
    """Main window of an Excel-based game-data tool.

    Loads an item.xlsx index sheet, displays the per-row linked sheet files,
    supports column lookup / probability / cross-column checks on the loaded
    data, and exports sheets as client JSON or server .py data files.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setupUi(self)
        self.eventListener()
        self.subPanel = None        # child DataLinkPanel window, when open
        self.item_file_dir = None   # path of the selected item.xlsx
        self.sheet_path = {}  # row index -> path of the sheet file referenced by item.xlsx
        self.sheet_ok = {}    # row index -> data directory, for rows whose sheet file exists
        # All action buttons start disabled until a file is chosen and loaded.
        self.pushButton_2.setDisabled(True)
        self.pushButton_4.setDisabled(True)
        self.pushButton_5.setDisabled(True)
        self.pushButton_6.setDisabled(True)
        self.pushButton_7.setDisabled(True)
        self.pushButton_12.setDisabled(True)
        self.pushButton_13.setDisabled(True)
        self.pushButton_14.setDisabled(True)
        self.pushButton_15.setDisabled(True)
        # NOTE(review): this timer is never started in this class — confirm
        # it is used elsewhere or is leftover.
        self.timer = QtCore.QBasicTimer()

    def eventListener(self):
        """Wire widget signals to their handler methods."""
        # NOTE(review): connects to a signal of a freshly constructed Worker();
        # confirm Worker() returns a shared/singleton object, otherwise this
        # connection is made on a throwaway instance.
        Worker().parse_triggered.connect(self.changedata)
        self.pushButton.clicked.connect(self.openfile)
        self.pushButton_2.clicked.connect(self.loadSheet)
        self.pushButton_3.clicked.connect(self.resert)
        self.pushButton_15.clicked.connect(lambda: self.loadSheetData(self.tableWidget_4, True))
        self.pushButton_4.clicked.connect(lambda: self.loadSheetData(self.tableWidget, False))
        self.pushButton_13.clicked.connect(self.saveFile)
        self.pushButton_12.clicked.connect(self.exportClientData)
        self.pushButton_14.clicked.connect(self.exportServerData)
        self.pushButton_5.clicked.connect(lambda: self.checkDataChange(self.sheet_ok))
        self.pushButton_6.clicked.connect(self.gailvcheck)
        self.pushButton_7.clicked.connect(self.hengxiangshujucheck)

    def openfile(self):
        """Ask the user for the item.xlsx path and enable the load button."""
        print(os.getcwd())
        self.item_file_dir = QtWidgets.QFileDialog.getOpenFileName(self, "item.xlsx文件路径",
                                                                   os.getcwd(),
                                                                   "Excel files(*.xlsx)")[0]
        if self.item_file_dir:  # empty string when the dialog was cancelled
            self.label_9.setText(self.item_file_dir)
            self.pushButton_2.setEnabled(True)
            self.pushButton.setDisabled(True)

    def loadSheet(self):
        """Load the first worksheet of item.xlsx into both index tables."""
        if self.item_file_dir:
            # data_only=True reads cached formula results instead of formulas.
            wb = openpyxl.load_workbook(self.item_file_dir, data_only=True)
            ws = wb.worksheets[0]
            self._showleftdata(ws, self.tableWidget_2)
            self._showleftdata(ws, self.tableWidget_3)
            self.pushButton_4.setEnabled(True)
            self.pushButton_15.setEnabled(True)
        else:
            warring_msg = QtWidgets.QMessageBox.information(self, "警告", "未选择item.xlsx", QtWidgets.QMessageBox.Yes)

    def _showleftdata(self, sheet, Qtable):
        """Fill *Qtable* with (data dir, sheet name, status) for each index
        row, recording the paths of sheet files that exist on disk.
        """
        max_row = len(sheet["A"])  # number of used rows in column A
        parent_path = os.path.dirname(self.item_file_dir)
        Qtable.setRowCount(max_row - 1)  # row 1 of the sheet is the header
        Qtable.setColumnCount(3)
        Qtable.setHorizontalHeaderLabels(["数据表", "文件名", "状态"])
        Qtable.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)  # select whole rows
        Qtable.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)  # table is read-only
        Qtable.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)  # single-selection mode
        self.progressBar.setRange(0, max_row - 2)
        for i in range(max_row - 1):
            self.progressBar.setValue(i)
            # Spreadsheet rows are 1-based and row 1 holds the header.
            datadir = sheet.cell(row=i + 2, column=3).value
            sheetname = sheet.cell(row=i + 2, column=1).value
            Qtable.setItem(i, 0, QtWidgets.QTableWidgetItem(datadir))
            Qtable.setItem(i, 1, QtWidgets.QTableWidgetItem(sheetname))
            _datadir = str(datadir)
            _sheetname = str(sheetname)
            # Status column: OK / file missing / folder missing.
            if os.path.exists(parent_path + "/" + _datadir):
                if os.path.exists(parent_path + "/" + _datadir + "/" + _sheetname + ".xlsx"):
                    Qtable.setItem(i, 2, QtWidgets.QTableWidgetItem("OK"))
                    self.sheet_path[i] = parent_path + "/" + _datadir + "/" + _sheetname + ".xlsx"
                    self.sheet_ok[i] = _datadir
                else:
                    Qtable.setItem(i, 2, QtWidgets.QTableWidgetItem("文件丢失"))
            else:
                Qtable.setItem(i, 2, QtWidgets.QTableWidgetItem("文件夹丢失"))

    def loadSheetData(self, tableWidget, bool=False):
        """Load the sheet selected in the index table into *tableWidget*.

        :param tableWidget: target QTableWidget to display the sheet in
        :param bool: whether the table is editable (True = editable).
            NOTE(review): this parameter shadows the builtin ``bool``.
        :return: None
        """
        selectmode = 0
        selectnum = 0
        # The main table is driven by tableWidget_2's selection; the
        # editable table (tableWidget_4) by tableWidget_3's.
        if tableWidget == self.tableWidget:
            current_row = self.tableWidget_2.currentRow()
            selectmode = 2
            selectnum = 2
        else:
            current_row = self.tableWidget_3.currentRow()
        if current_row in self.sheet_path.keys():
            # NOTE(review): always clears self.tableWidget, even when loading
            # into tableWidget_4 — confirm this is intended.
            self.tableWidget.clear()
            wb = openpyxl.load_workbook(self.sheet_path[current_row], data_only=True)
            ws = wb.worksheets[0]
            # Hard cap to keep the UI responsive.
            if ws.max_row > 20000:
                warring_msg = QtWidgets.QMessageBox.information(self, "警告", "数据表行数超过20000行,无法处理",
                                                                QtWidgets.QMessageBox.Yes)
                return
            ExceltoQTableWidgets(ws, tableWidget, bool, selectmode, selectnum, self.progressBar)
            # Data is loaded: enable the check/export actions.
            self.pushButton_5.setEnabled(True)
            self.pushButton_6.setEnabled(True)
            self.pushButton_7.setEnabled(True)
            self.pushButton_13.setEnabled(True)
            self.pushButton_12.setEnabled(True)
            self.pushButton_14.setEnabled(True)
        else:
            warring_msg = QtWidgets.QMessageBox.information(self, "警告", "文件未选中,请检查文件状态", QtWidgets.QMessageBox.Yes)

    def checkDataChange(self, sheet_ok):
        """Open the DataLinkPanel child window loaded with *sheet_ok*."""
        self.subPanel = DataLinkPanel()
        self.subPanel.loaddata(sheet_ok)
        self.subPanel.show()

    def changedata(self, datalist):
        """Replace ids in the currently selected column with values looked
        up from the first two columns of the sheets listed in *datalist*.
        """
        wslist = []
        for data in datalist:
            wb = openpyxl.load_workbook(self.sheet_path[data], data_only=True)
            ws = wb.worksheets[0]
            wslist.append(ws)
            if ws.max_row > 20000:
                warring_msg = QtWidgets.QMessageBox.information(self, "警告", "数据表行数超过20000行,无法处理",
                                                                QtWidgets.QMessageBox.Yes)
                return
        # Build the lookup table: column 1 (stringified) -> column 2.
        # NOTE(review): range(ws.max_row) with cell(i + 2, ...) reads one row
        # past the sheet end — confirm the trailing None entry is harmless.
        b = {}
        for ws in wslist:
            for i in range(ws.max_row):
                b[str(ws.cell(i + 2, 1).value)] = ws.cell(i + 2, 2).value
        maxRow = self.tableWidget.rowCount()
        curColumn = self.tableWidget.selectedIndexes()
        if len(curColumn) != maxRow:  # require exactly one full column selected
            warring_msg = QtWidgets.QMessageBox.information(self, "警告", "只能选择一列进行匹配", QtWidgets.QMessageBox.Yes)
            return
        curColumn = self.tableWidget.currentColumn()
        self.progressBar.setRange(1, maxRow - 1)
        for i in range(1, maxRow):  # row 0 is the header
            self.progressBar.setValue(i)
            ErrorMark = False
            item = self.tableWidget.item(i, curColumn)
            itemtext = str(item.text())
            # Cells hold "|"-separated id lists; translate each id.
            tmp = itemtext.split("|")
            for j in range(len(tmp)):
                if tmp[j] in b.keys():
                    tmp[j] = b[tmp[j]]
                else:
                    ErrorMark = True
            # NOTE(review): join assumes the looked-up values are strings —
            # confirm column 2 of the lookup sheets is always text.
            itemtext = "|".join(tmp)
            item.setText(itemtext)
            if ErrorMark:
                # Red background marks cells with at least one unmatched id.
                item.setBackground(QtGui.QColor(255, 0, 0))

    def gailvcheck(self):
        """Normalize the selected column of "|"-separated integer weights
        into probabilities (each value / row sum, rounded to 2 decimals).
        """
        maxRow = self.tableWidget.rowCount()
        curColumn = self.tableWidget.selectedIndexes()
        if len(curColumn) != maxRow:  # require exactly one full column selected
            warring_msg = QtWidgets.QMessageBox.information(self, "警告", "只能选择一列进行概率计算",
                                                            QtWidgets.QMessageBox.Yes)
            return
        self.progressBar.setRange(1, maxRow - 1)
        curColumn = self.tableWidget.currentColumn()
        for i in range(1, maxRow):  # row 0 is the header
            self.progressBar.setValue(i)
            item = self.tableWidget.item(i, curColumn)
            itemtext = str(item.text())
            tmp = itemtext.split("|")
            he = 0
            try:
                he = sum([int(x) for x in tmp])
                if he == 0:
                    # An all-zero row cannot be normalized; treat as invalid.
                    raise ZeroDivisionError
            except Exception:
                # Yellow background marks rows that are not valid weights.
                item.setBackground(QtGui.QColor(255, 255, 0))
                continue
            for j in range(len(tmp)):
                tmp[j] = str(round(int(tmp[j]) / he, 2))
            itemtext = "|".join(tmp)
            item.setText(itemtext)

    def hengxiangshujucheck(self):
        """Check that all selected columns have the same number of
        "|"-separated entries per row; mark mismatching rows green.
        """
        maxRow = self.tableWidget.rowCount()
        curColumn = self.tableWidget.selectedIndexes()
        if len(curColumn) <= maxRow:  # require more than one full column selected
            warring_msg = QtWidgets.QMessageBox.information(self, "警告", "只能选择多列进行匹配",
                                                            QtWidgets.QMessageBox.Yes)
            return
        curColumnlist = []  # column indexes of the multi-selection
        for i in range(len(curColumn) // maxRow):
            curColumnlist.append(curColumn[i * maxRow].column())
        self.progressBar.setRange(1, maxRow - 1)
        for i in range(1, maxRow):  # row 0 is the header
            self.progressBar.setValue(i)
            check_box = []
            for j in curColumnlist:
                item = self.tableWidget.item(i, j)
                itemtext = str(item.text())
                tmp = itemtext.split("|")
                check_box.append(len(tmp))
            # One distinct length means every selected column agrees.
            if len(set(check_box)) != 1:
                self.tableWidget.item(i, 0).setBackground(QtGui.QColor(0, 255, 0))
            else:
                self.tableWidget.item(i, 0).setBackground(QtGui.QColor(255, 255, 255))

    def saveFile(self):
        """Bundle all exported client JSON files into one AllClientData.json."""
        _path = os.getcwd() + '\\' + 'client'
        data = {}
        for fileName in os.listdir(_path):
            file = fileName.replace('.json', '')
            if file == 'AllClientData':  # skip a previously written bundle
                continue
            filePath = _path + '\\' + fileName
            with open(filePath, 'r', encoding='utf8') as f:
                data[file] = json.load(f)
        exportPath = _path + '\\' + 'AllClientData.json'
        with open(exportPath, 'w', encoding='utf8') as f:
            json.dump(data, f, ensure_ascii=False)
        warring_msg = QtWidgets.QMessageBox.information(self, "提示", "{}个客户端表成功".format(len(data.keys())),
                                                        QtWidgets.QMessageBox.Yes)

    def _exportData(self, scene):
        """Read the sheet selected in tableWidget_3 and convert it to dicts.

        :param scene: tuple of scene tags to export, e.g. ('client', 'all')
        :return: (ws_data, filename, ws_info_data) on success, None on error
        """
        current_row = self.tableWidget_3.currentRow()
        # Derive the output file name (sans extension) from the sheet path.
        # NOTE(review): sheet_path[current_row] is read BEFORE the membership
        # check below, so a missing selection raises KeyError instead of
        # showing the warning — confirm and reorder if unintended.
        xlsxPath = self.sheet_path[current_row]
        filename = xlsxPath.split('/')[-1].split('.')[0]
        if current_row in self.sheet_path.keys():
            wb = openpyxl.load_workbook(xlsxPath)
            ws = wb.worksheets[0]
            ws_info = wb.worksheets[1]  # the attribute ("属性") sheet
            if ws_info.title != '属性':
                warring_msg = QtWidgets.QMessageBox.information(self, "警告", "第二张表的title不是 属性,请检查",
                                                                QtWidgets.QMessageBox.Yes)
                return
            # NOTE(review): time_2 is never used — leftover timing code?
            time_2 = time.time()
            # Attribute sheet rows: field name -> export metadata.
            ws_info_data = {}
            for rx in range(2, ws_info.max_row + 1):
                w1 = ws_info.cell(row=rx, column=1).value
                w2 = ws_info.cell(row=rx, column=2).value.replace(' ', '').upper()
                w3 = ws_info.cell(row=rx, column=3).value
                w4 = ws_info.cell(row=rx, column=4).value
                w5 = ws_info.cell(row=rx, column=5).value
                w6 = ws_info.cell(row=rx, column=6).value
                if w1 is not None:
                    tmp_dict = {'param': w2, 'type': w3, 'note': w4, 'info': w5, 'scene': w6}
                    ws_info_data[w1] = tmp_dict
            # Every data-sheet header field must exist in the attribute sheet.
            for i in range(1, ws.max_column + 1):
                tmp = ws.cell(1, i).value
                if tmp and (tmp not in ws_info_data.keys()):
                    warring_msg = QtWidgets.QMessageBox.information(self, "警告", "配置表错误,字段与变量没有一一对应",
                                                                    QtWidgets.QMessageBox.Yes)
                    return
            # Build the export dict keyed by the upper-cased first column.
            ws_data = {}
            self.progressBar.setRange(1, ws.max_row - 1)
            for i in range(2, ws.max_row + 1):
                self.progressBar.setValue(i - 1)
                tmp_dict = {}
                # Stop at the first empty key cell.
                tmp_row = ws.cell(i, 1).value
                if tmp_row is None:
                    break
                # Convert every remaining cell of the row.
                for j in range(2, ws.max_column + 1):
                    tmp = ws.cell(1, j).value
                    # An empty header cell ends the row's columns.
                    if not tmp:
                        break
                    # Skip columns not tagged for the requested scene.
                    if ws_info_data[tmp]['scene'] not in scene:
                        continue
                    # Convert the cell value to its declared data type.
                    _cellData = ws.cell(row=i, column=j).value
                    try:
                        dataType = ws_info_data[tmp]['type']
                        if dataType == 'string':
                            cellData = str(_cellData)
                        elif dataType == 'uint':
                            cellData = int(_cellData)
                        elif dataType == 'string[|]':
                            cellData = str(_cellData).split('|')
                        elif dataType == 'uint[|]':
                            cellData = [int(x) for x in str(_cellData).split('|')]
                        else:
                            warring_msg = QtWidgets.QMessageBox.information(self, "警告",
                                                                            "属性表中的变量类型只能是string|uint|sting[|]|uint[|]",
                                                                            QtWidgets.QMessageBox.Yes)
                            return
                    except:
                        warring_msg = QtWidgets.QMessageBox.information(self, "警告",
                                                                        "属性表中的变量类型错误或者有效数据区域存在空值",
                                                                        QtWidgets.QMessageBox.Yes)
                        return
                    tmp_dict[ws_info_data[tmp]['param']] = cellData
                # Store the converted row under its upper-cased key.
                ws_data[str(ws.cell(i, 1).value).upper()] = tmp_dict
            # Drop attribute entries that were not exported for this scene.
            _delete_key = []
            for i in ws_info_data.keys():
                if ws_info_data[i]['scene'] not in scene:
                    _delete_key.append(i)
            for i in _delete_key:
                del ws_info_data[i]
            return ws_data, filename, ws_info_data
        else:
            warring_msg = QtWidgets.QMessageBox.information(self, "警告", "文件未选中,请检查文件状态", QtWidgets.QMessageBox.Yes)
            return

    def exportClientData(self):
        """Export the selected sheet as a client JSON file, merging paired
        *_ID/*_COUNT and *_ID/*_WEIGHT list fields into id->value dicts.
        """
        param = ('client', 'all')
        data = self._exportData(param)
        if data is None:
            return
        ws_data, filename, ws_info_data = data
        # Group field base names by suffix: [_ID bases, _COUNT bases, _WEIGHT bases].
        param_group = [[], [], []]
        param_count = []   # (id_field, count_field) pairs
        param_weight = []  # (id_field, weight_field) pairs
        for i in ws_info_data.keys():
            _param = ws_info_data[i]['param']
            if '_ID' in _param:
                param_group[0].append(_param.replace('_ID', ''))
            elif '_COUNT' in _param:
                param_group[1].append(_param.replace('_COUNT', ''))
            elif '_WEIGHT' in _param:
                param_group[2].append(_param.replace('_WEIGHT', ''))
            else:
                pass
        for i in param_group[0]:
            if i in param_group[1]:
                param_count.append((i + '_ID', i + '_COUNT'))
            if i in param_group[2]:
                param_weight.append((i + '_ID', i + '_WEIGHT'))
        # Zip each _ID list with its _COUNT/_WEIGHT list into a dict stored
        # under the _COUNT/_WEIGHT key, then drop the redundant _ID field.
        for i in ws_data.keys():
            for j in param_count:
                _id = ws_data[i][j[0]]
                _count = ws_data[i][j[1]]
                _data = {}
                for k in range(len(_id)):
                    _data[_id[k]] = _count[k]
                ws_data[i][j[1]] = _data
            for j in param_weight:
                _id = ws_data[i][j[0]]
                _weight = ws_data[i][j[1]]
                _data = {}
                for k in range(len(_id)):
                    _data[_id[k]] = _weight[k]
                ws_data[i][j[1]] = _data
            for j in param_count:
                del ws_data[i][j[0]]
            for j in param_weight:
                tmp = j[0]
                if tmp in ws_data[i].keys():
                    del ws_data[i][tmp]
        # Write the JSON output file to ./client/.
        _path = os.getcwd() + '\\' + 'client'
        if not os.path.exists(_path):
            os.makedirs(_path)
        filepath = _path + '\\' + filename + '.json'
        with open(filepath, 'w', encoding="utf-8") as f:
            # f.write('DATA=')
            json.dump(ws_data, f, ensure_ascii=False)
        warring_msg = QtWidgets.QMessageBox.information(self, "提示", "导出客户端表成功", QtWidgets.QMessageBox.Yes)

    def exportServerData(self):
        """Export the selected sheet as a server-side .py data file.

        Same *_ID/*_COUNT and *_ID/*_WEIGHT merging as exportClientData,
        but writes ``DATA=<json>`` into ./server/<name>.py instead.
        """
        param = ('server', 'all')
        data = self._exportData(param)
        if data is None:
            return
        ws_data, filename, ws_info_data = data
        # Group field base names by suffix: [_ID bases, _COUNT bases, _WEIGHT bases].
        param_group = [[], [], []]
        param_count = []   # (id_field, count_field) pairs
        param_weight = []  # (id_field, weight_field) pairs
        for i in ws_info_data.keys():
            _param = ws_info_data[i]['param']
            if '_ID' in _param:
                param_group[0].append(_param.replace('_ID', ''))
            elif '_COUNT' in _param:
                param_group[1].append(_param.replace('_COUNT', ''))
            elif '_WEIGHT' in _param:
                param_group[2].append(_param.replace('_WEIGHT', ''))
            else:
                pass
        for i in param_group[0]:
            if i in param_group[1]:
                param_count.append((i + '_ID', i + '_COUNT'))
            if i in param_group[2]:
                param_weight.append((i + '_ID', i + '_WEIGHT'))
        # Zip each _ID list with its _COUNT/_WEIGHT list into a dict stored
        # under the _COUNT/_WEIGHT key, then drop the redundant _ID field.
        for i in ws_data.keys():
            for j in param_count:
                _id = ws_data[i][j[0]]
                _count = ws_data[i][j[1]]
                _data = {}
                for k in range(len(_id)):
                    _data[_id[k]] = _count[k]
                ws_data[i][j[1]] = _data
            for j in param_weight:
                _id = ws_data[i][j[0]]
                _weight = ws_data[i][j[1]]
                _data = {}
                for k in range(len(_id)):
                    _data[_id[k]] = _weight[k]
                ws_data[i][j[1]] = _data
            for j in param_count:
                del ws_data[i][j[0]]
            for j in param_weight:
                tmp = j[0]
                if tmp in ws_data[i].keys():
                    del ws_data[i][tmp]
        # Write the .py output file (a DATA= assignment) to ./server/.
        _path = os.getcwd() + '\\' + 'server'
        if not os.path.exists(_path):
            os.makedirs(_path)
        filepath = _path + '\\' + filename + '.py'
        with open(filepath, 'w', encoding="utf-8") as f:
            f.write('DATA=')
            json.dump(ws_data, f, ensure_ascii=False)
        warring_msg = QtWidgets.QMessageBox.information(self, "提示", "导出服务器表成功", QtWidgets.QMessageBox.Yes)

    def resert(self):
        """Reset the UI back to its initial state (sic: "reset")."""
        self.item_file_dir = None
        self.pushButton.setEnabled(True)
        self.label_9.setText("请输入$item.xlsx文件的路径")
        self.tableWidget.clear()
        self.tableWidget_2.clear()
        self.tableWidget_3.clear()
        self.tableWidget_4.clear()
        self.sheet_path = {}
        # NOTE(review): unlike __init__, sheet_ok is not cleared and
        # pushButton_5 is not disabled here — confirm whether intentional.
        self.pushButton_2.setDisabled(True)
        self.pushButton_4.setDisabled(True)
        self.pushButton_6.setDisabled(True)
        self.pushButton_7.setDisabled(True)
        self.pushButton_12.setDisabled(True)
        self.pushButton_13.setDisabled(True)
        self.pushButton_14.setDisabled(True)
        self.pushButton_15.setDisabled(True)
|
# PRINTING NUMBER IN SPIRAL PATTERN IN SQUARE


def spiral(n):
    """Return an n x n matrix filled 1..n*n in a clockwise inward spiral.

    Extracted from the original flat script so the logic is importable and
    testable (the script used to call input() at module level).

    Args:
        n: Side length of the square (non-negative int).

    Returns:
        List of n lists of ints.
    """
    grid = [[0] * n for _ in range(n)]
    value = 1
    for layer in range(n // 2):
        low, high = layer, n - layer - 1
        # Top row, left -> right (stop one short; the corner belongs to the
        # next side).
        for col in range(low, high):
            grid[low][col] = value
            value += 1
        # Right column, top -> bottom.
        for row in range(low, high):
            grid[row][high] = value
            value += 1
        # Bottom row, right -> left.
        for col in range(high, low, -1):
            grid[high][col] = value
            value += 1
        # Left column, bottom -> top.
        for row in range(high, low, -1):
            grid[row][low] = value
            value += 1
    if n % 2 == 1:
        # Odd n leaves a single center cell, which takes the final value
        # (the original hard-coded N*N, which equals `value` here).
        grid[n // 2][n // 2] = value
    return grid


if __name__ == "__main__":
    N = int(input("Enter the user Input: "))  # Taking user input
    print("Square - Sprial Pattern")
    chakra = spiral(N)
    for i in range(N):
        for j in range(N):
            print(str(chakra[i][j]), end='\t')
        print()
|
import string
import os
import re
import sys
class Ethernet:
    """Decoded Ethernet II header: destination/source MAC byte lists and the
    EtherType, each stored as lists/strings of hex-digit pairs."""

    def __init__(self, dest, src, type_protocole):
        self.dest = dest
        self.src = src
        self.type_protocole = type_protocole

    def affichage_dest(self):
        """Print the destination MAC address."""
        print("\tDestination (Adresse MAC) : ", ':'.join(self.dest))
        return None

    def affichage_src(self):
        """Print the source MAC address."""
        print("\tSource (Adresse MAC) : ", ':'.join(self.src))
        return None

    @staticmethod
    def version_type(t):
        """Print the protocol label for EtherType hex string *t*."""
        # Only IPv6 is matched case-insensitively, as in the original checks.
        key = "86dd" if t == "86DD" else t
        labels = {
            "0800": " (IPv4)",
            "0806": " (ARP)",
            "8035": " (RARP)",
            "8090": " (Appletalk)",
            "0805": " (X.25 niveau 3)",
            "86dd": " (IPv6)",
        }
        print(labels.get(key, " Non reconnu"))
        return None

    def affichage_type_protocole(self):
        """Print the EtherType as 0x-prefixed hex plus its label."""
        joined = ''.join(self.type_protocole)
        print("\tType : ", "0x" + joined, end='')
        self.version_type(joined)
        return None

    def affichage(self):
        """Print the full Ethernet II header summary."""
        print("\nEthernet II")
        self.affichage_dest()
        self.affichage_src()
        self.affichage_type_protocole()

    def affichage_bouton(self, fichier):
        """Write the header's byte-range markers to *fichier*."""
        ranges = ("*", "0 14", "0 6", "6 12", "12 14")
        fichier.write('\n'.join(ranges) + '\n')
class IP :
def __init__ (self, version, ihl, tos, total_length, ident, flags, fragment_offset, ttl, protocol, header_cks ,src, dest, options) :
self.version = version
self.ihl = ihl
self.tos = tos
self.total_length = total_length
self.ident = ident
self.flags = flags
self.fragment_offset = fragment_offset
self.ttl = ttl
self.protocol = protocol
self.header_cks = header_cks
self.src = src
self.dest = dest
self.options = options
self.bool_checksum = None
def set_options (self, trame):
self.options = trame
return None
def affichage_version (self) :
print("\tVersion : " + self.version)
return None
def affichage_ihl (self) :
print("\tHeader Length : ", 4 * int(self.ihl,16) , " bytes (", int(self.ihl,16) ,")",sep='')
return None
def taille_options_IP (self) :
if int(self.ihl,16) == 5 :
return 0
return (int(self.ihl,16) - 5 ) * 4
def affichage_tos (self) :
print("\tDifferentiated Services Field : " + "0x" + self.tos[0])
return None
def affichage_total_length (self) :
t = ''.join(self.total_length)
print("\tTotal Length : " , int(t,16) )
return None
def affichage_ident (self) :
t = ''.join(self.ident)
print("\tIdentification : " + "0x" + t + " (", int(t,16), ")", sep='')
return None
def affichage_flags (self) :
t = ''.join(self.fragment_offset)
print("\tFlags : 0x"+t)
b = bin(int(self.flags,16))[2:].zfill(8)
if b[0] != '0' :
print("ERROR! Reserved bit of flag not 0!!")
return None
print ("\t\t 0... .... .... .... = Reserved bit : Not set")
c = "Set" if b[1] == '1' else "Not Set"
print ( "\t\t ." , b[1] , ".. .... .... .... = Don't Fragment : " + c, sep = '')
c = "Set" if b[2] == '1' else "Not Set"
print ( "\t\t .." , b[2], ". .... .... .... = More Fragment : " + c, sep = '')
return None
def affichage_fragment_offset (self) :
t = ''.join(self.fragment_offset)
b = bin(int( t ,16))[2:].zfill(8)
if b[1] == '0' and any(c != '0' for c in b[3:]) :
print("ERROR! Fragment detected when not allowed!")
return None
print("\tFragment offset : ", int(b[3:],2) )
return None
def affichage_ttl (self) :
print("\tTime to live : ", int(self.ttl,16 ) )
return None
def affichage_protocol (self) :
t = self.protocol
text = "\tProtocol : "
if t == "06" :
print(text + " TCP (6)")
elif t == "11":
print(text + " UDP (17)")
elif t == "01":
print(text + " ICMP (1)")
elif t == "02":
print(text + " IGMP (2)")
elif t == "08":
print(text + " EGP (8)")
elif t == "09":
print(text + " IGP (9)")
elif t == "24":
print(text + " XTP (36)")
elif t == "2E" or t == "2e":
print(text + " RSVP (47)")
else :
print("\tProtocole non reconnu")
return None
def verification_checksum (self, trame ) :
max = len(trame)
est_pair = True
if max%2 == 1 :
est_pair = False
max -= 1
cpt = 0
for i in range( 0, max, 2) :
t = ''.join(trame[i:i+2])
cpt = cpt + int(t,16)
if not est_pair :
cpt = cpt + int(trame[max]+"00",16)
b = bin(cpt)[2:].zfill(16)
lsb_16 = len(b) - 16
res = int(b[lsb_16:],2) + int(b[:lsb_16],2)
if bin(res)[2:].zfill(16) == "1111111111111111" :
self.bool_checksum = True
return None
self.bool_checksum = False
def affichage_header_cks (self) :
t = ''.join(self.header_cks)
ok = self.bool_checksum
if ok :
s = "Verified"
elif ok == False :
s = "Bad checksum! ERROR"
else :
s = "Unverified"
print("\tHeader checksum : " + "0x" + t + " " + s )
return None
def affichage_src (self) :
dec = [str(int (x, 16)) for x in self.src]
src = '.'.join(dec)
print ( "\tSource : ", src)
return None
def affichage_dest (self) :
dec = [str(int (x, 16)) for x in self.dest]
dest = '.'.join(dec)
print ( "\tDestination: ", dest)
return None
def version_options(self, trame):
octets = trame[0]
if octets == "00" :
print( "\t\tOption IP - End of Options List (EOL) ")
return 0
elif octets == "01" :
print( "\t\tOption IP - No-Operation (NOP) (1 byte)")
return 0
elif octets == "07" :
taille = int(trame[1],16)
pointeur = int(trame[2],16)
print( "\t\tOption IP - Record route (RR) ")
print("\t\t\tType : 7")
print("\t\t\tLength : ", taille)
print("\t\t\tPointer : ", pointeur)
for i in range( 3 , taille, 4 ) :
t1 = int( trame[i] ,16)
t2 = int( trame[i+1] ,16)
t3 = int( trame[i+2] ,16)
t4 = int( trame[i+3] ,16)
print( "\t\t\tRecorded Route : ", t1,".",t2,".",t3,".",t4,sep='')
return taille - 2
elif octets == "83" :
taille = int(trame[1],16)
pointeur = int(trame[2],16)
print( "\t\tOption IP - Loose Source Record Route (LSRR)")
print("\t\t\tType : 131")
print("\t\t\tLength : ", taille)
print("\t\t\tPointer : ", pointeur)
for i in range( 3 , taille, 4 ) :
t1 = int( trame[i] ,16)
t2 = int( trame[i+1] ,16)
t3 = int( trame[i+2] ,16)
t4 = int( trame[i+3] ,16)
print( "\t\t\tRecorded Route : ", t1,".",t2,".",t3,".",t4,sep='')
return taille - 2
elif octets == "89" :
taille = int(trame[1],16)
pointeur = int(trame[2],16)
print( "\t\tOption IP - Strict Source Record Route (SSRR) ")
print("\t\t\tType : 139")
print("\t\t\tLength : ", taille)
print("\t\t\tPointer : ", pointeur)
for i in range( 3 , taille, 4 ) :
t1 = int( trame[i] ,16)
t2 = int( trame[i+1] ,16)
t3 = int( trame[i+2] ,16)
t4 = int( trame[i+3] ,16)
print( "\t\t\tRecorded Route : ", t1,".",t2,".",t3,".",t4,sep='')
return taille - 2
elif octets == "44" :
print( "\t\tOption IP - TimeStamp (TS) ")
return 0
else :
return 0
def analyse_option(self) :
trame = self.options
taille = len( trame )
i = 0
while i != taille :
length_option = self.version_options(trame[i:])
i = i + length_option
i += 1
return None
def affichage_options (self) :
taille = self.taille_options_IP()
if taille == 0 :
print("\tPas d'options")
return None
bytes_options = taille
print("\tOptions: (", bytes_options," bytes)", sep='')
self.analyse_option()
return None
def affichage (self) :
print( "\nInternet Protocol Version 4")
self.affichage_version ()
self.affichage_ihl ()
self.affichage_tos ()
self.affichage_total_length ()
self.affichage_ident ()
self.affichage_flags ()
self.affichage_fragment_offset ()
self.affichage_ttl ()
self.affichage_protocol ()
self.affichage_header_cks ()
self.affichage_src ()
self.affichage_dest ()
self.affichage_options ()
def affichage_bouton(self, fichier ):
taille_IP = 14 + 4 * int(self.ihl,16)
t = "14 "+ str( taille_IP )+"\n"
t = t + "14 15\n"
t = t + "14 15\n"
t = t + "15 16\n"
t = t + "16 18\n"
t = t + "18 20\n"
t = t + "20 21\n"
t = t + "20 22\n"
t = t + "22 23\n"
t = t + "23 24\n"
t = t + "24 26\n"
t = t + "26 30\n"
t = t + "30 34\n"
t = t + "34 " + str ( taille_IP) + "\n"
fichier.write(t)
return None
class TCP :
    """Decode and pretty-print a TCP segment header.

    Every field is stored as parsed from the capture: a list of
    two-character hex strings (one byte per element), except ``offset``
    which is a single hex byte (data offset in its high nibble).
    """
    def __init__ (self , src_port, dest_port, sequence, acknowledgment, offset, flags, window, checksum, urgent_pointer, options) :
        self.src_port = src_port                # 2 bytes
        self.dest_port = dest_port              # 2 bytes
        self.sequence = sequence                # 4 bytes
        self.acknowledgment = acknowledgment    # 4 bytes
        self.offset = offset                    # 1 byte, data offset high nibble
        self.flags = flags                      # 2 bytes (offset byte + flags byte)
        self.window = window                    # 2 bytes
        self.checksum = checksum                # 2 bytes
        self.urgent_pointer = urgent_pointer    # 2 bytes
        self.options = options                  # variable-length option bytes
        self.bool_checksum = None               # None until verification_checksum runs
    def set_options (self, trame):
        """Attach the option bytes once the header length is known."""
        self.options = trame
        return None
    def affichage_src_port (self) :
        """Print the source port in decimal."""
        t = ''.join(self.src_port)
        print ( "\tSource Port : ", int(t, 16))
        return None
    def affichage_dest_port (self) :
        """Print the destination port in decimal."""
        t = ''.join(self.dest_port)
        print ("\tDestination Port : ", int(t, 16))
        return None
    def affichage_sequence (self) :
        """Print the raw 32-bit sequence number."""
        t = ''.join(self.sequence)
        print ("\tSequence number : ", int(t, 16))
        return None
    def affichage_ackowledgment (self) :
        """Print the raw 32-bit acknowledgment number.

        (The method name keeps its historical spelling: callers use it.)
        """
        t = ''.join(self.acknowledgment)
        print ("\tAcknowledgment number : ", int(t, 16))
        return None
    def affichage_offset (self) :
        """Print the data offset (header length in 32-bit words)."""
        b = bin(int(self.offset,16))[2:].zfill(8)
        b = b[:4]      # high nibble = header length in words
        dec = int(b, 2)
        print ("\t" + b + " .... = Header length : ", dec * 4 , " bytes (", dec ,")")
        return None
    def taille_options_TCP (self) :
        """Return the TCP options size in bytes (0 for the minimal
        20-byte header, i.e. a data offset of 5 words)."""
        dec = int(bin(int(self.offset,16))[2:].zfill(8)[:4], 2)
        if dec == 5 :
            return 0
        return (dec - 5 ) * 4
    def affichage_flags (self) :
        """Print the six classic TCP flags (RFC 793 layout: 6 reserved
        bits then URG/ACK/PSH/RST/SYN/FIN)."""
        print("\t Flags :")
        t = ''.join(self.flags)
        b = bin(int(t,16))[2:].zfill(16)
        b = b[4:]      # drop the data-offset nibble, keep 12 bits
        if any (x != '0' for x in b[:6] ):
            print("ERROR! Reserved bit not 0!!")
            return None
        print ("\t\t0000 00.. .... = Reserved bit : Not set")
        c = "Set" if b[6] == '1' else "Not Set"
        print ( "\t\t.... .." , b[6], ". .... = Urgent : " + c , sep = '')
        c = "Set" if b[7] == '1' else "Not Set"
        print ( "\t\t.... ..." , b[7], " .... = Acknowledgement : " + c, sep = '')
        c = "Set" if b[8] == '1' else "Not Set"
        print ( "\t\t.... .... " , b[8], "... = Push : " + c, sep = '')
        c = "Set" if b[9] == '1' else "Not Set"
        print ( "\t\t.... .... ." , b[9], ".. = Reset : " + c, sep = '')
        c = "Set" if b[10] == '1' else "Not Set"
        print ( "\t\t.... .... .." , b[10], ". = Syn : " + c, sep = '')
        c = "Set" if b[11] == '1' else "Not Set"
        print ( "\t\t.... .... ..." , b[11], " = Fin : " + c, sep = '')
        return None
    def affichage_window (self) :
        """Print the advertised window size in decimal."""
        t = ''.join(self.window)
        print ("\tWindow size value : ", int(t, 16))
        return None
    def verification_checksum (self, trame ) :
        """Validate the ones'-complement checksum over *trame*
        (pseudo-header + segment, as hex byte strings) and store the
        verdict in self.bool_checksum.

        Fixes: the old string-slicing fold crashed with int('', 2) when
        the running sum was below 0x10000, and folded the carry only
        once; the standard while-loop fold handles both.
        """
        nb_octets = len(trame)
        impair = nb_octets % 2 == 1
        if impair :
            nb_octets -= 1
        total = 0
        # Sum the data as 16-bit big-endian words.
        for i in range(0, nb_octets, 2) :
            total += int(''.join(trame[i:i + 2]), 16)
        if impair :
            # Odd length: pad the trailing byte with a zero low byte.
            total += int(trame[nb_octets] + "00", 16)
        # Fold the carries back until the sum fits in 16 bits.
        while total > 0xFFFF :
            total = (total & 0xFFFF) + (total >> 16)
        self.bool_checksum = total == 0xFFFF
        return None
    def affichage_checksum (self) :
        """Print the checksum with its verification status."""
        t = ''.join(self.checksum)
        ok = self.bool_checksum
        if ok is True :
            s = "Verified"
        elif ok is False :
            s = "Bad checksum! ERROR"
        else :
            # Never verified (bool_checksum still None).
            s = "Unverified"
        print("\tChecksum : " + "0x" + t + " " + s )
        return None
    def affichage_urgent_pointer (self) :
        """Print the urgent pointer; flag an inconsistency when it is
        non-zero while the URG flag is clear.

        Fix: the non-zero test previously searched for a literal '1'
        hex digit, missing values such as 0x0200 or 0x00a0.
        """
        t = ''.join(self.urgent_pointer)
        c = ''.join(self.flags)
        b = bin(int(c,16))[2:].zfill(16)
        b = b[4:]
        if b[6] == '0' and any(x != '0' for x in t) :
            print ("ERROR! Urgent pointer in use with URG Flag Not Set")
        print("\tUrgent Pointer : " + "0x" + t)
        return None
    def version_options(self, trame):
        """Decode one TCP option starting at trame[0] and print it.

        Returns the number of bytes consumed MINUS ONE (the caller adds
        one after each option).
        """
        octets = trame[0]
        if octets == "00" :
            print( "\t\tTCP Option - End of Options List (EOL) ")
            return 0
        elif octets == "01" :
            print( "\t\tTCP Option - No-Operation (NOP) (1 byte)")
            return 0
        elif octets == "02" :
            # Maximum Segment Size: kind, len=4, 2-byte value.
            t = ''.join(trame[2:4])
            print("\t\tTCP Option - Maximun segment size", int(t,16), "bytes" )
            print("\t\t\tKind : Maximum segment size (2)")
            print("\t\t\tLength : 4")
            print("\t\t\tMSS Value : ", int(t,16) )
            return 3
        elif octets == "03" :
            # Window Scale: kind, len=3, 1-byte shift count.
            print("\t\tTCP Option - Window scale", int(trame[2],16))
            print("\t\t\tKind : Window Scale (3)")
            print("\t\t\tLength : 3")
            print("\t\t\tShift count : ",int(trame[2],16) )
            return 2
        elif octets == "04" :
            print("\t\tTCP Option - SACK permitted")
            print("\t\t\tKind : SACK permitted (4)")
            print("\t\t\tLength : 2")
            return 1
        elif octets == "08" :
            # Timestamps: kind, len=10, two 4-byte values.
            t2 = ''.join(trame[2:6])
            t3 = ''.join(trame[6:10])
            print("\t\tTCP Option - Timestamp :")
            print("\t\t\tKind : Time Stamp Option (8)", sep='')
            print("\t\t\tLength : 10")
            print("\t\t\tTimestamp value : ", int (t2,16))
            print("\t\t\tTimestamp echo reply : ", int (t3,16))
            return 9
        elif octets == "09" :
            # NOTE(review): kind 9 advances 4 bytes here — confirm the
            # intended option length against RFC 1693.
            print("\t\tPartial Order Connection Permitted")
            return 3
        else :
            print("\t\tUnknown Option")
            return 0
    def analyse_option(self) :
        """Walk self.options and decode each option in turn.

        Uses ``i < taille`` (not ``!=``) so a malformed length that
        overshoots the options field terminates instead of looping
        forever.
        """
        trame = self.options
        taille = len( trame )
        i = 0
        while i < taille :
            i += self.version_options(trame[i:]) + 1
        return None
    def affichage_options (self) :
        """Print the TCP options block, or a placeholder when absent."""
        taille = self.taille_options_TCP()
        if taille == 0 :
            print("\t Pas d'options")
            return None
        print("\t Options: (", taille," bytes)", sep='')
        self.analyse_option()
        return None
    def affichage (self) :
        """Print the complete decoded TCP header, field by field."""
        print ("\nTransmission Control Protocol ")
        self.affichage_src_port ()
        self.affichage_dest_port ()
        self.affichage_sequence ()
        self.affichage_ackowledgment ()
        self.affichage_offset ()
        self.affichage_flags ()
        self.affichage_window ()
        self.affichage_checksum ()
        self.affichage_urgent_pointer ()
        self.affichage_options ()
        return None
    def affichage_bouton(self, fichier, debut ):
        """Write the byte ranges of each TCP header field to *fichier*
        (for the GUI), starting at absolute offset *debut*."""
        b = bin(int(self.offset,16))[2:].zfill(8)
        b = b[:4]
        dec = int(b, 2)
        taille_TCP = 4 * dec
        str_debut = str(debut)
        str_fin = str(debut + taille_TCP)
        t = str_debut + " " + str_fin +"\n"
        t = t + str(debut) + " " + str(debut+2) +"\n"
        t = t + str(debut+2) + " " + str(debut+4) +"\n"
        t = t + str(debut+4) + " " + str(debut+8) +"\n"
        t = t + str(debut+8) + " " + str(debut+12) +"\n"
        t = t + str(debut+12) + " " + str(debut+13) +"\n"
        t = t + str(debut+12) + " " + str(debut+14) +"\n"
        t = t + str(debut+14) + " " + str(debut+16) +"\n"
        t = t + str(debut+16) + " " + str(debut+18) +"\n"
        t = t + str(debut+18) + " " + str(debut+20) +"\n"
        t = t + str(debut+20) + " " + str_fin +"\n"
        fichier.write(t)
        return None
class HTTP :
    """Display the payload of an HTTP segment as readable text."""
    def __init__ ( self, data ) :
        # data: payload bytes as two-character hex strings.
        self.data = data
    def affichage_data (self) :
        """Decode self.data into text, stopping at the blank line
        (CR LF CR LF) that terminates the HTTP headers. Lines are
        indented with a tab after every line feed."""
        texte = "\t"
        apres_lf = 0      # 10 while the last non-CR byte was a line feed
        fin_attendue = 0  # 10 once LF then CR was seen (blank line pending)
        for octet in self.data :
            valeur = int(octet, 16)
            if valeur == 13 :
                if apres_lf == 10 :
                    fin_attendue = 10
            elif valeur == 10 :
                if fin_attendue == 10 :
                    break
                apres_lf = 10
            else :
                apres_lf = 0
                fin_attendue = 0
            texte = texte + chr(valeur)
            if apres_lf == 10 :
                texte = texte + "\t"
        try:
            texte.encode('ASCII')
        except UnicodeEncodeError:
            print ("\tIt was not a ascii-encoded unicode string")
        else:
            print ( texte )
        return None
    def affichage( self) :
        """Print the section title followed by the decoded payload."""
        print( "Hypertext Transfer Protocol")
        self.affichage_data()
        return None
    def affichage_bouton(self, fichier, debut) :
        """Write the byte range covered by the payload (for the GUI)."""
        fichier.write(str(debut) + " " + str(debut + len(self.data)) + "\n")
        return None
def affichage_liste_ligne( L ) :
    """ list[octets] -> None
    Display one raw line of the recovered frame (debug helper). """
    print(L)
    return None
def affichage_plusieurs_listes_lignes( L ) :
    """ list[list[octets]] -> None
    Display several raw lines of the frame (debug helper). """
    for ligne in L :
        affichage_liste_ligne(ligne)
    return None
def affichage_trame (trame) :
    """ list[octets] -> None
    Display the complete frame (debug helper). """
    print(trame)
    return None
def affichage_plusieurs_trames( liste_trame ) :
    """ list[tuple(list[octets], bool, int, int)] -> None
    Display every parsed frame tuple (debug helper). """
    print ("\n ********************** AFFICHAGE DES TRAMES **************************** \n")
    for element in liste_trame :
        # Unpacking validates the tuple shape; only the tuple is shown.
        trame, ok, ligne_erreur, taille = element
        print ( element )
        print("\n")
    return None
def creation_listes_lignes( nom_fichier ):
    """ str -> list[list[octets]]
    Return every line of the file split into whitespace-separated hex
    tokens. On each line, the first non-hexadecimal token and everything
    after it are discarded; lines left with fewer than two tokens are
    dropped entirely.

    Fix: the input file is now opened with a context manager so it is
    closed even if reading fails.
    """
    with open(nom_fichier) as f :
        lines_list = f.read().splitlines()
    # Split every line on spaces, dropping empty tokens.
    element_lignes = [[ n for n in k.split(' ') if n != '' ] for k in lines_list]
    ligne_len = len( element_lignes)
    i = 0
    while i < ligne_len :
        for j in range(len(element_lignes[i])):
            # Truncate at the first token containing a non-hex character.
            if not all(c in string.hexdigits for c in element_lignes[i][j]) :
                del element_lignes[i][j:]
                break
        if len(element_lignes[i]) <= 1 :
            # Empty or offset-only line: remove it (do not advance i).
            element_lignes.pop(i)
            ligne_len -= 1
        else:
            i += 1
    return element_lignes
def make_list_offset_decimal( trame ) :
    """ list[list[octets]] -> list[int]
    Convert every line's leading offset to decimal. Returns [] as an
    error marker when the offsets are not strictly increasing (the
    frame is then considered corrupt)."""
    liste_offset = [0]
    dernier = 0
    for ligne in trame[1:] :
        courant = int(ligne[0], 16)
        if courant <= dernier :
            # Non-increasing offset: corrupt frame.
            return []
        liste_offset.append(courant)
        dernier = courant
    return liste_offset
def decoupage_plusieurs_trames ( L ):
    """ list[lignes] -> list[list[lignes]]
    Split the file's lines into one list per frame, each frame starting
    at a line whose offset is 0x0.

    Fix: the leading-line skip loop referenced the undefined name
    ``cpt`` (NameError whenever the first line's offset was non-zero);
    it now uses ``cpt_lignes``.
    """
    liste_trame = []
    trame = []
    nombre_lignes_totale = len(L)
    cpt_lignes = 0
    n = int(L[0][0], 16)
    # Skip any leading lines until the first offset-0 line.
    while n != 0 :
        cpt_lignes += 1
        n = int(L[cpt_lignes][0], 16)
    # First line of the first frame.
    trame.append(L[cpt_lignes])
    for i in range (cpt_lignes + 1, nombre_lignes_totale) :
        n = int(L[i][0], 16)
        if n == 0 :
            # Offset 0 marks the start of a new frame.
            liste_trame.append(trame)
            trame = []
        trame.append(L[i])
    # Flush the last frame.
    liste_trame.append(trame)
    return liste_trame
def creation_trame ( L , liste_offset ) :
    """ list[list[octets]], list[int] -> tuple(list[octets], bool, int, int)
    Assemble the frame's bytes from its lines and their offsets.
    Returns (octets, ok, error_line, size); error_line is -1 on success,
    otherwise the 1-based index of the line that was short of bytes."""
    # Offsets rejected upstream: propagate the error marker.
    if liste_offset == [] :
        return [], False, -1, 0
    octets = []
    # Expected byte count per line = gap between consecutive offsets.
    attendu = [apres - avant for avant, apres in zip(liste_offset, liste_offset[1:])]
    derniere = len(L) - 1
    for num in range(derniere) :
        segment = L[num][1:attendu[num] + 1]
        octets = octets + segment
        if len(segment) != attendu[num] :
            # Short line: report where the bytes went missing.
            return (octets, False, num + 1, len(octets))
    # The last line contributes everything after its offset.
    octets = octets + L[derniere][1:]
    return (octets, True, -1, len(octets))
def parser( nom_fichier ) :
    """ str -> list[tuple(list[octets], bool, int, int)]
    Build the list of frames found in the capture file: read and filter
    the lines, split them into frames at each offset-0 line, then
    assemble each frame from its offsets."""
    lignes = creation_listes_lignes( nom_fichier )
    blocs = decoupage_plusieurs_trames( lignes )
    return [creation_trame(bloc, make_list_offset_decimal(bloc)) for bloc in blocs]
def ecrire_trame_gui( liste_trames ) :
    """ list[tuple(list[octets], bool, int, int)] -> None
    Write the frames to .trames_gui.txt for the graphical interface:
    16 bytes per line, each line prefixed with its 4-digit hex offset,
    frames separated by "*". Rejected frames are written as "!!!!".

    Fix: the output file is now opened with a context manager so it is
    closed even if a write fails.
    """
    text = ""
    for tuple_trame in liste_trames :
        # Offset restarts at 0 for every frame.
        offset = 0
        trame, ok, ligne_erreur, taille = tuple_trame
        if trame == [] :
            # Frame rejected by the parser: sentinel + separator.
            text = text + "!!!!\n*\n"
            continue
        lignes = taille // 16   # number of full 16-byte lines
        for i in range ( 0, lignes, 1) :
            str_offset = (hex(offset))[2:].zfill(4)
            text = text + str_offset + "   " + ' '.join(trame[i*16:i*16+16]) + "\n"
            offset = offset + 16
        if trame[lignes*16:] == [] :
            # Size is an exact multiple of 16: no partial line left.
            text = text + "*\n"
            continue
        str_offset = (hex(offset))[2:].zfill(4)
        text = text + str_offset + "   " + ' '.join(trame[lignes*16:])
        text = text + "\n*\n"
    # Write next to this module so the GUI can find the file.
    dossier = os.path.dirname( os.path.abspath(__file__))
    with open(dossier + "/.trames_gui.txt", "w+") as f :
        f.write(text)
    return None
def est_IPv4 ( trame ) :
    """Return True when the Ethernet EtherType (bytes 12-13) is 0x0800,
    i.e. the frame carries IPv4."""
    return ''.join(trame[12:14]) == "0800"
def est_TCP ( trame ) :
    """Return True when the IPv4 Protocol byte (offset 23) is 0x06,
    i.e. the packet carries TCP."""
    return trame[23] == "06"
def est_HTTP ( trame, port_src, port_dest ) :
    """Return True when either TCP port is 80 (HTTP). *trame* is unused
    but kept for signature consistency with the other est_* helpers."""
    return 80 in (int(''.join(port_src), 16), int(''.join(port_dest), 16))
def nombre_octects_exacts ( taille, length_IP ):
    """Return True when the captured byte count matches the IP Total
    Length plus the 14-byte Ethernet header."""
    return taille == length_IP + 14
def analyses_toute_les_trames ( Liste_trames , fichier ) :
    """list [ tuple(list[octects], bool, int ,int) ] -> None
    Analyse every recovered frame: Ethernet, then (when applicable)
    IPv4, TCP and HTTP. Decoded fields are printed to stdout; the byte
    ranges of each header are written to *fichier* for the GUI.
    """
    length = len(Liste_trames)  # number of recovered frames
    # Walk through every recovered frame
    for i in range( length ) :
        Trame, ok, ligne_erreur, taille = Liste_trames[i]
        print ( "\n********************* Analyse de la trame", i+1, "******************** \n")
        # Frame rejected upstream: offsets were not strictly increasing
        if Trame == [] :
            print(" \nLa trame" , i, "est erronee car l'offset n'est pas dans l'ordre croissant\n")
            fichier.write("*\n!\n" )
            continue
        # Frame rejected upstream: bytes missing on one of its lines
        if not ok :
            print(" \nLa trame" , i, "a rencontré une erreur à la ligne ",ligne_erreur, " de sa trame\n")
            fichier.write("*\n!\n" )
            continue
        print (taille, " octets de donnees")
        # Placeholder names per frame/layer (immediately rebound below)
        str_num = str(i)
        str_Ethernet = "Ethernet_" + str_num
        str_IP = "IP_" + str_num
        str_TCP = "TCP_" + str_num
        str_HTTP = "HTTP_" + str_num
        # Ethernet header (bytes 0-13)
        dest_mac = Trame[:6]             # Destination (MAC address)
        src_mac = Trame[6:12]            # Source (MAC address)
        type_ethernet = Trame[12:14]     # EtherType
        str_Ethernet = Ethernet( dest_mac, src_mac, type_ethernet )
        str_Ethernet.affichage()
        str_Ethernet.affichage_bouton(fichier)
        # Not IPv4: skip to the next frame
        if not est_IPv4( Trame ) :
            print("Notre analyseur ne prend pas en compte le protocole 0x" + ''.join(Trame[12:14] ) )
            fichier.write("14 " + str(taille))
            continue
        # IPv4 fixed header (bytes 14-33)
        version = Trame[14][0]           # Version (high nibble)
        header_length_ip = Trame[14][1]  # Header Length in 32-bit words (low nibble)
        dsf = Trame[15]                  # Differentiated Services Field
        total_length = Trame[16:18]      # Total Length
        id = Trame[18:20]                # Identification (NOTE: shadows builtin id())
        flags_ip = Trame[20]             # Flags
        offset = Trame[20:22]            # Fragment offset (shares byte 20 with flags)
        ttl = Trame[22]                  # Time to live
        protocol = Trame[23]             # Protocol
        checksum_ip = Trame[24:26]       # Header Checksum
        src_ip = Trame[26:30]            # Source IP
        dest_ip = Trame[30:34]           # Destination IP
        options_ip = []                  # IP options (attached below)
        str_IP = IP ( version, header_length_ip, dsf , total_length, id, flags_ip, offset, ttl, protocol, checksum_ip, src_ip, dest_ip, options_ip)
        taille_options_IP = str_IP.taille_options_IP() # size of the IP options, 0 if none
        fin_IP = 34 + taille_options_IP                # first byte after the IP header
        str_IP.set_options( Trame[34 : fin_IP ] )      # attach the IP options
        str_IP.verification_checksum(Trame[14:fin_IP] ) # validate the header checksum
        str_IP.affichage()
        str_IP.affichage_bouton(fichier)
        # Captured size must match the announced Total Length + 14
        if not nombre_octects_exacts( taille, int(''.join(str_IP.total_length),16)):
            print("La trame contient ",taille,"octets alors qu'elle devrait en contenir",int(''.join(str_IP.total_length),16)+14)
            continue
        # Not TCP: skip to the next frame
        if not est_TCP( Trame ) :
            print("\nNotre analyseur ne prend pas en compte le protocole 0x" + ''.join(Trame[23] ) )
            fichier.write(str(fin_IP) +" " + str(taille) + "\n")
            continue
        # TCP fixed header (20 bytes starting at fin_IP)
        debut_TCP = fin_IP                               # first byte of the TCP segment
        src_port = Trame[debut_TCP:debut_TCP+2]          # Source Port
        dest_port = Trame[debut_TCP+2:debut_TCP+4]       # Destination Port
        sequence = Trame[debut_TCP+4:debut_TCP+8]        # Sequence number
        acknowledgment = Trame[debut_TCP+8:debut_TCP+12] # Acknowledgment number
        header_length_tcp = Trame[debut_TCP+12]          # Data offset byte
        flags_tcp = Trame[debut_TCP+12:debut_TCP+14]     # Offset + flags bytes
        window = Trame[debut_TCP+14:debut_TCP+16]        # Window
        checksum_tcp = Trame[debut_TCP+16:debut_TCP+18]  # Checksum
        urgent_pointer = Trame[debut_TCP+18:debut_TCP+20] # Urgent pointer
        options_tcp = []                                 # TCP options (attached below)
        # TCP segment length for the pseudo-header, as 4 hex digits
        TCP_length = str ( hex( int( ''.join(str_IP.total_length),16 ) - 20 - taille_options_IP )[2:].zfill(4))
        str_TCP = TCP ( src_port, dest_port, sequence, acknowledgment, header_length_tcp, flags_tcp, window, checksum_tcp, urgent_pointer, options_tcp)
        str_TCP.set_options( Trame[debut_TCP+20 : debut_TCP+20+str_TCP.taille_options_TCP() ] )
        # Checksum over pseudo-header (src, dst, zero, protocol, length) + segment
        str_TCP.verification_checksum( src_ip + dest_ip + ["00"] + [protocol] + [TCP_length[0:2]] + [TCP_length[2:4]] + Trame[debut_TCP:])
        str_TCP.affichage()
        str_TCP.affichage_bouton(fichier, debut_TCP)
        taille_options_TCP = str_TCP.taille_options_TCP() # size of the TCP options, 0 if none
        fin_TCP = debut_TCP + 20 + taille_options_TCP     # first byte after the TCP header
        # No payload: skip to the next frame
        if fin_TCP == taille :
            continue
        # Not HTTP: skip to the next frame
        if not est_HTTP( Trame, src_port, dest_port) :
            print("\nNotre analyseur ne peut pas lire le contenu de ce segment (pas HTTP)")
            continue
        debut_HTTP = fin_TCP  # first byte of the HTTP payload
        str_HTTP = HTTP ( Trame[debut_HTTP:])
        str_HTTP.affichage()
        str_HTTP.affichage_bouton(fichier, debut_HTTP)
    return None
if __name__ == "__main__":
orig_stdout = sys.stdout
file =os.path.dirname( os.path.abspath(__file__))
f = open(file+"/out.txt", "w", encoding="utf-8")
f_bouton = open(file+"/.f_bouton.txt", 'w', encoding="utf-8")
sys.stdout = f
fileSrc = sys.argv[1]
Liste_trames = parser(fileSrc)
analyses_toute_les_trames (Liste_trames,f_bouton)
sys.stdout = orig_stdout
f_bouton.close()
f.close()
ecrire_trame_gui(Liste_trames)
|
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
import unittest
import sys
import itertools
import test_vers
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
from streamsx.topology import schema
import streamsx.topology.context
import streamsx.spl.op as op
@unittest.skipIf(not test_vers.tester_supported() , "tester not supported")
class TestSPL(unittest.TestCase):
    """ Test invocations of SPL operators from Python topology.
    """
    def setUp(self):
        # Default context: standalone execution (subclasses override).
        Tester.setup_standalone(self)
    def test_SPLBeaconFilter(self):
        """Test a Source and a Map operator.
        Including an output clause.
        """
        topo = Topology('test_SPLBeaconFilter')
        # Beacon source: emits 100 tuples, one every 0.2 seconds.
        s = op.Source(topo, "spl.utility::Beacon",
            'tuple<uint64 seq>',
            params = {'period': 0.2, 'iterations':100})
        # Output clause: fill the seq attribute with the iteration count.
        s.seq = s.output('IterationCount()')
        # Keep only even sequence numbers -> exactly half the tuples.
        f = op.Map('spl.relational::Filter', s.stream,
            params = {'filter': op.Expression.expression('seq % 2ul == 0ul')})
        tester = Tester(topo)
        # 100 beacon tuples filtered to evens leaves 50.
        tester.tuple_count(f.stream, 50)
        tester.test(self.test_ctxtype, self.test_config)
@unittest.skipIf(not test_vers.tester_supported() , "tester not supported")
class TestDistributedSPL(TestSPL):
    """Run the TestSPL cases against a distributed Streams instance."""
    def setUp(self):
        Tester.setup_distributed(self)
@unittest.skipIf(not test_vers.tester_supported() , "tester not supported")
class TestBluemixSPL(TestSPL):
    """Run the TestSPL cases against the Streaming Analytics service."""
    def setUp(self):
        Tester.setup_streaming_analytics(self, force_remote_build=True)
        # Ensure the old name still works.
        self.test_ctxtype = "ANALYTICS_SERVICE"
|
from modules.commands.utils import *
from modules.commands.kick import initiate_kicks
from vkwave.bots.storage.storages.ttl import TTLStorage
async def exec_catch_runner(box, user_id):
    """Track chat joins per user and kick "runners" who enter too many
    chats within 24 hours.

    State lives in stor.vault['enters'][user_id] as parallel
    'chats'/'times' lists. Returns True when a kick was initiated,
    False otherwise. Thresholds come from stor.config['MAXENTERS'].
    """
    if user_id not in stor.vault['enters'].keys():
        # First join seen for this user: start a fresh record.
        stor.vault['enters'][user_id] = {
            'chats':[box.msg.peer_id],
            'times':[int(time.time())]
        }
    else:
        cur_user_enters = stor.vault['enters'][user_id]
        # Expire entries older than 24 hours: collect indices first,
        # then pop in reverse so remaining positions stay valid.
        for_delete = []
        time_delete_border = int(time.time() - stor.time_day)
        for num, expire_time in enumerate(cur_user_enters['times']):
            if expire_time < time_delete_border:
                for_delete.append(num)
        for_delete.reverse()
        for num in for_delete:
            cur_user_enters['chats'].pop(num)
            cur_user_enters['times'].pop(num)
        # Record this join at most once per chat.
        if box.msg.peer_id not in stor.vault['enters'][user_id]['chats']:
            stor.vault['enters'][user_id]['chats'].append(box.msg.peer_id)
            stor.vault['enters'][user_id]['times'].append(int(time.time()))
    count_of_enters = len(stor.vault['enters'][user_id]['chats'])
    if count_of_enters == stor.config['MAXENTERS']:
        # Exactly at the limit: warn only.
        stor.do(send_answer(box, "You will be kicked for entering any chat in 24 hours"))
    if count_of_enters == stor.config['MAXENTERS'] * 2:
        # Far past the limit: kick from every recorded chat.
        await initiate_kicks(box.api, stor.vault['enters'][user_id]['chats'], user_id, msg_del=True)
        return True
    if count_of_enters > stor.config['MAXENTERS']:
        # Over the limit: kick from the current chat only.
        await initiate_kicks(box.api, box.msg.peer_id, user_id, msg_del=True)
        return True
    return False
@log_and_respond_decorator
async def check_all(box, user_id):
    """Run every moderation check for a user joining a chat.

    Returns a tuple consumed by log_and_respond_decorator:
    (handled, log_message[, reply_message]).
    """
    if user_id == box.event.object.group_id*-1:
        # The "new member" is this bot itself being added to the chat.
        return (True, f"I'm in")
    if not base.verify_chat(box.msg.peer_id):
        return (False, f"New user in wrong chat")
    if base.is_banned(user_id, box.msg.peer_id):
        await initiate_kicks(box.api, box.msg.peer_id, user_id, msg_del=True)
        return (True, f"Ban was triggered")
    if base.is_chat_admin(box.msg.from_id, box.msg.peer_id):
        # Invited by a chat admin: skip the automatic checks.
        return (True, "Under admin's control")
    if user_id<0:
        # Negative ids are communities/bots on VK.
        await initiate_kicks(box.api, box.msg.peer_id, user_id, msg_del=True)
        return (True, f"Bot was caught at {box.msg.peer_id}", "I am the only bot here")
    if base.check_gate(box.msg.peer_id):
        # Chat "gate" is closed: nobody may join right now.
        await initiate_kicks(box.api, box.msg.peer_id, user_id, msg_del=True)
        return (True, f"User {user_id} kicked by closed gate at {box.msg.peer_id}", "Gate is closed")
    if await exec_catch_runner(box, user_id):
        return (True, f"Runner was caught")
    not_from_target = f"{user_id} " if user_id != box.msg.from_id else ''
    return (True, f"User {not_from_target}welcomed")
|
import obspy
from obspy import Stream
from obspy.clients.syngine import Client as synClient
from obspy.clients.fdsn import Client as fdsnClient
from obspy.core.util.attribdict import AttribDict
from obspy.geodetics.base import degrees2kilometers, gps2dist_azimuth
from bowpy.util.array_util import dist_azimuth2gps, geometrical_center
"""
:param sourcedoublecouple: Specify a source as a double couple. The
list of values are ``strike``, ``dip``, ``rake`` [, ``M0`` ],
where strike, dip and rake are in degrees and M0 is the scalar
seismic moment in Newton meters (Nm). If not specified, a value
of *1e19* will be used as the scalar moment.
:param sourcemomenttensor: Specify a source in moment tensor
components as a list: ``Mrr``, ``Mtt``, ``Mpp``, ``Mrt``, ``Mrp``,
``Mtp`` with values in Newton meters (*Nm*).
# Example 1
from obspy.clients.syngine import Client as synClient
from obspy.clients.fdsn import Client as fdsnClient
from bowpy.util.syngine import get_syngine_data
model='ak135f_1s'
eventid="GCMT:201305240544A"
irisclient = fdsnClient('IRIS')
inv = irisclient.get_stations(network='TA', station='121A',
starttime=UTCDateTime(2017,1,1),
endtime=UTCDateTime(2018,1,1), maxlatitude=50)
streams, cat = get_syngine_data(model, client="IRIS", eventid=eventid, inv=inv)
st = streams.TA
# Example 2
from obspy.clients.syngine import Client as synClient
from obspy.clients.fdsn import Client as fdsnClient
from bowpy.util.syngine import get_syngine_data
from obspy.core import AttribDict
model='ak135f_1s'
irisclient = fdsnClient('IRIS')
origins = AttribDict()
origins['latitude'] = 54.61
origins['longitude'] = 153.77
origins['depth'] = 611000.0
origins['time'] = UTCDateTime(2013, 5, 24, 5, 45, 8.3)
moment_tensor = [-1.670, 0.382, 1.280, -0.784, -3.570, 0.155]
exponent = 1E28
moment_tensor[:] = [x * exponent for x in moment_tensor]
sourcedoublecouple = [189, 11, -93, 3.95e+28]
inv = irisclient.get_stations(network='TA', station='121A',
starttime=UTCDateTime(2017,1,1),
endtime=UTCDateTime(2018,1,1),
maxlatitude=50)
streams, cat_syn = get_syngine_data(model, client='IRIS', inv=inv,
origins=origins, m_tensor=moment_tensor)
st = streams.TA
"""
def get_syngine_data(
    model,
    client=None,
    reclat=None,
    reclon=None,
    inv=None,
    eventid=None,
    origins=None,
    m_tensor=None,
    source_dc=None,
):
    """Download synthetic waveforms from the IRIS Syngine service.

    Receivers are given either as an obspy inventory (*inv*) or as
    coordinate lists (*reclat*/*reclon*); the source either as an
    *eventid* (e.g. "GCMT:...") or via *origins* plus a moment tensor /
    double couple.

    param model: Syngine Earth model name (e.g. 'ak135f_1s')
    param client: FDSN client name used to fetch a matching catalog
    param reclat: receiver latitudes
    type reclat: list of floats
    param reclon: receiver longitudes
    type reclon: list of floats
    param inv: station inventory (alternative to reclat/reclon)
    param eventid: event identifier
    param origins: AttribDict with time/latitude/longitude/depth
    param m_tensor: moment tensor [Mrr, Mtt, Mpp, Mrt, Mrp, Mtp] in Nm
    param source_dc: double couple [strike, dip, rake, M0]

    Returns (streams, cat): *streams* is an AttribDict keyed by network
    code in inventory mode, or a plain Stream in coordinate mode; *cat*
    is the matching catalog, or None when it cannot be fetched.
    """
    if client:
        # The name is rebound from the client *name* (str) to the
        # client *instance*; later truthiness checks still work.
        client = fdsnClient(client)
    synclient = synClient()
    if inv:
        # Inventory mode: one Stream per network, one trace per station.
        streams = AttribDict()
        for network in inv:
            stream = obspy.Stream()
            for station in network:
                print(station)
                if eventid:
                    stream_tmp = synclient.get_waveforms(
                        model=model,
                        network=network.code,
                        station=station.code,
                        eventid=eventid,
                    )
                else:
                    # Explicit source description instead of an event id.
                    stream_tmp = synclient.get_waveforms(
                        model=model,
                        network=network.code,
                        station=station.code,
                        origintime=origins.time,
                        sourcelatitude=origins.latitude,
                        sourcelongitude=origins.longitude,
                        sourcedepthinmeters=origins.depth,
                        sourcemomenttensor=m_tensor,
                        sourcedoublecouple=source_dc,
                    )
                stream.append(stream_tmp[0])
            streams[network.code] = stream
    if reclat and reclon:
        # Coordinate mode: one trace per (lat, lon) pair.
        stream = obspy.Stream()
        for rlat, rlon in zip(reclat, reclon):
            if eventid:
                stream_tmp = synclient.get_waveforms(
                    model=model,
                    receiverlatitude=rlat,
                    receiverlongitude=rlon,
                    eventid=eventid,
                )
            else:
                stream_tmp = synclient.get_waveforms(
                    model=model,
                    receiverlatitude=rlat,
                    receiverlongitude=rlon,
                    origintime=origins.time,
                    sourcelatitude=origins.latitude,
                    sourcelongitude=origins.longitude,
                    sourcedepthinmeters=origins.depth,
                    sourcemomenttensor=m_tensor,
                    sourcedoublecouple=source_dc,
                )
            stream.append(stream_tmp[0])
        # NOTE(review): this overwrites any inventory-mode result when
        # both inv and reclat/reclon are supplied — confirm intended.
        streams = stream
    if origins:
        # NOTE(review): the catalog search window is [origin-120, origin]
        # (the event time sits at the window's end) — presumably meant
        # to be +/- 120 s around the origin; confirm.
        starttime = origins.time - 120
        endtime = starttime + 120
        if client:
            cat = client.get_events(
                starttime,
                endtime,
                minlatitude=origins.latitude - 0.5,
                maxlatitude=origins.latitude + 0.5,
            )
        else:
            cat = None
    else:
        cat = None
    return streams, cat
def get_ref_data(
    stream,
    inv,
    model="ak135f_1s",
    eventid=None,
    origins=None,
    m_tensor=None,
    source_dc=None,
):
    """Build a reference stream of synthetics for every trace in
    *stream*, placing each synthetic receiver at the trace's distance
    along the great circle from the source through the geometrical
    center of the array.

    Returns a Stream whose traces inherit station, starttime and
    distance metadata from the corresponding input traces.

    NOTE(review): ``rlon = 180.0 - rlon`` reflects rather than wraps
    the longitude (190 deg should map to -170, not -10) — confirm.
    NOTE(review): get_syngine_data is called inside the loop with the
    *cumulative* receiver lists, yet only stream_tmp[0] (the first
    receiver) is kept each iteration — looks redundant and possibly
    wrong; verify against the intended per-trace receiver.
    """
    ref_stream = Stream()
    rlats = []
    rlons = []
    # Azimuth from the source to the array's geometrical center fixes
    # the great circle along which the receivers are placed.
    geom = geometrical_center(inv)
    d, az, baz = gps2dist_azimuth(
        origins.latitude, origins.longitude, geom.latitude, geom.longitude
    )
    for i, trace in enumerate(stream):
        # Epicentral distance of this trace, converted to meters.
        dist = degrees2kilometers(trace.stats.distance) * 1000.0
        rlat, rlon = dist_azimuth2gps(origins.latitude, origins.longitude, az, dist)
        if rlon > 180:
            rlon = 180.0 - rlon
        print(rlat, rlon)
        rlats.append(rlat)
        rlons.append(rlon)
        print("Receiving trace %i of %i." % (i + 1, len(stream)))
        stream_tmp, cat_void = get_syngine_data(
            model,
            reclat=rlats,
            reclon=rlons,
            eventid=eventid,
            origins=origins,
            m_tensor=m_tensor,
            source_dc=source_dc,
        )
        # Copy the synthetic trace and carry over the observed trace's
        # identification metadata.
        trace_tmp = stream_tmp[0].copy()
        trace_tmp.stats.station = trace.stats.station
        trace_tmp.stats.starttime = trace.stats.starttime
        trace_tmp.stats.distance = trace.stats.distance
        ref_stream.append(trace_tmp)
    return ref_stream
|
# This problem was asked by Uber.
# Given an array of integers, return a new array such that each element at index i of the new array is the product of all the numbers in the original array except the one at i.
# For example, if our input was [1, 2, 3, 4, 5], the expected output would be [120, 60, 40, 30, 24]. If our input was [3, 2, 1], the expected output would be [2, 3, 6].
# Follow-up: what if you can't use division?
from functools import reduce
if __name__ == "__main__":
in1 = [1,2,3,4,5]
in2 = [3,2,1]
product = reduce(lambda x,y: x*y, in1)
out = [int(product / i) for i in in1]
print(out)
product = reduce(lambda x,y: x*y, in2)
out = [int(product / i) for i in in2]
print(out)
out = [reduce(lambda x, y: x * y, in1[:i] + in1[i + 1:]) for i in range(len(in1))]
print(out)
out = [reduce(lambda x, y: x * y, in2[:i] + in2[i + 1:]) for i in range(len(in2))]
print(out) |
"""Main integratopm abstract class.
This is the main module of spradius. It contains the abstract definition
of any time integrations strategy to be implemented within this software
and the main routine for computing the spectral radius, based on the
numerical strategy presented in
1. Benítez JM, Montáns FJ. The value of numerical amplification matrices
in time integration methods. Computers & Structures. 2013 Nov 1;128:243–50.
Due to the characteristics of the numerical method, it is required a
significant numerical floating-point procedure. This comes from the fact
that a large number of time steps is needed for an accurate prediction of
the spectral radius and, as a consequence, the eigenvalues of the
amplification matrix (typically with modulus smaller than 1) will
dangerously decrease to values close to 0. For this reason, the mpmath
package is used, at the cost of substantially reducing the speed.
The mass is set to 1, the damping to 0 and the stiffness to 4*pi^2.
This ensures that the natural period is 1, therefore the input time
step vector actually represents the vector of non-dimensional time
steps often written as Delta t / T_1.
..module:: integrator
:synopsis: Integrator main class
..moduleauthor:: A. M. Couto Carneiro <amcc@fe.up.pt>
"""
from abc import ABC, abstractmethod
import mpmath
import numpy as np
from tqdm import tqdm
# Working precision in significant decimal digits for all mpmath arithmetic.
PRECISION = 30
# Configure the global context's decimal precision. The previous code did
# `mpmath.mp = PRECISION`, which rebinds the context object to an int and
# never actually raises the working precision.
mpmath.mp.dps = PRECISION
class integrator(ABC):
    """Abstract base for one-step time-integration schemes on a unit SDOF oscillator.

    Concrete subclasses implement :meth:`initialise`, :meth:`integrate` and
    :meth:`get_amplification_matrix_dim`; this base class drives the
    spectral-radius estimation following Benitez & Montans (2013).
    """
    # Mass, damping and stiffness of the reference oscillator.
    # mass=1, damp=0, stif=4*pi^2 gives a natural period T_1 of exactly 1,
    # so each entry of dt_list is directly the non-dimensional Delta t / T_1.
    mass = mpmath.mpf(1)
    damp = mpmath.mpf(0)
    stif = 4 * mpmath.pi ** 2
    def __init__(self, dt_list, num_steps=600, **kwargs):
        """Initialise the scheme and immediately compute its spectral radius.

        Parameters
        ----------
        dt_list : list / array of float
            Non-dimensional time steps to evaluate
        num_steps : int
            Number of integration steps per evaluation
        **kwargs
            Forwarded to the subclass-specific :meth:`initialise`
        """
        self.initialise(**kwargs)
        self.spectral_radius = self.compute_spectral_radius(dt_list, num_steps)
    @abstractmethod
    def initialise(self, **kwargs):
        """Initialises the specific integrator with the given keyword
        arguments.
        """
        pass
    @abstractmethod
    def integrate(self, dt, num_steps, init_disp, init_vel, init_accel):
        """Main abstract routine for performing the time-integration.
        Parameters
        ----------
        dt : float
            Time step
        num_steps : int
            Number of time steps to compute
        init_disp : float
            Initial displacement
        init_vel : float
            Initial velocity
        init_accel : float
            Initial acceleration
        Returns
        -------
        disp_this : mpf
            Displacement at the last time step
        vel_this : mpf
            Velocity at the last time step
        accel_this : mpf
            Acceleration at the last time step
        """
        pass
    @staticmethod
    @abstractmethod
    def get_amplification_matrix_dim():
        """Returns the dimension of the amplification matrix.
        Each time-integration method is associated with a specific
        dimension of the amplification matrix. This routine serves the
        purpose of controlling this aspect, which is relevant in the
        computation of the associated eigenvalues and identification of
        the maximum (spectral radius).
        Returns
        -------
        ndim : int
            Dimension of the amplification matrix
        """
    def compute_spectral_radius(self, dt_list, num_steps):
        """Spectral radius computation routine.
        The spectral radius is computed approximately by integrating the
        equations of motion for initial unit displacement, velocity and
        acceleration. Each of these runs allows determining one column of the
        amplification matrix powered to the number of time steps taken.
        The spectral radius of the amplification matrix can be identified by
        taking the n-th root of the eigenvalues of the powered matrix.
        Reference
        ---------
        1. Benítez JM, Montáns FJ. The value of numerical amplification
        matrices in time integration methods. Computers & Structures.
        2013 Nov 1;128:243–50.
        Parameters
        ----------
        dt_list : list / array of float
            List of all time steps to be evaluated
        num_steps : int
            Number of time steps to perform
        Returns
        -------
        spectral_radius : array of float
            Vector of approximate spectral radius values
        """
        dt_array = np.array(dt_list)
        num_points = np.size(dt_array, axis=0)
        spectral_radius = np.zeros((num_points))
        print(" ")
        for i_dt in tqdm(range(num_points)):
            dt = dt_array[i_dt]
            # Integrate the equations of motion for unit initial conditions;
            # each run yields one column of A^n.
            disp_unit_disp, vel_unit_disp, accel_unit_disp = self.integrate(
                dt, num_steps, 1, 0, 0
            )
            disp_unit_vel, vel_unit_vel, accel_unit_vel = self.integrate(
                dt, num_steps, 0, 1, 0
            )
            disp_unit_accel, vel_unit_accel, accel_unit_accel = self.integrate(
                dt, num_steps, 0, 0, 1
            )
            # Build the amplification matrix A^n
            amplification_matrix = mpmath.matrix(
                [
                    [disp_unit_disp, disp_unit_vel, disp_unit_accel],
                    [vel_unit_disp, vel_unit_vel, vel_unit_accel],
                    [accel_unit_disp, accel_unit_vel, accel_unit_accel],
                ]
            )
            # Take only the relevant components
            ndim = self.get_amplification_matrix_dim()
            amplification_matrix = amplification_matrix[0:ndim, 0:ndim]
            # Find the largest of the eigenvalues of A^n
            eigenvalues = mpmath.eig(
                amplification_matrix, left=False, right=False
            )
            modulus_eigenvalues = [mpmath.fabs(x) for x in eigenvalues]
            spectral_radius_to_n = max(modulus_eigenvalues)
            # Compute the spectral radius: n-th root recovers rho(A) from A^n.
            spectral_radius[i_dt] = mpmath.root(
                spectral_radius_to_n, num_steps
            )
        return spectral_radius
|
# -*- coding: utf-8 -*-
"""
Archivo de importación de datos para Medias móviles.
"""
#%% Importar librerías.
from mylib import mylib
import time as _time
import datetime
# Shortcut to the Yahoo download helper from the local library.
yahooKeyStats = mylib.yahooKeyStats

# Tickers (Mexican exchange) whose history will be downloaded.
stock = ['AC.MX', 'ALFAA.MX', 'ALPEKA.MX', 'ALSEA.MX', 'ELEKTRA.MX',
         'IENOVA.MX', 'MEXCHEM.MX', 'PE&OLES.MX', 'PINFRA.MX', 'WALMEX.MX']

# Download window: fixed start date up to today, as Unix timestamps.
start = datetime.date(2013, 12, 18)
start = int(_time.mktime(start.timetuple()))
today = datetime.date.today()
end = int(_time.mktime(today.timetuple()))

for ticker in stock:
    # Download the history of one ticker.
    data = yahooKeyStats(ticker, start, end)
    try:
        # Export the data to one CSV per ticker.
        data.to_csv('../Data/%s.csv' % ticker)
    except Exception as exc:
        # Report which ticker failed and why instead of silently swallowing
        # every exception (the original used a bare `except:` and printed
        # only the ticker).
        print(ticker, exc)
    _time.sleep(1)  # be polite between requests
from django import forms
from .models import ArcGISLayerImport
class ArcGISLayerImportForm(forms.ModelForm):
    """Form for submitting the URL of an Esri/ArcGIS layer to import."""
    class Meta:
        model = ArcGISLayerImport
        # Only the layer URL is taken from user input.
        fields = ['url']
        widgets = {
            'url': forms.TextInput(attrs={
                'id': 'layer-url',
                'required': True,
                'placeholder': 'Esri Layer URL...'
            }),
        }
|
import gevent
def testfn(name, count):
    """Print *count* progress lines, yielding to other greenlets each second.

    The previous body contained `while True: pass` before the sleep: a busy
    wait that never returned control to the gevent hub, so the sleep was
    unreachable and every other greenlet starved after the first print.
    """
    for i in range(count):
        print("In {}, count = {}".format(name, count))
        gevent.sleep(1)  # cooperative yield so the other greenlets can run
# Spawn the three worker greenlets and wait for all of them to finish.
g1, g2, g3 = (
    gevent.spawn(testfn, label, total)
    for label, total in (("foo", 10), ("bar", 15), ("baz", 20))
)
gevent.joinall([g1, g2, g3])
|
#!/usr/bin/env python3
# encoding: utf-8
# time : 2019/10/10 3:46 下午
from _sha256 import sha256
from ssl import SSLContext
from typing import Optional, Dict, Any, Mapping, Union
import ujson
from aiohttp import BasicAuth, ClientTimeout, Fingerprint
from aiohttp.helpers import sentinel
from aiohttp.typedefs import StrOrURL, LooseCookies, LooseHeaders
from faker import Faker
from aiospider.exceptions import InvalidRequestMethod
from aiospider.models import BaseTask
# Shared Faker instance, used to fabricate a Chrome User-Agent header.
fake = Faker(locale='zh_CN')
# HTTP verbs accepted by Request; anything else raises InvalidRequestMethod.
METHOD = {'GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS', 'PATCH'}
class Request(BaseTask):
    """A single HTTP request task for the spider, deduplicated via `taskId`."""

    def __init__(self, method: str, url: StrOrURL, callback: str = None,
                 params: Optional[Mapping[str, str]] = None,
                 data: Any = None,
                 json: Any = None,
                 metadata: Optional[Dict] = None,
                 dont_filter: bool = False,
                 priority: int = 10,
                 cookies: Optional[LooseCookies] = None,
                 headers: LooseHeaders = None,
                 auth: Optional[BasicAuth] = None,
                 allow_redirects: bool = True,
                 max_redirects: int = 10,
                 proxy: Optional[StrOrURL] = None,
                 proxy_auth: Optional[BasicAuth] = None,
                 timeout: Union[ClientTimeout, object] = sentinel,
                 verify_ssl: Optional[bool] = None,
                 ssl: Optional[Union[SSLContext, bool, Fingerprint]] = None,
                 etag: int = 1,
                 age: int = 60 * 60 * 24 * 2
                 ):
        super().__init__(priority, dont_filter, age)
        # Normalise and validate the HTTP verb up front.
        self.method = method.upper()
        if self.method not in METHOD:
            raise InvalidRequestMethod(f"{self.method} method is not supported")
        self.url = url
        self.callback = callback
        self.metadata = metadata or {}
        # Query-string and body payloads.
        self.params = params
        self.data = data
        self.json = json
        # Transport / client options.
        self.headers = headers or {'User-Agent': fake.chrome(), 'Accept-Encoding': 'gzip'}
        self.cookies = cookies
        self.auth = auth
        self.proxy = proxy
        self.proxy_auth = proxy_auth
        self.allow_redirects = allow_redirects
        self.max_redirects = max_redirects
        self.timeout = timeout
        self.verify_ssl = verify_ssl
        self.ssl = ssl
        self.etag = etag

    @property
    def taskId(self) -> str:
        """Stable fingerprint of this request (metadata, verb, url, payloads)."""
        digest = sha256()
        digest.update(ujson.dumps(self.metadata).encode('utf-8'))
        # Feeding the concatenated key in one update is equivalent to the
        # original per-branch concatenations: sha256 hashes the byte stream.
        key = self.method + self.url
        if self.params:
            key += ujson.dumps(self.params)
        body = self.data or self.json
        if body:
            key += ujson.dumps(body)
        digest.update(key.encode('utf-8'))
        return digest.hexdigest()

    def __repr__(self):
        return f'<AioSpiderRequest url[{self.method}]: {self.url} callback={self.callback}>'

    __str__ = __repr__
|
# Exercise 4-4: simple str.replace demonstration.
original_text = 'I think, therefore I am.'
_old_word, _new_word = 'think', 'eat'
replaced_text = original_text.replace(_old_word, _new_word)
print(replaced_text)
|
from functools import partial
from typing import Iterable
from sqlalchemy import Column, Integer, String, Float, create_engine, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
import pandas as pd
from common.constatns import _DB_FILE_LOCATION
# Declarative base shared by every ORM model in this module.
Base = declarative_base()
# Single SQLite engine pointing at the project's database file.
_engine = create_engine(f'sqlite:///{_DB_FILE_LOCATION}', echo=False)
class ModelScore(Base):
    """Overall evaluation scores (BLEU / WBSS) for a single Model row."""
    __tablename__ = 'scores'
    # __table_args__ = (
    #     PrimaryKeyConstraint('model_id'),
    # )
    # id = Column('id', primary_key=True)
    model_id = Column('model_id', ForeignKey('models.id'), primary_key=True)
    bleu = Column('bleu', Float)
    wbss = Column('wbss', Float)
    models = relationship("Model", lazy='subquery', back_populates="model_scores")
    def __init__(self, model_id, bleu, wbss):
        """Create the score row for *model_id* with the two metric values."""
        self.model_id = model_id
        self.bleu = bleu
        self.wbss = wbss
    def __repr__(self):
        return f'{self.__class__.__name__}(model_id={self.model_id}, bleu={self.bleu}, wbss={self.wbss})'
class QuestionCatgory(Base):
    """Question-category lookup table.

    NOTE(review): the class name misspells "Category"; kept as-is because
    callers refer to it by this name.
    """
    __tablename__ = 'question_categories'
    id = Column('id', Integer, primary_key=True)
    Category = Column('Category', String(50), primary_key=True)
    def __init__(self, category):
        """Create a category row with the given label."""
        super().__init__()
        self.Category = category
    def __repr__(self):
        return f'{QuestionCatgory.__name__}(Category={self.Category})'
    def __str__(self):
        return f'{QuestionCatgory.__name__}:(id={self.id}, Category={self.Category})'
class EvaluationType(Base):
    """Lookup table naming the kinds of evaluation a model can receive."""
    __tablename__ = 'evaluation_types'
    id = Column('id', Integer, primary_key=True)
    name = Column('name', String(50))
    def __init__(self, name):
        """Create an evaluation-type row with the given name."""
        super().__init__()
        self.name = name
class ModelPartialScore(Base):
    """A model's score for one (evaluation type, question category) pair."""
    __tablename__ = 'partial_scores'
    # __table_args__ = (
    #     PrimaryKeyConstraint('model_id'),
    # )
    # id = Column('id', primary_key=True)
    model_id = Column('model_id', ForeignKey('models.id'), primary_key=True)
    evaluation_type = Column('evaluation_type', ForeignKey('evaluation_types.id'), primary_key=True)
    question_category_id = Column('question_category_id', ForeignKey('question_categories.id'), primary_key=True)
    score = Column('score', Float)
    def __init__(self, model_id, evaluation_type, question_category_id, score):
        """Create one partial-score row; all three ids form the composite key."""
        self.model_id = model_id
        self.evaluation_type = evaluation_type
        self.question_category_id = question_category_id
        self.score = score
    def __repr__(self):
        return f'{self.__class__.__name__}(evaluation_type={self.evaluation_type}, ' \
            f'question_category_id={self.question_category_id}, score={self.score})'
class Model(Base):
    """A trained VQA model run plus its training/validation metrics."""
    __tablename__ = 'models'
    id = Column('id', Integer, primary_key=True)
    model_location = Column(String(50))
    history_location = Column(String(50))
    image_base_net = Column(String(50))
    loss = Column(Float())
    val_loss = Column(Float())
    accuracy = Column(Float())
    val_accuracy = Column(Float())
    class_strategy = Column(String(15))
    parameter_count = Column('parameter_count', Integer)
    trainable_parameter_count = Column('trainable_parameter_count', Integer)
    f1_score = Column(Float())
    f1_score_val = Column(Float())
    recall = Column(Float())
    recall_val = Column(Float())
    # NOTE(review): "precsision" misspells "precision" but is also the DB
    # column name, so it must stay.
    precsision = Column(Float())
    precsision_val = Column(Float())
    loss_function = Column(String(50))
    activation = Column(String(50))
    notes = Column('notes', String(200))
    # model_scores = relationship(ModelScore, backref='models')
    model_scores = relationship("ModelScore", lazy='subquery', back_populates="models")
    @property
    def score(self):
        """Return this model's single ModelScore row, or None if not evaluated."""
        assert len(self.model_scores) <= 1, f'Unexpectedly Got multiple scores for model {self.id}'
        s = next((m for m in self.model_scores), None)
        return s
    def __init__(self,
                 model_location,
                 history_location,
                 image_base_net,
                 loss,
                 val_loss,
                 accuracy,
                 val_accuracy,
                 notes,
                 parameter_count, trainable_parameter_count,
                 f1_score,
                 f1_score_val,
                 recall,
                 recall_val,
                 precsision,
                 precsision_val,
                 loss_function,
                 activation,
                 class_strategy):
        """Populate all metric columns; `id` is assigned by the database."""
        self.model_location = model_location
        self.history_location = history_location
        self.image_base_net = image_base_net
        self.loss = loss
        self.val_loss = val_loss
        self.accuracy = accuracy
        self.val_accuracy = val_accuracy
        self.notes = notes
        self.parameter_count = parameter_count
        self.trainable_parameter_count = trainable_parameter_count
        self.f1_score = f1_score
        self.f1_score_val = f1_score_val
        self.recall = recall
        self.recall_val = recall_val
        self.precsision = precsision
        self.precsision_val = precsision_val
        self.loss_function = loss_function
        self.activation = activation
        self.class_strategy = class_strategy
    def __repr__(self):
        # BUG FIX: the ')' previously closed the repr right after
        # class_strategy (and the ',\n' separator was missing there),
        # garbling the remaining fields; it now closes after the last field.
        return f'{self.__class__.__name__}(id={self.id},\n' \
            f'\tmodel_location={self.model_location},\n' \
            f'\thistory_location={self.history_location},\n' \
            f'\timage_base_net={self.image_base_net},\n' \
            f'\tloss={self.loss},\n' \
            f'\tval_loss={self.val_loss},\n' \
            f'\taccuracy={self.accuracy},\n' \
            f'\tval_accuracy={self.val_accuracy},\n' \
            f'\tclass_strategy={self.class_strategy},\n' \
            f'\tf1_score = {self.f1_score},\n' \
            f'\tf1_score_val = {self.f1_score_val},\n' \
            f'\trecall = {self.recall},\n' \
            f'\trecall_val = {self.recall_val},\n' \
            f'\tprecsision = {self.precsision},\n' \
            f'\tprecsision_val = {self.precsision_val},\n' \
            f'\tloss_function = {self.loss_function},\n' \
            f'\tactivation = {self.activation})'
def create_db():
    """Create every table declared on Base in the SQLite database (idempotent)."""
    Base.metadata.create_all(_engine)
def insert_dals(dal_obj_arr: Iterable[Base]) -> None:
    """Insert the given ORM objects in a single transaction.

    Rolls back and re-raises on any failure. The session is now closed on
    every path (the original leaked it on both success and failure).
    """
    session = get_session()
    try:
        session.add_all(dal_obj_arr)
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
def insert_dal(dal_obj: Base) -> None:
    """Insert a single ORM object (thin wrapper around insert_dals)."""
    return insert_dals([dal_obj])
def get_session():
    """Open a new non-autocommit, non-autoflush session bound to the engine."""
    factory = sessionmaker(bind=_engine, autocommit=False, autoflush=False)
    return factory()
def get_items(dal_type: Base) -> [Base]:
    """Return every row of the *dal_type* table as a list of ORM objects.

    NOTE(review): the session is never closed here. Relationships are
    declared lazy='subquery', so use after detachment is presumably safe,
    but the leak should be confirmed and fixed.
    """
    session = get_session()
    res_q = session.query(dal_type)
    models = list(res_q)
    return models
# Convenience fetchers: each returns all rows of one table.
get_models = partial(get_items, Model)
get_scores = partial(get_items, ModelScore)
get_partial_scores = partial(get_items, ModelPartialScore)
get_question_categories = partial(get_items, QuestionCatgory)
get_evaluation_types = partial(get_items, EvaluationType)
def get_data_frame(get_objectcs_func, index=None):
    """Build a DataFrame from the objects returned by *get_objectcs_func*.

    Columns are the first object's non-private instance attributes; rows are
    one per object. If *index* is given, that column becomes the index.
    Returns an empty DataFrame when there are no objects (the original
    crashed on `objects[0]`).
    """
    objects = get_objectcs_func()
    if not objects:
        return pd.DataFrame()
    # Don't bring private attributes (e.g. _sa_instance_state). The old
    # extra `not hasattr(k, '__dict__')` test was always true for string
    # keys, so dropping it preserves behavior.
    attr = [str(k) for k in objects[0].__dict__.keys() if not str(k).startswith('_')]
    df = pd.DataFrame([[getattr(curr_obj, curr_attr) for curr_attr in attr] for curr_obj in objects], columns=attr)
    if index:
        df.set_index(keys=index, inplace=True)
    return df
# Convenience DataFrame views over each table, indexed by its natural key.
get_models_data_frame = partial(get_data_frame, get_models, index='id')
get_scores_data_frame = partial(get_data_frame, get_scores, index='model_id')
get_partial_scores_data_frame = partial(get_data_frame, get_partial_scores, index='model_id')
get_question_categories_data_frame = partial(get_data_frame, get_question_categories, index='id')
get_evaluation_types_data_frame = partial(get_data_frame, get_evaluation_types, index='id')
def get_model(predicate: callable) -> Model:
    """Return the first stored Model matching *predicate* (StopIteration if none)."""
    return next(filter(predicate, get_models()))
def get_model_by_id(model_id: int) -> Model:
    """Fetch a single Model by its primary key."""
    def _matches(model):
        return model.id == model_id
    return get_model(_matches)
# def get_models_data_frame():
# models = get_models()
# if not models:
# return pd.DataFrame()
# variables = [v for v in models[0].__dict__.keys() if not v.startswith('_')]
# df = pd.DataFrame([[getattr(i, j) for j in variables] for i in models], columns=variables)
#
# scores = get_scores()
# if len(scores) > 0:
# s_variables = [v for v in scores[0].__dict__.keys() if not v.startswith('_')] if scores else []
# s_df = pd.DataFrame([[getattr(i, j) for j in s_variables] for i in scores], columns=s_variables)
#
# merged_df = s_df.merge(df, left_on='model_id', right_on='id', how='outer') # how='left')
# ret = merged_df
# else:
# # Should happen only before we have done ANY evaluations
# ret = df
#
# return ret
def execute_sql(txt):
    """Execute *txt* as raw SQL against the module-level engine."""
    # Local import keeps sqlalchemy.sql out of module import time.
    from sqlalchemy.sql import text
    with _engine.connect() as con:
        con.execute(text(txt))
def execute_sql_from_file(file_name):
    """Read *file_name* and execute its contents as raw SQL."""
    with open(file_name, 'r') as sql_file:
        sql_text = sql_file.read()
    return execute_sql(sql_text)
def main():
    """Ad-hoc manual entry point: load the scores table into a DataFrame.

    The previous body kept a long unreachable tail of commented-out
    experiments, including a live call on a name that was only defined in a
    comment (`vgg19_model_multi_classes`) and would have raised NameError;
    that dead code has been removed.
    """
    df = get_scores_data_frame()
    # To (re)create the schema instead, call create_db() here.
    return df
# Run the manual entry point when executed as a script.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# @Author:Qingshui Wang
# @Email:apecoder@foxmail.com
# @Time:2018/5/1 21:10
# @File:Main.py
"""
|
from selenium import webdriver
import time
import selenium.webdriver.chrome.service as service
# browser = webdriver.Chrome()
# browser.get('http://www.baidu.com')
driver = webdriver.Chrome()  # Optional argument, if not specified will search path.
try:
    driver.get('https://www.baidu.com')  # navigate to the destination
    time.sleep(5)  # Let the user actually see something!
    # Locate the search input by its name attribute, type the query, submit.
    search_box = driver.find_element_by_name('wd')
    search_box.send_keys('ChromeDriver')
    search_box.submit()
    time.sleep(5)  # Let the user actually see something!
finally:
    # Always release the browser process, even if a step above raised.
    driver.quit()
|
from django.urls import path
from . import views
urlpatterns = [
    # Landing page.
    path('', views.index),
    # Price listing page.
    path('price/', views.price),
]
# This is the Egg class file
import Global_Variables as gv
import pygame as pg
class Egg:
    """A circular egg drawn at a fixed position on a pygame surface."""

    def __init__(self, egg_size, egg_pos):
        """Remember the egg's radius and its (x, y) position."""
        self.size = egg_size
        self.pos = egg_pos

    def update(self, display):
        """Draw the egg as a red filled circle onto *display*."""
        center = (self.pos[0], self.pos[1])
        pg.draw.circle(display, gv.RED, center, self.size)
|
import numpy as np
import scipy.interpolate as si
def bspline(cv, n=100, degree=3, periodic=False):
""" Calculate n samples on a bspline
cv : Array ov control vertices
n : Number of samples to return
degree: Curve degree
periodic: True - Curve is closed
False - Curve is open
"""
# If periodic, extend the point array by count+degree+1
cv = np.asarray(cv)
count = len(cv)
if periodic:
factor, fraction = divmod(count+degree+1, count)
cv = np.concatenate((cv,) * factor + (cv[:fraction],))
count = len(cv)
degree = np.clip(degree,1,degree)
# If opened, prevent degree from exceeding count-1
else:
degree = np.clip(degree,1,count-1)
# Calculate knot vector
kv = None
if periodic:
kv = np.arange(0-degree,count+degree+degree-1)
else:
kv = np.clip(np.arange(count+degree+1)-degree,0,count-degree)
# Calculate query range
u = np.linspace(periodic,(count-degree),n)
# Calculate result
return np.array(si.splev(u, (kv,cv.T,degree))).T |
def solution(numbers):
    """Return the arithmetic mean of *numbers*.

    The original kept an unused `answer` variable and a stray semicolon;
    both removed. Raises ZeroDivisionError for an empty input, as before.
    """
    return sum(numbers) / len(numbers)
from django.conf.urls.defaults import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Register every installed app's admin classes with the admin site.
admin.autodiscover()
# NOTE(review): `patterns()` and dotted-string view references are
# pre-Django-1.8 idioms; this URLconf targets that legacy version.
urlpatterns = patterns(
    '',
    # Examples:
    url(r'^$', 'django_hello_world.hello.views.home', name='home'),
    url(r'^home/', include('django_hello_world.hello.urls',
                           namespace="home_pages")),
    url(r'^requests/$', 'django_hello_world.hello.views.requests',
        name='requests'),
    url(r'^requests/remove/(?P<log_id>\d+)/$',
        'django_hello_world.hello.views.request_remove_handler',
        name='request_remove'),
    url(r'^accounts/login/$', 'django.contrib.auth.views.login',
        {'template_name': 'admin/login.html'},
        name="login_page"),
    url(r'^accounts/logout/$',
        'django.contrib.auth.views.logout', name="logout_url"),
    # url(r'^django_hello_world/', include('django_hello_world.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + \
    static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import pandas as pd
import codecs
from textrank4zh import TextRank4Keyword, TextRank4Sentence
import jieba.analyse
def textRank4zh():
    """Demo: extract keywords and key phrases from hotel reviews via TextRank4zh."""
    # Load the data set.
    df = pd.read_csv("C:/Users/11245/Desktop/好好看好好学/毕业设计/中文数据集/ChnSentiCorp_htl_all.csv", encoding='utf-8')
    x_data = df['review'].astype(str).to_list()
    # Create the keyword-extraction instance.
    tr4w = TextRank4Keyword()
    # Analyse each of the first 100 texts with a co-occurrence window of 2,
    # lower-casing English words.
    for i in range(0, 100):
        print(x_data[i])
        tr4w.analyze(text=x_data[i], lower=True, window=2)
        # Take the top keywords from the ranked list.
        print('关键词为:', end='')
        for item in tr4w.get_keywords(num=5, word_min_len=1):
            # Print each keyword together with its weight.
            print(item.word, item.weight, ' ', end='')
        print('\n')
        print('关键短语为:', end='')
        # Fetch the key phrases built from the keywords.
        for phrase in tr4w.get_keyphrases(keywords_num=5, min_occur_num=1):
            print(phrase, ' ', end='')
        print('\n')
if __name__=='__main__':
    # Load the data set.
    df = pd.read_csv("C:/Users/11245/Desktop/好好看好好学/毕业设计/中文数据集/ChnSentiCorp_htl_all.csv", encoding='utf-8')
    x_data = df['review'].astype(str).to_list()
    print(x_data[0])
    # Keywords via jieba's TextRank implementation.
    keywords_textrank = jieba.analyse.textrank(x_data[0])
    print(keywords_textrank)
    # Keywords via TF-IDF for comparison.
    keywords_tfidf = jieba.analyse.extract_tags(x_data[0])
    print(keywords_tfidf)
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
import numpy as np
from ..utils import get_root_logger
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class PoseDataset(BaseDataset):
    """Pose dataset for action recognition.

    The dataset loads pose and apply specified transforms to return a
    dict containing pose information.

    The ann_file is a pickle file, the json file contains a list of
    annotations, the fields of an annotation include frame_dir(video_id),
    total_frames, label, kp, kpscore.

    Args:
        ann_file (str): Path to the annotation file.
        pipeline (list[dict | callable]): A sequence of data transforms.
        split (str | None): The dataset split used. Only applicable to UCF or
            HMDB. Allowed choices are 'train1', 'test1', 'train2', 'test2',
            'train3', 'test3'. Default: None.
        valid_ratio (float | None): The valid_ratio for videos in KineticsPose.
            For a video with n frames, it is a valid training sample only if
            n * valid_ratio frames have human pose. None means not applicable
            (only applicable to Kinetics Pose). Default: None.
        box_thr (str | None): The threshold for human proposals. Only boxes
            with confidence score larger than `box_thr` is kept. None means
            not applicable (only applicable to Kinetics Pose [ours]). Allowed
            choices are '0.5', '0.6', '0.7', '0.8', '0.9'. Default: None.
        class_prob (dict | None): The per class sampling probability. If not
            None, it will override the class_prob calculated in
            BaseDataset.__init__(). Default: None.
        **kwargs: Keyword arguments for ``BaseDataset``.
    """

    def __init__(self,
                 ann_file,
                 pipeline,
                 split=None,
                 valid_ratio=None,
                 box_thr=None,
                 class_prob=None,
                 **kwargs):
        modality = 'Pose'
        # split, applicable to ucf or hmdb
        self.split = split
        super().__init__(
            ann_file, pipeline, start_index=0, modality=modality, **kwargs)
        # box_thr, which should be a string
        self.box_thr = box_thr
        if self.box_thr is not None:
            assert box_thr in ['0.5', '0.6', '0.7', '0.8', '0.9']
        # Thresholding Training Examples
        self.valid_ratio = valid_ratio
        if self.valid_ratio is not None:
            assert isinstance(self.valid_ratio, float)
            if self.box_thr is None:
                # FIX: the assignment was duplicated
                # (`self.video_infos = self.video_infos = [...]`).
                self.video_infos = [
                    x for x in self.video_infos
                    if x['valid_frames'] / x['total_frames'] >= valid_ratio
                ]
            else:
                key = f'valid@{self.box_thr}'
                self.video_infos = [
                    x for x in self.video_infos
                    if x[key] / x['total_frames'] >= valid_ratio
                ]
                if self.box_thr != '0.5':
                    box_thr = float(self.box_thr)
                    for item in self.video_infos:
                        inds = [
                            i for i, score in enumerate(item['box_score'])
                            if score >= box_thr
                        ]
                        item['anno_inds'] = np.array(inds)
        if class_prob is not None:
            self.class_prob = class_prob
        logger = get_root_logger()
        logger.info(f'{len(self)} videos remain after valid thresholding')

    def load_annotations(self):
        """Load annotation file to get video information."""
        assert self.ann_file.endswith('.pkl')
        return self.load_pkl_annotations()

    def load_pkl_annotations(self):
        """Load the pickle file and resolve split membership and data paths."""
        data = mmcv.load(self.ann_file)
        if self.split:
            split, data = data['split'], data['annotations']
            identifier = 'filename' if 'filename' in data[0] else 'frame_dir'
            data = [x for x in data if x[identifier] in split[self.split]]
        for item in data:
            # Sometimes we may need to load anno from the file
            if 'filename' in item:
                item['filename'] = osp.join(self.data_prefix, item['filename'])
            if 'frame_dir' in item:
                item['frame_dir'] = osp.join(self.data_prefix,
                                             item['frame_dir'])
        return data
|
query_count = int(input())

LIMIT = 2000000
# Sieve of Eratosthenes over [0, LIMIT].
sieve = [True] * (LIMIT + 1)
sieve[0] = sieve[1] = False
p = 2
while p * p <= LIMIT:
    if sieve[p]:
        for multiple in range(p * 2, LIMIT + 1, p):
            sieve[multiple] = False
    p += 1

# ok[i] = count of odd x <= i such that both x and (x + 1) // 2 are prime.
ok = [0]
for x in range(1, LIMIT + 1):
    hit = 1 if (x % 2 == 1 and sieve[x] and sieve[(x + 1) // 2]) else 0
    ok.append(ok[-1] + hit)

# Answer each [l, r] range query from the prefix sums.
for _ in range(query_count):
    l, r = map(int, input().split())
    print(ok[r] - ok[l - 1])
|
# Generated by Django 3.2.3 on 2021-07-07 21:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adjusts field options on the `empleo` model.

    Do not edit by hand beyond comments; Django generated this file.
    """

    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='empleo',
            name='idEmpleo',
            field=models.IntegerField(primary_key=True, serialize=False, verbose_name='Id de emplea'),
        ),
        migrations.AlterField(
            model_name='empleo',
            name='nombreEmpleo',
            field=models.CharField(max_length=50, verbose_name='Nombre Cate'),
        ),
    ]
|
"""
GreenThumb REST API.
GreenThumb Group <greenthumb441@umich.edu>
"""
from greenthumb.api.catalog import (get_catalog, get_catalog_plant_page)
from greenthumb.api.guides import(get_guides, get_guide_page)
# import greenthumb.api.usergarden
from greenthumb.api.usergarden import (get_user_gardens, get_garden, add_garden_location, add_plant_to_garden, edit_plant_in_garden, delete_plant_in_garden, get_user_plants, get_user_plants_with_id) |
### PreScript ###
# State carried across loop iterations.
# NOTE(review): `lastindex` is never used below — presumably leftover.
lastindex = 0
cn_last = 0
# Sound files played as feedback when the active-cell count changes.
soundfile1 = r'C:\TFS\Doc\3-ZIS\3-Development\Discussions\ExperimentFeedback\Release_DVD\SoundFiles\PsychoScream.wav'
soundfile2 = r'C:\TFS\Doc\3-ZIS\3-Development\Discussions\ExperimentFeedback\Release_DVD\SoundFiles\YEAH.WAV'
### LoopScript ###
# get current frame index and detected region (cell) count from the analysis
frame = ZenService.Analysis.Cells.ImageIndexTime
cn = ZenService.Analysis.Cells.RegionsCount
delta = cn - cn_last
# write to log file (optional)
logfile = ZenService.Xtra.System.AppendLogLine(str(frame)+'\t'+str(cn)+'\t'+str(delta))
cn_last = cn
# check if the number of active cells has changed
if (delta > 0): ## active cell number has increased
    # play soundfile 2 --> this could be anything else, e.g. send a TTL to port XY
    ZenService.Xtra.System.PlaySound(soundfile2)
elif (delta < 0): ## active cell number has decreased
    # play soundfile 1 --> just a placeholder for a more meaningful action
    ZenService.Xtra.System.PlaySound(soundfile1)
### PostScript ###
# Open the accumulated log in an external editor for inspection.
ZenService.Xtra.System.ExecuteExternalProgram(r'C:\Program Files (x86)\Notepad++\notepad++.exe', logfile)
# additional script execution to display the data directly from the feedback script
filename = '-f ' + ZenService.Experiment.ImageFileName[:-4] + '_Log.txt'
script = r'c:\TFS\Doc\3-ZIS\3-Development\Discussions\ExperimentFeedback\Release_DVD\Python_Scripts_for_Data_Display\display_jurkat.py'
ZenService.Xtra.System.ExecuteExternalProgram(script, filename)
#!/usr/bin/python3
import ev3dev.ev3 as ev3
import time
# Infrared proximity sensor attached to the EV3 brick.
irSensor = ev3.InfraredSensor()
print("ready")
def runLoop():
    """Poll the IR sensor forever, printing proximity about once per second."""
    while True:
        print(irSensor.proximity)
        time.sleep(1)
runLoop()
|
import os
import torch
from torch.utils.data import Dataset
from skimage.io import imread
import numpy as np
import torchvision.transforms as tvt
# RGB palette for the official Cityscapes label colours, one row per class.
cityscapes_classes = np.array([
    [  0,  0,  0],#static
    [111, 74,  0],#dynamic
    [ 81,  0, 81],#ground
    [128, 64,128],#road
    [244, 35,232],#sidewalk
    [250,170,160],#parking
    [230,150,140],#rail track
    [ 70, 70, 70],#building
    [102,102,156],#wall
    [190,153,153],#fence
    [180,165,180],#guard rail
    [150,100,100],#bridge
    [150,120, 90],#tunnel
    [153,153,153],#pole
    [153,153,153],#polegroup
    [250,170, 30],#traffic light
    [220,220,  0],#traffic sign
    [107,142, 35],#vegetation
    [152,251,152],#terrain
    [ 70,130,180],#sky
    [220, 20, 60],#person
    [255,  0,  0],#rider
    [  0,  0,142],#car
    [  0,  0, 70],#truck
    [  0, 60,100],#bus
    [  0,  0, 90],#caravan
    [  0,  0,110],#trailer
    [  0, 80,100],#train
    [  0,  0,230],#motorcycle
    [119, 11, 32],#bicycle
    [  0,  0,142]#license plate,
    ])
# Palette for the maps dataset.
maps_classes = np.array([
    [255,255,251],
    [203,222,174],
    [171,208,251],
    [231,229,224],
    [243,239,235],
    [255,150,63]
    ])
# Palette for the facades dataset.
# NOTE(review): 766 and 2533 exceed the 0-255 RGB range — almost certainly
# typos; verify against the palette source before relying on these rows.
facades_classes = np.array([
    [255,154,47],
    [194,0,47],
    [0,56,248],
    [252,766,30],
    [0,247,238],
    [0,129,249],
    [101,255,160],
    [197,2533,90],
    [0,24,215]
    ])
# Custom palette used by the `city` dataset below (comments translated).
classes_city = np.array([
    [128,64,123], # purple: road
    [0,15,137], # blue: car
    [222,218,63], # yellow: sign
    [253,162,59], # orange: traffic light
    [0,0,0], # black: sign
    [72,72,72], # grey: building
    [151,251,157], # green: lawn
    [107,141,53], # green: tree
    [251,24,226], # pink: sidewalk
    [62,130,176], # blue: sky
    [83,0,79], # purple: road exit
    [188,152,152], # beige: low wall (?)
    [253,164,168], # beige: parking
    [149,100,102], # beige: road sign
    [224,0,62], # red: person
    [120,5,43], # red: scooter, bicycle
    [0,62,98], # turquoise: bus
    [0,19,161], # blue: scooter
    [254,0,24], # red: child
    [115,75,23], # khaki: parasol
    [167,142,165], # grey: pole
    [0,7,70] # blue: truck
    ])
# Input-image pipeline: resize to 256x256, then subtract the ImageNet channel
# means (std of 1, i.e. mean-centering only).
train_transforms = tvt.Compose([
    tvt.ToPILImage(),
    tvt.Resize((256,256)),
    tvt.ToTensor(),
    tvt.Normalize([0.485,0.456,0.406],[1.,1.,1.])]
)
# Mask pipeline: resize only; conversion to class indices happens in make_mask.
mask_transforms = tvt.Compose([
    tvt.ToPILImage(),
    tvt.Resize((256,256)),
])
class city(Dataset):
def __init__(
self,
mode,
classes=classes_city,
transforms=train_transforms,
mask_transforms=mask_transforms,
folder=r"./datasets/cityscapes/",
use_cuda=True
):
super(city, self).__init__()
self.path = os.path.join(folder, mode)
self.ims = sorted(
list(
map(
lambda file:os.path.join(self.path, file),os.listdir(self.path)
)
)
)
self.transforms = transforms
self.mask_transform = mask_transforms
self.use_cuda = use_cuda
self.classes = classes
def __len__(self):
return len(self.ims)
def applyCuda(self,x):
return x.cuda() if self.use_cuda else x
def __getitem__(self, idx):
im, mask = self.get_data(idx)
if self.transforms is not None:
im = self.transforms(im)
mask= np.array(self.mask_transform(mask))
mask = self.make_mask(mask)
return {'image':im, 'target':mask}
def get_data(self, idx):
I_M = imread(self.ims[idx])
h,w,c= I_M.shape
I = I_M[:,:int(w/2)]
M = I_M[:,int(w/2):]
return I,M
def make_mask(self, mask):
def find_cluster(vec, classes=self.classes):
rscores = np.zeros((256*256, len(classes)))
for i in range(len(classes)):
rscores[:, i] = np.linalg.norm(vec - np.repeat(classes[i].reshape(1, 3), 256 * 256, axis=0), axis=1)
vc = np.argmin(rscores, axis=1)
return vc
def find_cluster_torch(vec, classes=self.classes):
rscores = torch.zeros((256 * 256, len(classes)))
for i in range(len(classes)):
rscores[:, i] = torch.norm(
torch.cuda.FloatTensor(vec.reshape(-1, 3)) - torch.cuda.FloatTensor(
classes[i].reshape(1, 3)).repeat(256 * 256, 1),
dim=1
)
vc = rscores.argmin(dim=1)
return vc
clusters = find_cluster_torch(mask.reshape(-1,3))
mask = clusters.view(256,256).type(torch.LongTensor)
return mask
|
# %load BJ_humidity_lightgbm.py
# %load BJ_humidity_lightgbm.py
#!/usr/bin/env python3
"""
Created on Thu Apr 12 11:03:29 2018
@author: dedekinds
"""
import os
import pandas as pd
import random
import numpy as np
import matplotlib.pyplot as plt
import lightgbm as lgb
from sklearn.externals import joblib
%matplotlib inline
per = 0.1
Type = 'humidity'
#'temperautre'
#'windspeed'
#pressure
location = 'BJ'
#'LD'
f = open('/home/dedekinds/'+location+'_'+Type+'_2_step.csv')
df = pd.read_csv(f)
data = df.values
raw = int(data.shape[0]*(1-per))
train_x = data[:raw,:-1]
train_y = data[:raw,-1]
test_x = data[raw:,:-1]
test_y = data[raw:,-1]
gbm = lgb.LGBMRegressor(objective='regression',
num_leaves=31,
learning_rate=0.5,
n_estimators=2000)
gbm.fit(train_x, train_y,
eval_set=[(test_x, test_y)],
eval_metric='l2',
early_stopping_rounds=10)
#保存模型
model_dir = "lightgbm_model"
model_name = Type+'_lightgbm_'+location+'.txt'
if not os.path.exists(model_dir):
os.mkdir(model_dir)
joblib.dump(gbm, os.path.join(model_dir, model_name))
##读取模型 并测试demo
#
#import os
#import pandas as pd
#import random
#import numpy as np
#import matplotlib.pyplot as plt
#import lightgbm as lgb
#from sklearn.externals import joblib
#%matplotlib inline
#
#Type = 'humidity'
# #'temperautre'
# #'windspeed'
# #pressure
#location = 'BJ'
# #'LD'
#
#f = open('/home/dedekinds/'+location+'_'+Type+'_new_month_2_step.csv')
#df = pd.read_csv(f)
#data = df.values
#
#model_dir = "lightgbm_model"
#model_name = Type+'_lightgbm_'+location+'.txt'
#gbm = joblib.load(os.path.join(model_dir, model_name))
#
#test_x = data[:,:-1]
#test_y = data[:,-1]
#test_predict = gbm.predict(test_x, num_iteration=gbm.best_iteration_)
#
#acc=np.average(np.abs(test_predict -test_y[:len(test_predict )])) #偏差
#print(acc)
##以折线图表示结果
#plt.figure()
#plt.plot(list(range(len(test_predict ))), test_predict , color='b')
#plt.plot(list(range(len(test_y))), test_y, color='r')
#plt.show() |
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
name = "greenlet", # note that we fake the name here! this is artififical after all...
version = "0.1", # but what version should it be?
packages = find_packages()
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-05 20:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('redcrossmain', '0002_auto_20160705_2345'),
]
operations = [
migrations.CreateModel(
name='Newsletter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, null=True)),
('file', models.FileField(upload_to='C:\\Programming\\Django\\redcross\\redcross_media\\uploads\\newsletter')),
('date', models.DateTimeField(auto_now=True)),
],
),
migrations.AlterField(
model_name='team_images',
name='images',
field=models.FileField(null=True, upload_to='C:\\Programming\\Django\\redcross\\redcross_media\\uploads\\temporary'),
),
migrations.AlterField(
model_name='top_slider',
name='images',
field=models.FileField(null=True, upload_to='C:\\Programming\\Django\\redcross\\redcross_media\\uploads\\temporary'),
),
]
|
#
# File: results_ECC.py
# Author: Alexander Craig
# Project: An Analysis of the Security of RSA & Elliptic Curve Cryptography
# Supervisor: Maximilien Gadouleau
# Version: 2.0
# Date: 19/03/19
#
# Functionality: gathers results for RSA in given range
#
# CLI: python3 results_ECC.py -h (to see possible flags)
#
############ IMPORTS #########
# needed for pydocs to correctly find everything
import sys
sys.path.append('Programming/')
# to make it backwards compatable with Python < 3.6
try:
import secrets
except ImportError:
from utils import secrets
import argparse
import math
import threading
import matplotlib.pyplot as plt # for drawing graphs
from ECC import *
from utils.plots import *
############ GLOBAL VARIABLES #########
resCount_C = {} # stores results as dictionary
resTime_C = {} # stores results as dictionary
resSpace_C = {} # stores results as dictionary
resCount_W = {} # stores results as dictionary
resTime_W = {} # stores results as dictionary
resSpace_W = {} # stores results as dictionary
saveFile = ""
############ FUNCTIONS #########
def saveResults(saveFile):
""" saves results to a csv file """
with open(saveFile + "_C.csv", "w+") as file: # open file
keys = sorted(list(resCount_C.keys())) # important that in sorted order
for key in keys: # loop over keys
out = (str(key) + "," + str(resCount_C[key][0]) + "," + # write out info
str(resTime_C[key][0]) + "," +
str(resSpace_C[key][0]) + "," +
str(resCount_C[key][1]) + "\n")
file.write(out) # write to file
with open(saveFile + "_W.csv", "w+") as file: # open file
keys = sorted(list(resCount_W.keys())) # important that in sorted order
for key in keys: # loop over keys
out = (str(key) + "," + str(resCount_W[key][0]) + "," + # write out info
str(resTime_W[key][0]) + "," +
str(resSpace_W[key][0]) + "," +
str(resCount_W[key][1]) + "\n")
file.write(out) # write to file
def getResults(solver, minBit, maxBit, saveFile, noResults):
""" saves a results csv, given a solver, result index and bit range """
for bit in range(minBit, maxBit + 1):
for i in range(noResults):
keys = generate_ECC.KeyGen(bit, False) # initialise keys
keys.generateCurve() # get curve paramaters
keys.generateKeys() # generate keys
solver.setCurve(keys.curve) # setup solver
solver.setQ(keys.Q)
solver.setG(keys.G)
solver.solve() # solve problem
k = int(math.ceil(math.log(keys.p, 2))) # get accurate bit length
if solver.k == keys.k: # if we got it right
resTime = resTime_C # update correct dictionaries
resCount = resCount_C
resSpace = resSpace_C
else:
resTime = resTime_W # else update wrong dictionaries
resCount = resCount_W
resSpace = resSpace_W
if k not in resTime: # if we've not yet had a result for k
resTime[k] = [solver.time, 1] # then set
resSpace[k] = [solver.space, 1] # then set
resCount[k] = [solver.count, 1]
else:
oldT, oldC = resTime[k] # keeps a running average
newC = oldC + 1 # increment count
newT = ((oldT * oldC) + solver.time) / newC # get new averagae
resTime[k] = [newT, newC] # without storing all variables
oldS, oldC = resSpace[k] # keeps a running average
newS = ((oldS * oldC) + solver.space) / newC
resSpace[k] = [newS, newC] # without storing all variables
oldCount, oldC = resCount[k] # keeps a running average
newCount = ((oldCount * oldC) + solver.count) / newC
resCount[k] = [newCount, newC] # without storing all variables
if i % 10 == 0:
saveResults(saveFile) # every ten results save again
def results(algo = 0, minBit = 10, maxBit = 18, saveFile = "results", noResults = 100):
""" generates results for a given algorithm """
solver = None
if algo == 0:
solver = brute_force.BFSolver(v = False)
elif algo == 1:
solver = baby_step.BGSolver(v = False)
elif algo == 2:
solver = pollard_rho.PRSolver(v = False)
elif algo == 3:
solver = pollard_lambda.PLSolver(v = False)
elif algo == 4:
solver = pohlig_hellman.PHSolver(v = False)
elif algo == 5:
solver = mov_attack.MOVSolver(v = False)
getResults(solver, minBit, maxBit, saveFile, noResults)
############ COMMAND LINE INTERFACE #########
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--minbit", help="minimum bit size to test from", type=int, default=10)
parser.add_argument("-u", "--maxbit", help="maximum bit size to test", type=int, default=20)
parser.add_argument("-s", "--savefile", help="name of file to save results to", type=str, default="results.csv")
parser.add_argument("-n", "--noresults", help="number of results to take for each bit", type=int, default=100)
parser.add_argument("-bf", "--bruteforce", help="turns bruteforce decryption on", action="store_true")
parser.add_argument("-bs", "--baby_step", help="turns baby_step-giant_step decryption on", action="store_true")
parser.add_argument("-pr", "--pollard_rho", help="turns pollard_rho decryption on", action="store_true")
parser.add_argument("-pl", "--pollard_lambda", help="turns pollard_lambda decryption on", action="store_true")
parser.add_argument("-ph", "--pohlig_hellman", help="turns pohlig_hellman decryption on", action="store_true")
parser.add_argument("-ma", "--mov_attack", help="turns mov_attack decryption on", action="store_true")
args = parser.parse_args()
algo = 0
if args.baby_step:
algo = 1
elif args.pollard_rho:
algo = 2
elif args.pollard_lambda:
algo = 3
elif args.pohlig_hellman:
algo = 4
elif args.mov_attack:
algo = 5
results(algo, args.minbit, args.maxbit, args.savefile, args.noresults)
|
from prepareImages import prepareImages
from facial_landmarks import findFace
import os, os.path
from imutils.face_utils import FaceAligner
from imutils.face_utils import rect_to_bb
import argparse
import imutils
import dlib
def main():
dir_path_list = []
new_dir_path_list = []
PROCCES_IMAGES = False
DETECT_FACES = False
prepareImages('gt_db/ja/')
if PROCCES_IMAGES:
for dir in os.listdir('gt_db/'):
extension = os.path.splitext(dir)[1]
dir_path_list.append(os.path.join('gt_db/', dir))
for path in dir_path_list:
print(path + '/')
prepareImages(path + '/')
if DETECT_FACES:
for dir in os.listdir('edited_images/gt_db/'):
extension = os.path.splitext(dir)[1]
new_dir_path_list.append(os.path.join('gt_db/', dir))
print(new_dir_path_list)
for dir in new_dir_path_list:
for file in os.listdir(dir):
file_path = 'edited_images/' + dir + '/' + file;
print(file_path)
findFace(file_path)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
import os
import argparse
import sqlite3
import xml.etree.cElementTree as ET
import math
parser = argparse.ArgumentParser(description='Testing argparser')
parser.add_argument('-EPSG', type=int, help='UTM EPSG for data', required=True)
parser.add_argument('-i', help='input file destination', required=True)
parser.add_argument('-o', help='output db-file destination')
args = parser.parse_args()
EPSG = args.EPSG
infile = args.i
outfile = args.o
if not os.path.exists(infile):
print "Coudn't find input file"
raise SystemExit
if outfile is None:
outfile = infile + '.sqlite'
LinkType = {}
LinkType['trunk'] = 'FREEWAY'
LinkType['motorway'] = 'EXPRESSWAY' # у нас нет таких дорог
LinkType['primary'] = 'MAJOR'
LinkType['secondary'] = 'MINOR'
LinkType['track'] = 'LOCAL'
LinkType['tertiary'] = 'LOCAL'
LinkType['living_street'] = 'LOCAL'
LinkType['residential'] = 'LOCAL'
LinkType['service'] = 'LOCAL'
LinkType['services'] = 'LOCAL' # почему то не service
LinkType['primary_link'] = 'RAMP'
LinkType['motorway_link'] = 'RAMP' # у нас нет таких дорог
LinkType['tertiary_link'] = 'RAMP'
LinkType['trunk_link'] = 'RAMP'
LinkType['secondary_link'] = 'RAMP'
LinkType['unclassified'] = 'OTHER'
LinkType['road'] = 'OTHER'
LinkType['footway'] = 'WALKWAY'
LinkType['pedestrian'] = 'WALKWAY'
LinkType['steps'] = 'WALKWAY'
LinkType['path'] = 'WALKWAY'
LinkType['bridleway'] = 'WALKWAY'
# LinkType['railway'] = 'HEAVYRAIL'#у нас нет таких дорог почему то
# еще есть: 'construction', - строящиеся дороги,
# 'platform' - площадки для посадки людей
print 'Parsing OSM data for nodes and ways ...'
Nodes = {}
Ways = {}
# -------------------------------АС-------------------
AC = {}
AcWay = {}
# AmenityType = ['bank','library']
# ShopType = ['hairdresser','travel_agency', 'convenience']
# ----------------------------------------------------
# ----------------------------------------------------
# print data in csv
def incsv(Nodes, Links, Ways):
"""
print main data in csv
debug procedure for manage troubles with db
"""
print 'Creating the text-base ...'
with open(outfile + ".nodes.txt", "w") as out:
out.write("%s\t%s\t%s\n" % ('node', 'x', 'y'))
for Node in Nodes:
Lon, Lat, Count, Id = Nodes[Node]
out.write("%d\t%f\t%f\n" % (Id, float(Lon), float(Lat)))
out.close()
with open(outfile + ".links.txt", "w") as out:
out.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ('link', 'name', 'node_a', 'node_b', 'type', 'lanes_ab', 'lanes_ba'))
for Link in Links:
NodeA, NodeB, Group, Type, GeoString = Links[Link]
#print 'Link=',Link,'NodeA=',NodeA,'NodeId=',Nodes[NodeA][3]
LanesAB = LanesBA = 1
Name = ''
SpeedAB = SpeedBA = 60 / 1.609
if 'LANES' in Ways[Group]:
try:
LanesAB = int(Ways[Group]['LANES'])
if LanesAB != 1:
LanesAB //= 2
LanesBA //= 2
except ValueError:
print 'Please, check data for way with OSM id ', Group, ' you have something strange' \
' in lanes: ', Ways[Group]['LANES']
LanesAB = int(Ways[Group]['LANES'].split(';', 1)[0])
if LanesAB != 1:
LanesAB //= 2
LanesBA //= 2
if 'ONEWAY' in Ways[Group]:
LanesBA = 0
CapBA = 0
if 'SPEED' in Ways[Group]:
try:
SpeedAB = SpeedBA = float(Ways[Group]['SPEED']) / 1.609
except:
print 'Please, check data for way with OSM id ', Group, ' you have something strange' \
' in speed: ', Ways[Group]['SPEED']
if ('ONEWAY' in Ways[Group]) & ('LANES' in Ways[Group]):
try:
LanesAB = int(Ways[Group]['LANES'])
if LanesAB != 1:
LanesAB //= 2
LanesBA //= 2
except ValueError:
print 'Please, check data for way with OSM id ', Group, ' you have something strange' \
' in lanes: ', Ways[Group]['LANES']
LanesAB = int(Ways[Group]['LANES'].split(';', 1)[0])
if LanesAB != 1:
LanesAB //= 2
LanesBA //= 2
if 'NAME' in Ways[Group]:
Name = Ways[Group]['NAME'].encode('utf-8')
if Type in LinkType.keys():
out.write("%d\t%s\t%d\t%d\t%s\t%d\t%d\n" % (
Link, Name, int(NodeA), int(NodeB), LinkType[Type], LanesAB, LanesBA))
out.close()
def indb(Nodes, Links, Ways, AllNodes, AC, AcWay):
"""
import data in database
"""
print 'Creating the database ...'
if os.path.exists(outfile):
os.remove(outfile)
dbcon = sqlite3.connect(outfile)
dbcon.execute('pragma journal_mode = off;')
dbcon.execute('pragma synchronous = 0;')
dbcon.enable_load_extension(1)
dbcon.isolation_level = None
dbcur = dbcon.cursor()
dbcur.execute("select load_extension('mod_spatialite');")
dbcur.execute("begin ;")
dbcur.execute("select InitSpatialMetadata();")
dbcur.execute("commit ;")
print 'Creating the Node table ...'
dbcur.execute("""
CREATE TABLE "Node" (
"node" INTEGER NOT NULL PRIMARY KEY,
"osmid" BIGINT NOT NULL,
"x" REAL DEFAULT 0,
"y" REAL DEFAULT 0,
"z" REAL DEFAULT 0);
""")
dbcur.execute("select AddGeometryColumn ( 'Node', 'GEO', ?, 'POINT', 2 );", [4326])
dbcur.execute("select CreateMbrCache ( 'Node', 'GEO' );")
print 'Creating the Link table ...'
dbcur.execute("""
CREATE TABLE "Link" (
"link" INTEGER NOT NULL PRIMARY KEY,
"osmid" BIGINT NOT NULL,
"name" TEXT DEFAULT '',
"node_a" INTEGER NOT NULL,
"node_b" INTEGER NOT NULL,
"length" REAL DEFAULT 0,
"setback_a" REAL DEFAULT 0,
"setback_b" REAL DEFAULT 0,
"bearing_a" INTEGER DEFAULT 0 NOT NULL,
"bearing_b" INTEGER DEFAULT 0 NOT NULL,
"type" TEXT NOT NULL,
"use" TEXT DEFAULT '' NOT NULL,
"lanes_ab" INTEGER DEFAULT 0 NOT NULL,
"speed_ab" REAL DEFAULT 0,
"fspd_ab" REAL DEFAULT 0,
"cap_ab" INTEGER DEFAULT 0 NOT NULL,
"lanes_ba" INTEGER DEFAULT 0 NOT NULL,
"speed_ba" REAL DEFAULT 0,
"fspd_ba" REAL DEFAULT 0,
"cap_ba" INTEGER DEFAULT 0 NOT NULL,
"left_ab" INTEGER DEFAULT 0 NOT NULL,
"right_ab" INTEGER DEFAULT 0 NOT NULL,
"left_ba" INTEGER DEFAULT 0 NOT NULL,
"right_ba" INTEGER DEFAULT 0 NOT NULL);
""")
dbcur.execute("select AddGeometryColumn ( 'LINK', 'GEO', ?, 'LINESTRING', 2 );", [4326])
dbcur.execute("select CreateMbrCache ( 'LINK', 'GEO' );")
print 'Creating the AC Nodes table ...'
dbcur.execute("""
CREATE TABLE "AcNodes" (
"id" INTEGER NOT NULL PRIMARY KEY,
"osmid" BIGINT NOT NULL,
"node" INTEGER NOT NULL,
"link" INTEGER NOT NULL,
"offset" REAL DEFAULT 0,
"layer" TEXT NOT NULL,
"easting" REAL DEFAULT 0,
"northing" REAL DEFAULT 0,
"elevation" REAL DEFAULT 0,
"notes" TEXT NOT NULL,
"tag" TEXT NOT NULL,
"source" TEXT NOT NULL);
""")
dbcur.execute("select AddGeometryColumn ( 'AcNodes', 'GEO', ?, 'POINT', 2 );", [4326])
dbcur.execute("select CreateMbrCache ( 'AcNodes', 'GEO' );")
print 'Creating the AC Links table ...'
dbcur.execute("""
CREATE TABLE "AcLinks" (
"id" INTEGER NOT NULL PRIMARY KEY,
"osmid" BIGINT NOT NULL,
"type" TEXT NOT NULL,
"tag" TEXT NOT NULL);
""")
dbcur.execute("select AddGeometryColumn ( 'AcLinks', 'GEO', ?, 'POLYGON', 'XY' );", [4326])
dbcur.execute("select CreateMbrCache ( 'AcLinks', 'GEO' );")
print 'Loading the Node table ...'
dbcon.commit()
dbcur.execute("begin")
for Node in NewNodes:
Lon, Lat, Count, GId = NewNodes[Node]
GeoString = 'POINT(' + str(Lon) + ' ' + str(Lat) + ')'
SqlString = 'insert into Node values ( ?, ?, ?, ?, 0.0, Transform ( GeomFromText ( ?, 4326 ), 4326 ) );'
dbcur.execute(SqlString, [GId, Node, Lon, Lat, GeoString])
dbcur.execute("commit")
print 'Updating X and Y coordinates of the Node table ...'
SqlString = "update Node set X = X ( ST_Transform(GEO,?) ), Y = Y ( ST_Transform(GEO,?) );"
dbcur.execute(SqlString, [EPSG, EPSG])
dbcon.commit()
print 'Loading the Link table ...'
Counter = 0
for Link in NewLinks:
NodeA, NodeB, Group, Type, GeoString = NewLinks[Link]
LanesAB = LanesBA = 1
CapAB = CapBA = 500
Name = ''
SpeedAB = SpeedBA = 60 / 1.609
if 'LANES' in Ways[Group]:
try:
LanesAB = int(Ways[Group]['LANES'])
if LanesAB != 1:
LanesAB //= 2
LanesBA //= 2
except ValueError:
print 'Please, check data for way with OSM id ', Group, ' you have something strange' \
' in lanes: ', Ways[Group]['LANES']
LanesAB = int(Ways[Group]['LANES'].split(';', 1)[0])
if LanesAB != 1:
LanesAB //= 2
LanesBA //= 2
if 'SPEED' in Ways[Group]:
try:
SpeedAB = SpeedBA = float(Ways[Group]['SPEED']) / 1.609
except:
print 'Please, check data for way with OSM id ', Group, ' you have something strange' \
' in speed: ', Ways[Group]['SPEED']
if 'ONEWAY' in Ways[Group]:
LanesBA = 0
CapBA = 0
SpeedBA = 0
if (LanesAB != 1) & ('LANES' in Ways[Group]):
try:
LanesAB = int(Ways[Group]['LANES'])
except ValueError:
print 'Please, check data for way with OSM id ', Group, ' you have something strange' \
' in lanes: ', Ways[Group]['LANES']
LanesAB = int(Ways[Group]['LANES'].split(';', 1)[0])
if 'NAME' in Ways[Group]:
Name = Ways[Group]['NAME']
SqlString = "insert into LINK values ( ?, ?, ?, ?, ?, 0.0, 0.0, 0.0, 0, 0, ?, 'ANY', ?, ?, 25.0," \
" ?, ?, ?, 25.0, ?, 0, 0, 0, 0, Transform ( GeomFromText ( ?, 4326 ), 4326 ) );"
dbcur.execute(SqlString,
[Link, Group, Name, Nodes[NodeA][3], Nodes[NodeB][3], LinkType[Type], LanesAB, SpeedAB, CapAB,
LanesBA, SpeedBA, CapBA, GeoString])
Counter += 1
dbcon.commit()
print 'Updating the Length fields in the Link table ...'
dbcur.execute("update LINK set LENGTH = GLength ( GEO, 1 );")
dbcon.commit()
#AC locations
print 'Loading the AcNodes table ...'
for node in AC:
Lon, Lat, Tag, Type, GId = AC[node]
GeoString = 'POINT(' + str(Lon) + ' ' + str(Lat) + ')'
SqlString = "insert into AcNodes values ( ?, ?, 0, 0, 0.0, 'AUTO/BUS/WALK', ?, ?, 0.0, ?, ?, 'POI', Transform ( GeomFromText ( ?, 4326 ), 4326 ) );"
dbcur.execute(SqlString, [GId, node, Lon, Lat, Type, Tag, GeoString])
print 'Loading the AcLinks table ...'
for link in AcWay:
GeoString = 'POLYGON(('
for Node in AcWay[link]['NODES']:
Lon, Lat = AllNodes[Node][0:2]
GeoString += str(Lon) + ' ' + str(Lat) + ','
GeoString = GeoString[:-1] + '))'
Type = AcWay[link]['Type']
Tag = AcWay[link]['Tag']
GId = AcWay[link]['Id']
SqlString = "insert into AcLinks values ( ?, ?, ?, ?, Transform ( GeomFromText ( ?, 4326 ), 4326 ) );"
try:
dbcur.execute(SqlString, [GId, link, Type, Tag, GeoString])
except:
print 'Error with:', link, Type, Tag, GeoString
dbcon.commit()
print 'Updating AcNodes from centroids of AcLinks'
# dbcur.execute("SELECT max(id) FROM AcNodes;")
#for row in dbcur.fetchall():
# MAX_N = int(row[0])
dbcur.execute("SELECT id, osmid, type, tag, ST_Centroid(GEO) FROM AcLinks;")
for row in dbcur.fetchall():
#MAX_N += 1
GId = row[0]
OSMId = row[1]
Type = row[2]
Tag = row[3]
GeoString = row[4]
SqlString = "insert into AcNodes values ( ?, ?, 0, 0, 0.0, 'AUTO/BUS/WALK', 0.0, 0.0, 0.0, ?, ?, 'Centroid', ? );"
dbcur.execute(SqlString, [GId, OSMId, Type, Tag, GeoString])
dbcon.commit()
print 'Updating easting and northing coordinates of the AcNode tables ...'
SqlString = "update AcNodes set easting = X ( ST_Transform(GEO,?) ), northing = Y ( ST_Transform(GEO,?) );"
dbcur.execute(SqlString, [EPSG, EPSG])
dbcon.commit()
print 'Finding the nearest neighbourhood of AC Nodes and Links'
dbcur.execute("begin ;")
query = 'select t1.id, t2.node_a, t2.link, Min(Distance(t1.GEO, t2.GEO)) from AcNodes as t1, Link as t2 group by t1.id;'
dbcur.execute(query)
result = dbcur.fetchall()
dbcur.execute("commit ;")
dbcur.execute("begin ;")
for row in result:
SqlString = 'UPDATE AcNodes SET node = ?, link = ? WHERE id = ? ;'
dbcur.execute(SqlString, [row[1], row[2], row[0]])
dbcur.execute("commit")
print 'Updating the offset of AC Nodes'
query = 'select t1.node, t1.x, t1.y, t2.ROWID, t2.node, t2.easting, t2.northing from AcNodes as t2, Node as t1 where t1.node=t2.node order by t1.node;'
dbcur.execute(query)
result = dbcur.fetchall()
for row in result:
SqlString = 'UPDATE AcNodes SET offset = ? WHERE ROWID = ? ;'
dbcur.execute(SqlString, [math.sqrt((row[5] - row[1]) ** 2 + (row[6] - row[2]) ** 2), row[3]])
#print results
print 'Obtain TRANSIMS nodes file'
SqlString = "Select node, x, y, z from Node;"
dbcur.execute(SqlString)
dbcon.commit()
with open(outfile + ".nodes.txt", "w") as out:
out.write("%s\t%s\t%s\t%s\n" % ('node', 'x', 'y', 'z'))
for row in dbcur.fetchall():
out.write("%d\t%f\t%f\t%f\n" % (row[0], row[1], row[2], row[3]))
out.close()
print 'Obtain TRANSIMS links file'
SqlString = "Select link, name, node_a, node_b, length, type, lanes_ab, speed_ab, cap_ab, " \
"lanes_ba, speed_ba, cap_ba, use from Link;"
dbcur.execute(SqlString)
dbcon.commit()
with open(outfile + ".links.txt", "w") as out:
out.write(
"{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format('LINK', 'STREET', 'ANODE', 'BNODE', 'LENGTH',
'type', 'lanes_ab', 'speed_ab', 'cap_ab',
'lanes_ba', 'speed_ba', 'cap_ba', 'use'))
for row in dbcur.fetchall():
out.write(
"{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(row[0], row[1].encode('utf-8'), row[2],
row[3], row[4], row[5], row[6], row[7],
row[8], row[9], row[10], row[11], row[12]))
out.close()
print 'Obtain TRANSIMS AL file'
SqlString = "Select id, node, link, layer, offset, easting, northing, elevation, tag, notes from AcNodes;"
dbcur.execute(SqlString)
dbcon.commit()
with open(outfile + ".al.txt", "w") as out:
out.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (
'id', 'node', 'link', 'layer', 'offset', 'easting', 'northing', 'elevation', 'tag', 'notes'))
for row in dbcur.fetchall():
out.write("%d\t%d\t%d\t%s\t%f\t%f\t%f\t%f\t%s\t%s\n" % (
row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9]))
out.close()
# ----------------------------------------------------
# parsing procedure
# ----------------------------------------------------
Counter = 0
NodeErrors = 0
# стандартная штука из мануала
# http://effbot.org/zone/element-iterparse.htm
Context = ET.iterparse(infile)
Conext = iter(Context)
Event, Root = Context.next()
GlobalId = 0
# узлы, третий параметр - чтобы понять скольким линкам принадлежит узел
for Event, Child in Context:
GlobalId +=1
if Child.tag == 'node':
# Subs = [Sub.tag for Sub in Child]
Id = int(Child.attrib['id'])
Lon = Child.attrib['lon']
Lat = Child.attrib['lat']
Nodes[Id] = [Lon, Lat, 0, GlobalId] # 3 параметр для УДС
for Sub in Child:
if Sub.tag == 'tag':
# if Sub.attrib['k'] == 'amenity' and Sub.attrib['v'] in AmenityType:
if Sub.attrib['k'] == 'amenity':
AC[Id] = [Lon, Lat, 'Amenity', Sub.attrib['v'], GlobalId]
# if Sub.attrib['k'] == 'shop' and Sub.attrib['v'] in ShopType:
if Sub.attrib['k'] == 'shop':
AC[Id] = [Lon, Lat, 'Shop', Sub.attrib['v'], GlobalId]
if Sub.attrib['k'] == 'leisure':
AC[Id] = [Lon, Lat, 'Leisure', Sub.attrib['v'], GlobalId]
if Sub.attrib['k'] == 'office':
AC[Id] = [Lon, Lat, 'Office', Sub.attrib['v'], GlobalId]
if Sub.attrib['k'] == 'shop':
AC[Id] = [Lon, Lat, 'Shop', Sub.attrib['v'], GlobalId]
Child.clear()
Subs = []
# линии - пока еще не линки графа, а просто osm way
if Child.tag == 'way':
Id = int(Child.attrib['id'])
Ways[Id] = {}
Ways[Id]['NODES'] = []
for Sub in Child:
if Sub.tag == 'nd':
Node = int(Sub.attrib['ref'])
try:
Nodes[Node][2] += 1 # отмечаем ноды в линках.
Ways[Id]['NODES'].append(Node)
except:
NodeErrors += 1
if Sub.tag == 'tag' and Sub.attrib['k'] != 'highway':
print 'ERROR in WayID:', Id, ' in ', Node
Ways[Id] = {}
break
if Sub.tag == 'tag':
if Sub.attrib['k'] == 'highway' and Sub.attrib['v'] in LinkType:
Ways[Id]['TYPE'] = Sub.attrib['v']
if Sub.attrib['k'] == 'oneway':
Ways[Id]['ONEWAY'] = Sub.attrib['v'] # может быть no вместо null
if Sub.attrib['k'] == 'name':
Ways[Id]['NAME'] = Sub.attrib['v']
if Sub.attrib['k'] == 'lanes':
Ways[Id]['LANES'] = Sub.attrib['v']
if Sub.attrib['k'] == 'maxspeed':
Ways[Id]['SPEED'] = Sub.attrib['v']
# if Sub.attrib['k'] == 'shop' and Sub.attrib['v'] in ShopType:
if Sub.attrib['k'] == 'shop':
AcWay[Id] = {}
AcWay[Id]['NODES'] = Ways[Id]['NODES']
AcWay[Id]['Type'] = Sub.attrib['v']
AcWay[Id]['Tag'] = 'Shop'
AcWay[Id]['Id'] = GlobalId
# if Sub.attrib['k'] == 'amenity' and Sub.attrib['v'] in AmenityType:
if Sub.attrib['k'] == 'amenity':
AcWay[Id] = {}
AcWay[Id]['NODES'] = Ways[Id]['NODES']
AcWay[Id]['Type'] = Sub.attrib['v']
AcWay[Id]['Tag'] = 'Amenity'
AcWay[Id]['Id'] = GlobalId
if Sub.attrib['k'] == 'leisure':
AcWay[Id] = {}
AcWay[Id]['NODES'] = Ways[Id]['NODES']
AcWay[Id]['Type'] = Sub.attrib['v']
AcWay[Id]['Tag'] = 'Leisure'
AcWay[Id]['Id'] = GlobalId
if Sub.attrib['k'] == 'office':
AcWay[Id] = {}
AcWay[Id]['NODES'] = Ways[Id]['NODES']
AcWay[Id]['Type'] = Sub.attrib['v']
AcWay[Id]['Tag'] = 'Office'
AcWay[Id]['Id'] = GlobalId
if 'TYPE' not in Ways[Id]:
del Ways[Id]
Child.clear()
Root.clear()
print 'Removing unused nodes from the network ...'
UnusedNodes = []
for Node in Nodes:
Lon, Lat, Count = Nodes[Node][0:3]
if Count == 0:
UnusedNodes.append(Node)
for Node in UnusedNodes:
del Nodes[Node]
del UnusedNodes
print 'Splitting ways into links as needed ...'
Groups = {}
Links = {}
NewNodes = {}
#LinkId = 10001 # просто случайное число)) нужно аналогично придумать для нодов, а то не влазим в диапазон Int
for Id in Ways:
ShapeCount = len(Ways[Id]['NODES'])
if ShapeCount < 2: # одна нода в way
continue
SegPos = 0
Links[GlobalId] = {}
Links[GlobalId]['NODES'] = []
Links[GlobalId]['TYPE'] = Ways[Id]['TYPE']
# настраиваем перелинковку Группа-Way-Link
Links[GlobalId]['GROUP'] = Id
Groups[Id] = [GlobalId]
# здесь разбиваем way на отдельные сегменты.
for Index in range(ShapeCount):
Node = Ways[Id]['NODES'][Index]
Lon, Lat, Count, GId = Nodes[Node][0:4]
Links[GlobalId]['NODES'].append(Node) # каждую ноду из вей добавляем в линкс
if Index == 0 or Index == ShapeCount - 1: # крайние точки
if Node not in NewNodes:
NewNodes[Node] = [Lon, Lat, 0, GId] # пропускаем транзитные ноды (не добавляем в nodes)
NewNodes[Node][2] += 1
elif Count > 1: # если нода в нескольких way - создаем новый way
if SegPos > 0 and Index < ShapeCount - 1:
SegPos = 0
# создаем новый линк
GlobalId += 1
Links[GlobalId] = {}
Links[GlobalId]['NODES'] = [Node]
Links[GlobalId]['TYPE'] = Ways[Id]['TYPE']
Links[GlobalId]['GROUP'] = Id
Groups[Id].append(GlobalId)
if Node not in NewNodes:
NewNodes[Node] = [Lon, Lat, 0, GId]
NewNodes[Node][2] += 1
SegPos += 1
GlobalId += 1
print 'Checking the sanity of Node and Link relationships ...'
for Link in Links:
Count = len(Links[Link]['NODES'])
if Links[Link]['NODES'][0] not in NewNodes:
print 'A-Node not found!', Link, Links[Link]['NODES'][0]
elif Links[Link]['NODES'][Count - 1] not in NewNodes:
print 'B-Node not found!', Link, Links[Link]['NODES'][-1]
for Id in Links[Link]['NODES'][1:Count - 1]:
if Id in NewNodes:
print 'Shape point is also a node!', Link, Id
# финальная обработка
print 'Creating the final Link data ...'
NewLinks = {}
for Link in Links:
GeoString = 'LINESTRING('
NodeA = Links[Link]['NODES'][0]
NodeB = Links[Link]['NODES'][-1]
Type = Links[Link]['TYPE']
Group = Links[Link]['GROUP']
for Node in Links[Link]['NODES']:
Lon, Lat = Nodes[Node][0:2]
GeoString += str(Lon) + ' ' + str(Lat) + ','
GeoString = GeoString[:-1] + ')'
NewLinks[Link] = [NodeA, NodeB, Group, Type, GeoString]
if len(Links[Link]['NODES']) < 2:
print '==> ERROR!', Link, NodeA, NodeB, Group, Type, GeoString
print 'OSM Ways:', len(Ways), '; OSM Nodes:', len(Nodes), '; Nodes:', len(NewNodes), '; NewLinks:', len(
NewLinks), '; Groups:', len(Groups)
#incsv(NewNodes, NewLinks, Ways)
indb(NewNodes, NewLinks, Ways, Nodes, AC, AcWay) |
############
# QUESTION 4
############
def interpolate(xy, x_hat):
# Assume x_hat values are within x values range
if len(xy) <= 1:
raise ValueError('please enter at least two valid measurements')
times_list = sorted([measure_tup[0] for measure_tup in xy])
if all([req_x in times_list for req_x in x_hat]):
return [(req_x, xy[times_list.index(req_x)][1]) for req_x in x_hat]
req_x_zone_dic = {}
for req_x in x_hat:
for meas_index in range(len(xy) - 1):
if times_list[meas_index] <= req_x <= times_list[meas_index + 1]:
req_x_zone_dic[req_x] = meas_index
break
zone_lin_dic = {}
lin = lambda tup1, tup2: lambda x: (tup2[1] - tup1[1]) / (tup2[0] - tup1[0]) * (x - tup1[0]) + tup1[1]
for zone in sorted(list(set(req_x_zone_dic.values()))):
zone_lin_dic[zone] = lin(xy[zone], xy[zone + 1])
return [(req_x, zone_lin_dic[req_x_zone_dic[req_x]](req_x)) for req_x in x_hat]
|
import logging
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import fix_path
from src import config, sane
class Handler(webapp.RequestHandler):
def set_header(self, name, value):
self.response.headers[name] = value
def write(self, text):
self.response.out.write(text)
def write_json(self, array, jsonp_callback=u""):
from django.utils import simplejson
json = simplejson.dumps(array)
if jsonp_callback:
result = u"%s(%s)" % (jsonp_callback, json)
else:
result = json
self.write(result)
def render(self, template, **kw):
from src import templating
self.write(templating.render(template, **kw))
def handle_exception(self, e, debug_mode):
if isinstance(e, sane.BadRequestError):
logging.info('Wrong path: %r', self.request.path)
self.error(400)
self.write(str(e))
return
return webapp.RequestHandler.handle_exception(self, e, debug_mode)
class CrossPostHandler(Handler):
    """Shared support for Cross-Origin Resource Sharing (CORS) and
    iframe POST requests."""

    def options(self):
        # CORS preflight request: answer with the allow-headers only.
        self._set_cors_headers()

    def _set_cors_headers(self):
        """Attach the CORS headers to the current response."""
        for name, value in (
                ('Access-Control-Allow-Origin', '*'),
                ('Access-Control-Allow-Methods', 'POST, OPTIONS'),
                ('Access-Control-Max-Age', str(20*24*3600))):
            self.set_header(name, value)

    def post(self):
        # The incoming Content-Type is ignored.
        # XDomainRequest sends everything as text/plain, so force form
        # decoding and drop any cached parse.
        self.request.content_type = "application/x-www-form-urlencoded"
        self.request.environ.pop("webob._parsed_post_var", None)
        result = self.prepare_json_response()
        self._set_cors_headers()
        # We keep the text/html content-type. It is needed by iframes.
        self.set_header('Content-Type', 'text/html; charset=utf-8')
        self.write_json(result)

    def prepare_json_response(self):
        """Hook: subclasses return the JSON-serializable response payload."""
        raise NotImplementedError
class SaveHandler(CrossPostHandler):
    def prepare_json_response(self):
        """Saves a typo fix.

        GET parameters:
            url ... url of the fixed page.
            orig ... original text.
            new ... fixed text.
            pos ... 0 for an unique original text. It is the number
                of the same preceding texts on the page.
            page_order ... ordering index of the fix on the page.

        Returns a dict with the marked-up diff of the saved fix.
        """
        from src import writing
        url = sane.valid_url(self.request.get("url"))
        orig = self.request.get("orig")
        new = self.request.get("new")
        pos = sane.valid_int(self.request.get("pos"))
        page_order = sane.valid_int(self.request.get("page_order"))
        fix = writing.save_fix(url, orig, new, pos, page_order)
        # Idiom fix: dropped the stray trailing semicolon of the original.
        marked = fix.mark_changes()
        return dict(marked=marked)
class UpdateGoneHandler(CrossPostHandler):
    def prepare_json_response(self):
        """Marks not-found fixes as gone and unmarks found gone fixes."""
        from src import writing
        from django.utils import simplejson
        payload = simplejson.loads(self.request.get("json"))
        writing.update_gone(
            sane.valid_url(payload.get("url", u"")),
            payload["gone"],
            payload["ungone"])
        return dict(success=True)
class FixesHandler(Handler):
    def get(self):
        """Return the known fixes for a page as JSON (or JSONP)."""
        from src import reading
        url = sane.valid_url(self.request.get("url"))
        callback = self.request.get("callback")
        results = [
            dict(
                orig=fix.orig_text,
                pos=fix.pos,
                gone=fix.gone,
                marked=fix.mark_changes())
            for fix in reading.find_fixes(url)]
        self.write_json(dict(fixes=results), jsonp_callback=callback)
class SearchHandler(Handler):
    def get(self):
        """Render the search results page, grouped by URL."""
        from src import reading, visual
        q = self.request.get("q")
        limit = 100
        fixes = reading.search(q, limit)
        if len(fixes) != limit:
            # Fewer hits than the cap: no truncation notice is needed.
            limit = None
        self.render("search.html", title=u"%s fixes" % q,
                    url_fixes=visual.group_by_url(fixes), q=q,
                    limit=limit)
class NotFound404Handler(Handler):
    """Catch-all handler: logs the path and answers 404."""

    def get(self):
        logging.info('Wrong path: %s', self.request.path)
        self.error(404)
        self.write("No such page.")

    def post(self):
        # POST gets the same 404 treatment as GET.
        self.get()
# URL routing table; order matters - the "/.*" catch-all must stay last.
app = webapp.WSGIApplication(
    [
        ("/api/save", SaveHandler),
        ("/api/update-gone", UpdateGoneHandler),
        ("/api/fixes", FixesHandler),
        ("/search", SearchHandler),
        ("/.*", NotFound404Handler),
    ],
    debug=config.DEBUG)
def main():
    """CGI entry point: serve the WSGI app."""
    run_wsgi_app(app)
if __name__ == "__main__":
    # Wrap datastore calls with automatic retries on timeouts before serving.
    import autoretry
    autoretry.autoretry_datastore_timeouts()
    main()
|
#!/usr/bin/env python
import time
import subprocess
budget = 16
reach = 20000
workers = [None] * budget
unit = 0
def hire(slot):
global unit
print unit
workers[slot] = subprocess.Popen(['python', 'getmeta.py', str(unit)])
unit += 1
for i in range(budget):
hire(i)
while True:
if unit > reach:
break
for i in range(budget):
if workers[i].poll() is not None:
hire(i)
time.sleep(1)
|
#! /usr/bin/python
from __future__ import division
import json
from collections import defaultdict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import time
import os
plt.style.use('ggplot')
# NOTE(review): hard-coded absolute path; only works on the author's machine.
os.chdir('/Users/deniz/Research/Organized-Hashtags')
# Wall-clock timer for the load/plot phase below.
startTime = time.time()
def open_data(filename):
    """Read `filename` and return the decoded JSON object."""
    with open(filename) as handle:
        return json.load(handle)
def write_data(d, filename):
    """Serialize `d` as JSON into `filename` (overwrites any content)."""
    with open(filename, 'w') as handle:
        json.dump(d, handle)
# hashtag -> list of tweet timestamps (Twitter "%a %b %d %H:%M:%S +0000 %Y").
hashtag_time = open_data('/Users/deniz/Research/Organized-Hashtags/merged_g1000.json')
hashtag_count = defaultdict(int)
for item in hashtag_time:
    hashtag_count[item] = len(hashtag_time[item])
# Hashtags ordered by descending mention count.
max_keys = sorted(hashtag_count, key=hashtag_count.get, reverse=True)
# Rank-frequency plot on a log y-axis.
plt.semilogy([ hashtag_count[i] for i in max_keys])
plt.grid()
endTime = time.time()
print(endTime - startTime)
df_final = pd.DataFrame()
# NOTE(review): this preallocated array is discarded - meanAbsPctChange is
# rebound to a pandas Series inside the loop below.
meanAbsPctChange = np.empty((len(max_keys),1))
meanAbsPctChange[:] = np.NAN
interArrivalTimes = defaultdict(int)
startTime = time.time()
for w1 in range(len(max_keys)):
    w = max_keys[w1]
    times = hashtag_time[w]
    dateIndex1 = pd.to_datetime(times,format='%a %b %d %H:%M:%S +0000 %Y').sort_values()
    dateIndex1 = sorted(dateIndex1)
    # One column per hashtag: mentions bucketed into 1-minute bins.
    df = pd.DataFrame(data=1, index = dateIndex1, columns=[w])
    df = df.resample('1min').sum()
    # NOTE(review): join inside the loop re-joins the growing frame each
    # iteration; collecting columns and joining once would be much faster.
    df_final = df_final.join(df,how='outer')
    # Sorted gaps between consecutive mentions, in seconds.
    difference = np.subtract(dateIndex1[1:],dateIndex1[0:-1])
    interArrivalTimes[w] = sorted([difference[i].seconds for i in range(len(difference))])
    # NOTE(review): recomputed over the whole frame every iteration; only the
    # final loop iteration's value survives - confirm this is intended.
    meanAbsPctChange = df_final.pct_change().abs().mean()
endTime = time.time()
print(endTime - startTime)
write_data(interArrivalTimes, 'interArrivalTimes.json')
df_final.to_pickle('NumberMentionsPerMinute.pkl')
meanAbsPctChange.to_pickle('meanAbsPctChange.pkl')
|
"""TODO"""
from .Expression import Expression, ExpressionError
from .Equation import Equation
from .Inequation import Inequation
from .ExpressionSystem import ExpressionSystem
from .EquationSystem import EquationSystem
|
import numpy as np
from .propagator import Propagator
from ..optics import Wavefront, make_agnostic_optical_element
from ..field import Field
@make_agnostic_optical_element()
class FraunhoferPropagator(Propagator):
    '''A monochromatic perfect lens propagator.

    Propagates a wavefront assumed to lie exactly in the front focal plane
    of a perfect lens to the back focal plane. The implementation follows [1]_.

    .. [1] Goodman, J.W., 2005 Introduction to Fourier optics. Roberts and Company Publishers.

    Parameters
    ----------
    input_grid : Grid
        The grid on which the incoming wavefront is defined.
    output_grid : Grid
        The grid on which the outgoing wavefront is to be evaluated.
    focal_length : scalar
        The focal length of the lens system.
    wavelength : scalar
        The wavelength of the wavefront.
    '''
    def __init__(self, input_grid, output_grid, focal_length=1, wavelength=1):
        from ..fourier import make_fourier_transform

        self.input_grid = input_grid
        self.output_grid = output_grid

        # Spatial-frequency grid sampled by the output plane for this
        # focal length and wavelength.
        self.uv_grid = output_grid.scaled(2 * np.pi / (focal_length * wavelength))
        self.fourier_transform = make_fourier_transform(input_grid, self.uv_grid)

        # Normalization intrinsic to Fraunhofer propagation.
        self.norm_factor = 1 / (1j * (focal_length * wavelength))

    def forward(self, wavefront):
        '''Propagate a wavefront forward through the lens.

        Parameters
        ----------
        wavefront : Wavefront
            The incoming wavefront.

        Returns
        -------
        Wavefront
            The wavefront after the propagation.
        '''
        propagated = self.fourier_transform.forward(wavefront.electric_field) * self.norm_factor
        return Wavefront(Field(propagated, self.output_grid), wavefront.wavelength)

    def backward(self, wavefront):
        '''Propagate a wavefront backward through the lens.

        Parameters
        ----------
        wavefront : Wavefront
            The incoming wavefront.

        Returns
        -------
        Wavefront
            The wavefront after the propagation.
        '''
        propagated = self.fourier_transform.backward(wavefront.electric_field) / self.norm_factor
        return Wavefront(Field(propagated, self.input_grid), wavefront.wavelength)

    def get_transformation_matrix_forward(self, input_grid, wavelength=1):
        '''Create the forward linear transformation between the internal input grid and output grid.

        Parameters
        ----------
        input_grid : Grid
            Ignored; the internal grid is used instead.
        wavelength : scalar
            Ignored; the internal wavelength is used instead.

        Returns
        -------
        ndarray
            The transformation matrix that describes the propagation.
        '''
        return self.fourier_transform.get_transformation_matrix_forward() * self.norm_factor

    def get_transformation_matrix_backward(self, input_grid, wavelength=1):
        '''Create the backward linear transformation between the internal input grid and output grid.

        Parameters
        ----------
        input_grid : Grid
            Ignored; the internal grid is used instead.
        wavelength : scalar
            Ignored; the internal wavelength is used instead.

        Returns
        -------
        ndarray
            The transformation matrix that describes the propagation.
        '''
        return self.fourier_transform.get_transformation_matrix_backward() / self.norm_factor
|
import json
from jsonschema import validate
class ConfLoader:
    """Loads and schema-validates the main and log JSON configuration files.

    Missing files are reported on stdout and the corresponding config stays
    None; the getters below then return None.
    """

    def __init__(self, c):
        """Read both schemas and both config files via the path provider `c`."""
        self._conf = c
        self._mainConfJson = None
        self._logConfJson = None
        # Schemas shipped with the application.
        mainconfschema = self._load_json('./res/mainconfschema.json')
        logconfschema = self._load_json('./res/logconfschema.json')
        try:
            with open(self._conf.getMainConfPath()) as mainConf:
                self._mainConfJson = json.load(mainConf)
                validate(instance=self._mainConfJson, schema=mainconfschema)
        except FileNotFoundError as e:
            print(e)
        try:
            print("confPath: ", self._conf.getLogConfPath())
            with open(self._conf.getLogConfPath()) as logConf:
                self._logConfJson = json.load(logConf)
                validate(instance=self._logConfJson, schema=logconfschema)
        except FileNotFoundError as e:
            print(e)

    @staticmethod
    def _load_json(path):
        """Best-effort JSON read: return the parsed object, or None if the
        file is missing (the error is printed, matching the original
        behavior of the inlined try/except blocks)."""
        try:
            with open(path) as fh:
                return json.load(fh)
        except FileNotFoundError as e:
            print(e)
            return None

    def getTargetPathList(self):
        """Return the 'targets' list from the log config, or None."""
        print(" getTargetPathList:", self._logConfJson)
        if self._logConfJson:
            return self._logConfJson['targets']
        return None

    def getOneShutRule(self):
        """Return the 'OneShutRule' section from the main config, or None."""
        if self._mainConfJson:
            return self._mainConfJson['OneShutRule']
        return None

    def getCompositeRule(self):
        """Return the 'CompositeRule' section from the main config, or None."""
        if self._mainConfJson:
            return self._mainConfJson['CompositeRule']
        return None
'''
def getDisplayLogType(self):
return self._conf.getDisplayLogType()
'''
|
# https://leetcode.com/problems/longest-turbulent-subarray
class Solution(object):
    def maxTurbulenceSize(self, A):
        """
        :type A: List[int]
        :rtype: int

        Longest subarray whose comparison signs strictly alternate.
        Tries both alternation phases over the even/odd index parity and
        keeps the best run seen.
        """
        def longest_run(greater_at_even):
            """Longest alternating run where even indices expect '>' when
            `greater_at_even`, '<' otherwise (odd indices expect the
            opposite). Equal neighbours always break the run."""
            best = streak = 1
            for idx in range(len(A) - 1):
                wants_greater = (idx % 2 == 0) == greater_at_even
                holds = A[idx] > A[idx + 1] if wants_greater else A[idx] < A[idx + 1]
                if holds:
                    streak += 1
                else:
                    best = max(best, streak)
                    streak = 1
            return max(best, streak)

        return max(longest_run(False), longest_run(True))
|
import math
from magicbot.state_machine import AutonomousStateMachine, state
from networktables import NetworkTable
from automations.filters import RangeFilter, VisionFilter
from automations.manipulategear import ManipulateGear
from automations.profilefollower import ProfileFollower
from components.chassis import Chassis
from components.gears import GearAligner, GearDepositor
from components.range_finder import RangeFinder
from components.vision import Vision
from components.winch import Winch
from utilities.bno055 import BNO055
from utilities.profilegenerator import generate_trapezoidal_trajectory
class PegAutonomous(AutonomousStateMachine):
    """Autonomous routine: drive toward the airship, align on a peg using
    vision/lidar/gyro, and deposit the gear.

    Subclasses (LeftPeg/CentrePeg/RightPeg) implement init_trajectories()
    to set self.perpendicular_heading and self.dr_displacement.
    """
    # Injectables
    bno055 = BNO055
    chassis = Chassis
    gear_aligner = GearAligner
    gear_depositor = GearDepositor
    manipulategear = ManipulateGear
    profilefollower = ProfileFollower
    range_filter = RangeFilter
    range_finder = RangeFinder
    sd = NetworkTable
    vision = Vision
    vision_filter = VisionFilter
    winch = Winch
    # Geometry/tuning constants. Distances are presumably metres and angles
    # radians (rotate_accel_speed is annotated rad*s^-2) - confirm.
    centre_to_front_bumper = 0.49
    lidar_to_front_bumper = 0.36
    peg_range = 1.5
    dead_reckon_range = 2
    side_drive_forward_length = 2.54
    side_rotate_angle = math.pi/3.0
    rotate_accel_speed = 4 # rad*s^-2
    rotate_velocity = 4
    peg_align_tolerance = 0.15
    displace_velocity = Chassis.max_vel/3
    displace_accel = Chassis.max_acc
    displace_decel = Chassis.max_acc/4
    # rotate_accel_speed = 2 # rad*s^-2
    # rotate_velocity = 2
    def on_enable(self):
        """Reset sensors/actuators and precompute trajectories at auto start."""
        super().on_enable()
        self.bno055.resetHeading()
        self.profilefollower.stop()
        self.gear_aligner.reset_position()
        self.gear_depositor.retract_gear()
        self.gear_depositor.lock_gear()
        self.init_trajectories()
        self.winch.enable_compressor()
        self.vision.enabled = True
        self.sd.putBoolean("log", True)
    @state(first=True)
    def drive_to_airship(self, initial_call):
        """Dead-reckon forward until close enough for sensor feedback."""
        # Drive to a range where we can close the loop using vision, lidar and
        # gyro to close the loop on position
        if initial_call:
            displace = generate_trapezoidal_trajectory(
                0, 0, self.dr_displacement, 0, self.displace_velocity,
                self.displace_accel, -self.displace_decel,
                self.chassis.motion_profile_freq)
            self.profilefollower.modify_queue(heading=0, linear=displace)
            self.profilefollower.execute_queue()
        if not self.profilefollower.executing:
            self.next_state("rotate_towards_airship")
    @state
    def rotate_towards_airship(self, initial_call):
        """Turn to the heading perpendicular to the chosen airship face."""
        if initial_call:
            rotate = generate_trapezoidal_trajectory(
                self.bno055.getHeading(), 0, self.perpendicular_heading, 0, self.rotate_velocity,
                self.rotate_accel_speed, -self.rotate_accel_speed,
                self.chassis.motion_profile_freq)
            self.profilefollower.modify_queue(heading=rotate, overwrite=True)
            self.logger.info("Rotate Start %s, End %s", rotate[0], rotate[-1])
            self.profilefollower.execute_queue()
        if not self.profilefollower.executing:
            self.next_state("rotate_towards_peg")
    @state
    def rotate_towards_peg(self, initial_call):
        """Fine-correct the heading toward the vision target, if one is seen."""
        if initial_call:
            # vision.x == 0.0 is treated as "no target": skip the correction.
            if self.vision.x != 0.0:
                measure_trajectory = generate_trapezoidal_trajectory(
                    self.bno055.getHeading(), 0,
                    self.bno055.getHeading() + self.vision.derive_vision_angle(), 0,
                    self.rotate_velocity, self.rotate_accel_speed, -self.rotate_accel_speed/2,
                    Chassis.motion_profile_freq)
                self.logger.info("vision_x %s, vision_angle %s, heading %s, heading_start %s, heading_end %s",
                    self.vision.x, self.vision.derive_vision_angle(), self.bno055.getHeading(), measure_trajectory[0][0], measure_trajectory[-1][0])
                self.profilefollower.modify_queue(heading=measure_trajectory, overwrite=True)
                self.profilefollower.execute_queue()
                # self.done()
        if not self.profilefollower.executing:
            self.next_state("drive_to_wall")
    @state
    def drive_to_wall(self, initial_call):
        """Drive up to the wall using the lidar range when it looks sane,
        otherwise dead reckoning; start the gear manipulator on the way."""
        if initial_call:
            self.profilefollower.stop()
            peg_range = self.peg_range - self.centre_to_front_bumper + 0.3
            r = self.range_filter.range
            self.logger.info("DRIVE WALL RANGE: %s", self.range_finder.getDistance())
            self.logger.info("DRIVE WALL FILTER RANGE: %s", self.range_filter.range)
            # 40 is range finder max distance, better failure mode than inf or really small
            if not math.isfinite(r):
                r = 40
            elif r < 0.5:
                r = 40
            to_peg = None
            if r > self.dead_reckon_range:
                self.logger.info("DEAD RECKON AUTO")
                to_peg = generate_trapezoidal_trajectory(
                    0, 0, peg_range + 0.1, 0, self.displace_velocity,
                    self.displace_accel, -self.displace_decel,
                    Chassis.motion_profile_freq)
            else:
                self.logger.info("RANGE AUTO")
                to_peg = generate_trapezoidal_trajectory(0, 0,
                    self.range_finder.getDistance() - self.lidar_to_front_bumper + 0.1,
                    0, self.displace_velocity, self.displace_accel, -self.displace_decel,
                    Chassis.motion_profile_freq)
            self.profilefollower.modify_queue(self.bno055.getHeading(),
                linear=to_peg, overwrite=True)
            self.profilefollower.execute_queue()
            self.manipulategear.engage()
        if not self.profilefollower.executing:
            self.next_state("deploying_gear")
    @state
    def deploying_gear(self):
        """Wait for the gear mechanism; finish once the gear is released."""
        if self.manipulategear.current_state == "forward_open":
            # self.next_state("roll_back")
            self.done()
        # elif self.manipulategear.current_state == "backward_open":
        #     self.next_state("roll_back")
        elif self.manipulategear.current_state != "forward_closed":
            self.manipulategear.engage(initial_state="forward_closed", force=True)
    @state
    def roll_back(self, initial_call):
        """Back away ~2 m after depositing. Appears unreachable in the
        current flow (the transition to it is commented out above)."""
        if initial_call:
            self.profilefollower.stop()
            roll_back = generate_trapezoidal_trajectory(
                0, 0, -2, 0, self.displace_velocity, self.displace_accel/2,
                -self.displace_decel, Chassis.motion_profile_freq)
            self.profilefollower.modify_queue(self.bno055.getHeading(),
                linear=roll_back, overwrite=True)
            self.profilefollower.execute_queue()
        elif not self.profilefollower.executing:
            self.done()
    def done(self):
        """End the routine and disable vision processing."""
        super().done()
        self.vision.enabled = False
class LeftPeg(PegAutonomous):
    """Left-peg variant: drive down the side, then rotate by -side_rotate_angle."""
    MODE_NAME = "Left Peg"

    def init_trajectories(self):
        """Set the heading and dead-reckoned displacement for the left peg."""
        self.dr_displacement = self.side_drive_forward_length - self.centre_to_front_bumper
        self.perpendicular_heading = -self.side_rotate_angle
class CentrePeg(PegAutonomous):
    """Centre-peg variant: head straight at the airship, no rotation step."""
    MODE_NAME = "Centre Peg"

    centre_airship_distance = 2.85
    peg_range = centre_airship_distance
    dead_reckon_range = 4

    def init_trajectories(self):
        """No turn required; displacement is half the airship distance."""
        self.perpendicular_heading = 0
        self.dr_displacement = self.centre_airship_distance / 2 - self.centre_to_front_bumper

    @state(first=True)
    def drive_to_airship(self):
        # Override the initial state here to immediately go to the state we want
        # Use next_state_now to avoid wasting a control loop iteration
        self.next_state_now("drive_to_wall")
class RightPeg(PegAutonomous):
    """Right-peg variant: drive down the side, then rotate by +side_rotate_angle."""
    MODE_NAME = "Right Peg"

    def init_trajectories(self):
        """Set the heading and dead-reckoned displacement for the right peg."""
        self.dr_displacement = self.side_drive_forward_length - self.centre_to_front_bumper
        self.perpendicular_heading = self.side_rotate_angle
|
from collections import defaultdict
class UF:
    """Minimal union-find with lazy node creation (no path compression)."""

    def __init__(self):
        # item -> parent item; roots point to themselves.
        self.parent = {}

    def find(self, x):
        """Return the root of x, inserting x as its own root if unseen."""
        self.parent.setdefault(x, x)
        while x != self.parent[x]:
            x = self.parent[x]
        return x

    def union(self, p, q):
        """Merge the sets containing p and q."""
        self.parent[self.find(p)] = self.find(q)

class Solution:
    def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:
        """Merge accounts that share at least one email (LeetCode 721).

        Each account is [name, email1, email2, ...]; returns one merged
        account per connected component, with its emails sorted.

        BUG FIX: the original indexed the outer `accounts` list where it
        meant the current account `acc` (`accounts[0]` for the name,
        `uf.union(accounts[index + 1], accounts[index + 2])` for the union,
        and `len(accounts)` for the bound), which produced wrong merges and
        IndexErrors for accounts with more emails than there are accounts.
        """
        uf = UF()
        email_to_name = {}
        for acc in accounts:
            name = acc[0]
            for email in acc[1:]:
                email_to_name[email] = name
                # Union every email with the account's first email.
                uf.union(acc[1], email)
        merged = defaultdict(list)
        for email in email_to_name:
            merged[uf.find(email)].append(email)
        return [[email_to_name[emails[0]]] + sorted(emails) for emails in merged.values()]
|
import csv
import numpy as np
from collections import OrderedDict
from PIL import Image
import copy
import flask
from flask import Flask, render_template, request, redirect, url_for
from tools.vis_web import _get_train_stats
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
    """Landing page: list landmark ids with their per-landmark image counts."""
    # NOTE(review): .itervalues() is Python 2 only - under Python 3 this line
    # raises AttributeError (use .values()). Confirm the target runtime.
    return render_template("index.html", ldmk_ids=info_by_ldmk.keys(), n_imgs=[len(en) for en in info_by_ldmk.itervalues()])
@app.route('/view/<string:ldmk_id>')
def view_ldmk_id(ldmk_id):
    """Detail page: show up to the first 500 entries for one landmark id."""
    return render_template("view.html", infos=info_by_ldmk[ldmk_id][:500], ldmk_id=ldmk_id, n_img=len(info_by_ldmk[ldmk_id]))
if __name__ == '__main__':
    # Training stats are loaded once at startup, before serving.
    info_by_ldmk = _get_train_stats()
    app.run(host='0.0.0.0', debug=True, port=5000)
|
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
# arguments-run
parser.add_argument("--sim", action='store_true', help="run simulation")
parser.add_argument("--plot3Dpos", action='store_true', help="plot static 3d position plot")
parser.add_argument("--anim", action='store_true', help="make animation that shows sim progression on the X-Y, Y-Z, and X-Z planes")
parser.add_argument("--rfc", action='store_true', help="run random forest with classification trees")
# arguments-sim
parser.add_argument("--earlyStop", action="store_true", help="stop sim early if either collision or ejection")
parser.add_argument("--ejectSF", default=1.0, type=float, help="scale factor to increase or decrease ejection critera. EG: --ejectSF 0.5 means scenario will be classified as ejection if speed >= 0.5*escape speed; --ejectSF 2.0 means scenario will be classified as ejection if speed >= 2*escape speed")
# exploratory data analysis
parser.add_argument("--eda", action='store_true', help="plot exploratory data analysis figures.")
# arguments-3D position plot
parser.add_argument('--sampleRowIdx', default=0, type=int, help="select the scenario you wish to plot 3D positions IOR make animation for (default = 0)")
parser.add_argument("--timeIdx", help="select either initial time (0), or final time (-1); can be entered either as int or str")
# arguments-animation
# arguments-random forest classifier
# process args
args = parser.parse_args()
kwargs = args.__dict__
# pull out run args
sim = kwargs.pop('sim')
eda = kwargs.pop('eda')
plot3Dpos = kwargs.pop('plot3Dpos')
anim = kwargs.pop('anim')
rfc = kwargs.pop('rfc')
# make a lists for each set of model arguments
simKwargKeys = ['earlyStop', 'ejectSF']
edaKeys = []
posPlotKeys = ['sampleRowIdx', 'timeIdx']
animKeys = ['sampleRowIdx']
rfcKwargKeys = []
# separate dictionaries
[simKwargs, edaKwargs, posPlotKwargs, animKwargs, rfcKwargs] = map(lambda keys: {x: kwargs[x] for x in keys}, [simKwargKeys, edaKeys, posPlotKeys, animKeys, rfcKwargKeys])
# run simulation
if sim:
from pyFiles.Simulation import Simulation
simInst = Simulation()
simInst.run(**simKwargs)
# exploratory analysis
if eda:
import pyFiles.explore_data
# plot static 3d positions
if plot3Dpos:
from pyFiles.Plots import staticPositionPlot
staticPositionPlot(**posPlotKwargs)
# make animation .mp4 file
if anim:
from pyFiles.Plots import scenarioAnimation
scenarioAnimation(**animKwargs)
# run random forest classifier
if rfc:
from pyFiles.MetaModels.RFclassification import RandomForests
rfcInst = RandomForests()
rfcInst.run(**rfcKwargs)
|
'''
Created on 09.10.2012
@author: cbalea
'''
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.support.wait import WebDriverWait
import time
class BasePage(object):
    """Common Selenium helpers shared by the page objects."""

    def __init__(self, driver, wait):
        """Store the WebDriver and its associated WebDriverWait."""
        self.driver = driver
        self.wait = wait

    def elementExistsByClassName(self, className):
        """Return the element with the given class name, or False if absent."""
        try:
            return self.driver.find_element_by_class_name(className)
        except NoSuchElementException:
            return False

    def elementExistsById(self, id):
        """Wait up to 5s for the element with `id`; return it or False."""
        try:
            return WebDriverWait(self.driver, 5).until(lambda driver : driver.find_element_by_id(id))
        except TimeoutException:
            return False

    def elementExistsByXpath(self, xpath):
        """Wait up to 5s for the element matching `xpath`; return it or False."""
        try:
            return WebDriverWait(self.driver, 5).until(lambda driver : driver.find_element_by_xpath(xpath))
        except TimeoutException:
            return False

    def elementsExistByXpath(self, xpath):
        """Wait up to 2s for elements matching `xpath`; return them or False."""
        try:
            return WebDriverWait(self.driver, 2).until(lambda driver : driver.find_elements_by_xpath(xpath))
        except TimeoutException:
            return False

    def elementExistsByLinkText(self, linkText):
        """Wait up to 5s for an anchor with the exact text; return it or False."""
        try:
            return WebDriverWait(self.driver, 5).until(lambda driver : driver.find_element_by_xpath("//a[text()='%s']" %linkText))
        except TimeoutException:
            return False

    def isCheckboxChecked(self, checkbox):
        """Return True iff the checkbox's `checked` attribute equals "true"."""
        return checkbox.get_attribute("checked") == "true"

    def clickOnLink(self, link):
        """Click the anchor whose visible text equals `link`."""
        self.wait.until(lambda driver : driver.find_element_by_xpath("//a[text()='%s']" %link)).click()

    def sublistExistsInList(self, sublist, big_list):
        """Return True iff every element of `sublist` occurs in `big_list`.

        BUG FIX: the original compared list.index() with None, but index()
        never returns None - it raises ValueError for a missing element -
        so a missing element could never be reported as False.
        """
        return all(element in big_list for element in sublist)

    def switchToNewestWindow(self):
        """Wait for a popup to open, then switch to the last window handle."""
        self.waitUntilPopupOpens()
        for handle in self.driver.window_handles:
            self.driver.switch_to_window(handle)

    def waitUntilPopupCloses(self):
        """Block (polling every second) until only one window remains."""
        while len(self.driver.window_handles) > 1:
            time.sleep(1)

    def waitUntilPopupOpens(self):
        """Block until a second window appears, or raise after ~3 seconds.

        BUG FIX: the original loop condition was `timeout < 0` with
        timeout starting at 3, so the wait never ran and the timeout error
        could never be raised. Also raise based on whether the popup
        actually appeared, not on the counter value.
        """
        timeout = 3
        while len(self.driver.window_handles) == 1 and timeout > 0:
            time.sleep(1)
            timeout -= 1
        if len(self.driver.window_handles) == 1:
            raise Exception("Popup did not open")

    def typeInTinymceEditor(self, iframe, text):
        """Focus the TinyMCE iframe and replace its content with `text`."""
        self.wait.until(lambda driver : driver.find_element_by_xpath("//iframe[@id='%s']" %iframe))
        self.driver.switch_to_frame(iframe)
        self.tinymceEditor = self.driver.switch_to_active_element()
        self.tinymceEditor.click()
        self.tinymceEditor.clear()
        self.tinymceEditor.send_keys(text)
        self.switchToNewestWindow()

    def getOptionsTextInCombobox(self, combobox):
        """Return the visible text of every option in a Select `combobox`."""
        return [option.text for option in combobox.options]
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pages.base_page import Page
from time import sleep
class ProductPage(Page):
    """Page object for a product page: size/colour pickers and add-to-cart."""

    SELECT_SIZE_LOCATOR = (By.XPATH, "//*[@id='picker-1']/button")
    CHOOSE_SIZE_LOCATOR = (By.XPATH, '//*[@id="picker-1"]/ul/li[3]/div/button')
    ADD_TO_CART_LOCATOR = (By.XPATH, "//button[@data-added-text='Item added']")
    COUNT_ITEM_LOCATOR = (By.XPATH, "//span[@class='shoppingbag-item-count']")
    SELECT_ITEM_LOCATOR = (By.XPATH, "//select[@class='Select-module_select__3ZfQR']")
    CHOOSE_ITEM_LOCATOR = (By.XPATH, "//div[@class='Field-module_childWrapper__tFcCt']//select//option[@value='2']")
    COLOR_SKIRTS_LOCATOR = (By.XPATH, "//ul[@class='group']//li")
    BLACK_CALOR_LOCATOR = (By.XPATH, "//ul[@class='group']//a[@title='Black']")

    def click_select_size(self):
        """Open the size picker and choose the third size option."""
        self.scroll_into_view(*self.SELECT_SIZE_LOCATOR)
        self.wait_for_element_appear(*self.SELECT_SIZE_LOCATOR)
        self.wait_for_element_click(*self.SELECT_SIZE_LOCATOR)
        self.wait_for_element_appear(*self.CHOOSE_SIZE_LOCATOR)
        self.wait_for_element_click(*self.CHOOSE_SIZE_LOCATOR)

    def click_add(self):
        """Press the add-to-cart button."""
        self.wait_for_element_click(*self.ADD_TO_CART_LOCATOR)

    def check_cart_item_number(self):
        """Assert the shopping-bag counter shows a positive item count."""
        self.wait_for_element_appear(*self.COUNT_ITEM_LOCATOR)
        counter = self.driver.find_element(*self.COUNT_ITEM_LOCATOR)
        print('Item number in the shopping bag = ', counter.text)
        assert int(counter.text) > 0

    def click_select_item_number(self):
        """Set the quantity dropdown to 2."""
        self.click(*self.SELECT_ITEM_LOCATOR)
        self.click(*self.CHOOSE_ITEM_LOCATOR)

    def get_elem_links_list(self, expected_items):
        """Check that the colour swatch list has `expected_items` entries."""
        swatches = self.driver.find_elements(*self.COLOR_SKIRTS_LOCATOR)
        expected_items = int(expected_items)
        print(len(swatches))
        if not swatches:
            print('not found')
            return
        assert len(swatches) == expected_items, f'expected {expected_items}, but got {swatches}'

    def click_black(self):
        """Select the black colour swatch."""
        self.wait_for_element_click(*self.BLACK_CALOR_LOCATOR)
import unittest
from pingout import pingout
class TestPingout(unittest.TestCase):
    """Checks pingout() against each expected response category."""

    def _assert_all(self, values, expected):
        """Assert pingout(v) == expected for every v, logging each input."""
        for value in values:
            print('Testing:', value)
            assert pingout(value) == expected

    def test_first_check(self):
        self._assert_all([1], "PING")

    def test_check_signal_strength(self):
        self._assert_all([3, 6, 9, 18], "CHECK_SIGNAL_STRENGTH")

    def test_check_channel_noise(self):
        self._assert_all([5, 10, 50], "CHECK_CHANNEL_NOISE")

    def test_scan_for_towers(self):
        self._assert_all([15, 30, 75], "SCAN_FOR_TOWERS")

    def test_check_not_int(self):
        value = 'test'
        print('Testing', value)
        assert pingout(value) == "NOT_INT"

if __name__ == '__main__':
    unittest.main()
|
from django.contrib import admin
from expenses.models import *
# Register your models here.
# These models use the default ModelAdmin; customized admins are below.
admin.site.register(Client)
admin.site.register(EmployeeSalaryAdjustment)
admin.site.register(File)
admin.site.register(SubContractorProject)
admin.site.register(SubContractorProjectDay)
# Inline editors used by DayAdmin below.
class FileInline(admin.StackedInline):
    model = File
class ExpenseInline(admin.StackedInline):
    model = Expense
# Admins with a slug auto-filled from the name field.
class EmployeeAdmin(admin.ModelAdmin):
    prepopulated_fields = {'slug': ('name',)}
class ProjectAdmin(admin.ModelAdmin):
    prepopulated_fields = {'slug': ('name',)}
class WorkDayInline(admin.StackedInline):
    model = WorkDay
class SubContractorPaymentInline(admin.StackedInline):
    model = SubContractorPayment
class SubContractorAdmin(admin.ModelAdmin):
    prepopulated_fields = {'slug': ('name',)}
# BUG FIX: renamed from `SubContractorProjectDay` - the inline class used to
# shadow the star-imported model of the same name, making the model
# unreachable from this point in the module and confusing the two.
class SubContractorProjectDayInline(admin.StackedInline):
    model = SubContractorProjectDay
class DayAdmin(admin.ModelAdmin):
    """Admin for Day with all related records editable inline."""
    prepopulated_fields = {'slug': ('date',)}
    inlines = [
        WorkDayInline,
        ExpenseInline,
        SubContractorPaymentInline,
        SubContractorProjectDayInline,
        FileInline,
    ]
# Remaining registrations; custom admins are attached where defined above.
admin.site.register(WorkDay)
admin.site.register(Employee, EmployeeAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Expense)
admin.site.register(Day, DayAdmin)
admin.site.register(SubContractorPayment)
admin.site.register(SubContractor, SubContractorAdmin)
admin.site.register(EmployeeCalculatedPay)
from bs4 import BeautifulSoup
# Sample document used by the experiments below.
html_doc="""
<html><head><title>The dormouse's story</title></head>
<body>
888
<div class='c1'>
<p class='c2'>
</p>
<div id='i1'>
<a>link</a>
</div>
</div>
</body>
</html>
"""
from bs4.element import Tag
soup=BeautifulSoup(html_doc,features="html.parser")
# Grab the <p class='c2'> element.
tags=soup.find(attrs={'class':'c2'})
# Build a brand-new <i id='it'>asdfg</i> tag.
tag=Tag(name='i',attrs={'id':'it'})
tag.string='asdfg'
print(tag)
import copy
# Deep copy of the found element (unused below; kept for experimentation).
new_copy=copy.deepcopy(tags)
# Append the new tag inside <div id='i1'> and show the mutated document.
soup.find(id='i1').append(tag)
print(soup)
#
# print(tags.text)
# print(list(tags.stripped_strings))
# for item in tags.children:
# print('item is:',item,type(item))
# tags.string='12233'
# print(tags)
# print(tags.previous)
# print(list(tags.previous_elements))
# print(tags.previous_sibling)
# print(tags.previous_siblings)
# for tag in tags:
# print(tag.name)
#2attrs
# soup=BeautifulSoup(html_doc,features="html.parser")
# tag=soup.find(class_='c1') #id不用写
# # print(tag.attrs)
# tag.attrs['id']=1
# del tag.attrs['class']
# # print(tag)
# # print(soup)
# # print(tag.children)
# # print(list(tag.children))
# # print(list(tag.descendants))
# # print(tag.find_all(recursive=False))
#
# # tag.clear()
# # print(soup)
# # v=tag.extract()
# # print('v is :',v)
# # print(soup)
# #
# # print('tag is :',tag,type(tag))
# # print(tag.decode())
# # print(tag.encode())
# # print(tag.decode_contents())
# # from bs4.element import Tag
# # Tag.get_text()
# # Tag.text()
# # tag.text
# # tag.get_text('id')
#
#
#
#
#
|
#!/usr/bin/env python3
'''
Created on Jul 23, 2017
@author: Daniel Sela, Arnon Sela
'''
from rotseana.findburst.matchcoords import matchcoords
from rotseana.findburst.findburst_gd import findburst_gd
from rotseana.findburst.read_data_file import get_data_file_rotse
import itertools
import matplotlib
matplotlib.use('PDF')
# import os
def matchcoords_gd(coord, coord_file, mindelta, minchisq, minsig, fits_index, error, with_reference=False, plot=False, log=None, quiet=False, verbose=False):
    """Match `coord` against `coord_file`, then collect good observations
    with findburst_gd for every matched object.

    Results are optionally written to a '<log>.txt' CSV-like file and, unless
    `quiet`, printed to stdout. `with_reference` prefixes each line with the
    coordinate file and object id. (`plot` is accepted for interface
    compatibility but unused here, as in the original.)

    Returns None; output happens via file/stdout.
    """
    result = list()
    match_res = matchcoords(file=coord_file, coord=coord, error=error, verbose=verbose)
    objids = list()
    for lineno, match in enumerate(match_res, start=1):
        if len(match) != 3:
            raise Exception("missing values (only %d found); lineno: %d" % (len(match), lineno,))
        match_file, id_, jname = match
        objids.append((match_file, id_))
    # Group the object ids per data file so each file is read only once.
    data = sorted(objids)
    for match_file, grouped in itertools.groupby(data, lambda x: x[0]):
        rotse = get_data_file_rotse(match_file)
        objid = [int(id_) for _, id_ in grouped]
        answer = findburst_gd(match=match_file, mindelta=mindelta, minsig=minsig, fits_index=fits_index, minchisq=minchisq, objid=objid, rotse=rotse, verbose=verbose)
        result.extend(answer)
    if verbose:
        print('number of good obs found: %d' % (len(result),))
    csv_out = None
    try:
        if log is not None and len(result) > 0:
            csvfile = log + '.txt' if not log.endswith('.txt') else log
            csv_out = open(csvfile, 'w')
            if verbose:
                print('writing %s' % csvfile)
        for objid, mag, jd, merr in result:
            sout = '%s %s %s' % (jd, mag, merr)
            if with_reference:
                sout = '%s %d %s' % (coord_file, objid, sout)
            if csv_out:
                csv_out.write('%s\n' % sout)
            if not quiet:
                print(sout)
    finally:
        # BUG FIX (resource leak): the original left the file open if any
        # exception occurred while formatting/writing the rows.
        if csv_out:
            csv_out.close()
|
import redis
import time
# Local Redis connection; decode_responses makes get() return str, not bytes.
r = redis.Redis(host='localhost', port=6379, db=0, charset="utf8", decode_responses=True)
r.set("15991030771", "1234")
# Set an expiry time (600000 seconds).
# NOTE(review): this expires the key 'mobile', which this script never sets -
# presumably the "15991030771" key just written was intended; confirm.
r.expire('mobile', 60*10000)
time.sleep(1)
print(r.get("15991030771"))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib, urllib2, json
from django.core.validators import URLValidator, ValidationError
class GooglUrlShort(object):
    """Minimal client for Google's goo.gl URL shortener API (Python 2:
    urllib/urllib2/unicode)."""

    api_url = "https://www.googleapis.com/urlshortener/v1/url"

    def __init__(self, url):
        """Normalize `url` to a UTF-8 byte string and validate it.

        Raises django's ValidationError for an invalid URL.
        BUG FIX: the original did `return u""` inside __init__'s except
        clause, which made Python raise TypeError('__init__() should return
        None') for every invalid URL; letting ValidationError propagate
        reports the actual problem to the caller.
        """
        self.url = url
        if isinstance(url, unicode):
            self.url = url.encode('utf-8')
        url_validator = URLValidator()
        url_validator(self.url)

    def short(self, all_response = False):
        """Shorten self.url; return the short id, or u"" on failure.

        With all_response=True the raw API JSON body is returned instead.
        """
        header = { "Content-Type": "application/json" }
        params = { "longUrl": self.url }
        # BUG FIX: `except urllib2.URLError, urllib2.HTTPError:` is the old
        # py2 "except Type, name" form - it caught only URLError and rebound
        # the attribute urllib2.HTTPError to the caught exception. Catch a
        # tuple instead (HTTPError subclasses URLError, listed for clarity).
        try:
            response = urllib2.urlopen(urllib2.Request(
                self.api_url, json.dumps(params), header))
        except (urllib2.URLError, urllib2.HTTPError):
            return u""
        json_data = response.read()
        if all_response:
            return json_data
        data = json.loads(json_data)
        if 'id' in data:
            return data['id']
        return u""

    def expend(self, all_response = False):
        """Resolve a short URL to its long form; return "" if not found.

        Note: the "longUrl" membership test runs on the raw JSON string
        (a substring check), preserved from the original behavior.
        """
        json_data = urllib.urlopen(u"https://www.googleapis.com/urlshortener"
            "/v1/url?shortUrl=%s" % self.url).read()
        if not all_response:
            return json.loads(
                json_data)['longUrl'] if "longUrl" in json_data else ""
        return json_data
|
from tkinter import *
from tkinter.filedialog import askdirectory
from tkinter.filedialog import askopenfilename
from convert import start_convert
def selectPath():
    """Ask the user for a directory and remember the choice in `path`."""
    chosen = askdirectory()
    path.set(chosen)
def selectFile():
    """Ask the user for a file and remember the choice in `file`."""
    chosen = askopenfilename()
    file.set(chosen)
def convert():
    """Validate the selected file, run the conversion, report the status."""
    source = file.get()
    if not source:
        warning.set("请选择原始文件!")
        return
    warning.set("开始转化")
    print(source)
    start_convert(source)
    warning.set("文件转化成功")
# Build the single-window GUI: a file picker row and a run/status row.
root = Tk()
root.title("视频转化工具")
# Shared Tk variables read/written by the callbacks above.
path = StringVar()
file = StringVar()
warning = StringVar()
Label(root, text="原文件:").grid(row=0, column=0)
Entry(root, textvariable=file).grid(row=0, column=1)
Button(root, text="文件选择", command=selectFile).grid(row=0, column=2)
# Label(root, text="目标路径:").grid(row=1, column=0)
# Entry(root, textvariable=path).grid(row=1, column=1)
# Button(root, text="路径选择", command=selectPath).grid(row=1, column=2)
Button(root, text="开始执行", command=convert).grid(row=2, column=0)
# The warning Entry doubles as a status display.
Entry(root, textvariable=warning).grid(row=2, column=1)
root.mainloop()
|
import json
from server.BaseHandler import BaseHandler
from server import jsonEncode
from server.UserModele import User
class AuthHandler(BaseHandler):
    """Authentication endpoints: login ('connexion'), session query
    ('affuser'), and account creation ('register')."""
    def get(self):
        # GET has no content.
        self.response.write('')
    def post(self):
        # Mode comes from ?mode= or, failing that, the last URL path segment.
        mode = self.request.GET.get("mode", self.request.url.split("/")[-1].split('?')[0])
        if mode == 'connexion':
            lebody = json.loads(self.request.body)
            user = str(lebody.get('user')).lower()
            user = user.replace(" ", "")
            mdp = str(lebody.get('mdp')).lower()
            # NOTE(review): after str(), a missing field is the string "none",
            # so the `!= None` checks can never trigger.
            if user != "" and mdp != "" and user != None and mdp != None:
                # Filters must be separate query() arguments: the original
                # `User.user == user and User.password == mdp` evaluated the
                # Python `and`, discarding the user filter and matching on
                # the password alone.
                connexionexiste = User.query(User.user == user, User.password == mdp).get()
                if connexionexiste != "" and connexionexiste != None:
                    self.setSessionParameter('user', user)
                else:
                    print('connexion existe pas')
            else:
                print('user et mot de pass non rempli')
        elif mode == 'affuser':
            user = self.getSessionUser()
            # Report whether a user is bound to the current session.
            response = {
                'user': bool(user)
            }
            self.response.write(jsonEncode.encode(response))
        elif mode == 'register':
            lebody = json.loads(self.request.body)
            user = str(lebody.get('user')).lower()
            user = user.replace(" ", "")
            mdp = str(lebody.get('mdp')).lower()
            if user != "" and mdp != "" and user != None and mdp != None:
                userexiste = User.query(User.user == user).get()
                if userexiste != "" and userexiste != None:
                    print('existe deja')
                else:
                    # NOTE(review): the password is persisted in plain text;
                    # consider hashing before storage.
                    newuser = User(user=user, password=mdp)
                    newuser.put()
                    print('Ok creer')
    def put(self):
        pass
    def delete(self):
        pass
|
import threading
import time
import queue
import os
import sys
import serial
def clear():
    """Clear the console, falling back to an ANSI reset escape sequence."""
    if os.name == 'nt':
        command = 'cls||echo -e \\\\033c'
    else:
        command = 'clear||echo -e \\\\033c' # change if this doesn't work on Unix systems
    os.system(command)
def flush_input():
    """Discard any pending, unread keyboard input (Windows or POSIX)."""
    try:
        import msvcrt
    except ImportError:
        import sys, termios #for linux/unix
        # Flush the terminal's queued input in one call.
        termios.tcflush(sys.stdin, termios.TCIOFLUSH)
    else:
        # Drain the Windows console buffer one key at a time.
        while msvcrt.kbhit():
            msvcrt.getch()
def kbfunc():
    #returns a boolean for whether the keyboard has been hit
    """Return True when a key press is pending on stdin.

    Windows: delegates to the non-blocking msvcrt.kbhit().
    POSIX: temporarily switches the terminal to non-canonical, non-echoing,
    non-blocking mode and attempts a 1-byte read.
    NOTE(review): the POSIX branch consumes the pending byte, so it is not a
    faithful kbhit(); confirm this is acceptable for the 'q'-to-quit polling
    loop below (which reads the key again via getch()).
    """
    try:
        import msvcrt
        return msvcrt.kbhit()
    except ImportError: #for linux/unix
        import termios, fcntl, sys, os
        fd = sys.stdin.fileno()
        oldterm = termios.tcgetattr(fd)
        newattr = termios.tcgetattr(fd)
        # Disable canonical mode and echo so single keys are delivered raw.
        newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(fd, termios.TCSANOW, newattr)
        oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
        # Non-blocking reads: read() raises IOError when no byte is pending.
        fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
        try:
            while True:
                try:
                    c = sys.stdin.read(1)
                    return True
                except IOError:
                    return False
        finally:
            # Always restore terminal attributes and file-status flags.
            termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
            fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
# try:
# from msvcrt import kbhit
# except ImportError:
# import termios, fcntl, sys, os
# def kbhit():
# fd = sys.stdin.fileno()
# oldterm = termios.tcgetattr(fd)
# newattr = termios.tcgetattr(fd)
# newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
# termios.tcsetattr(fd, termios.TCSANOW, newattr)
# oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
# fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
# try:
# while True:
# try:
# c = sys.stdin.read(1)
# return True
# except IOError:
# return False
# finally:
# termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
# fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
# Getch code is a modified version of http://code.activestate.com/recipes/134892/
# Created by stackoverflow user /2734389/kiri
# Found from https://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user
class _Getch:
    """Gets a single character from standard input. Does not echo to the screen."""
    def __init__(self):
        # Prefer the Windows backend; ImportError means we are on Unix.
        try:
            self.impl = _GetchWindows()
        except ImportError:
            self.impl = _GetchUnix()
    def __call__(self):
        char = self.impl()
        # Translate the control bytes a cooked terminal would normally
        # convert into signals/EOF.
        if char == b'\x03':
            print("Ctrl+C")
            raise KeyboardInterrupt
        if char == b'\x04':
            print("Ctrl+D")
            raise EOFError
        return char
class _GetchUnix:
    """Single-character reader for POSIX terminals using raw mode."""
    def __init__(self):
        import tty, sys  # fail fast here if tty support is unavailable
    def __call__(self):
        import sys, tty, termios
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            # Raw mode: deliver every keypress immediately, without echo.
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            # Always restore the saved terminal attributes.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        # Return bytes to match the Windows implementation.
        return ch.encode('utf-8')
class _GetchWindows:
    """Single-character reader backed by the msvcrt console API."""
    def __init__(self):
        import msvcrt  # raises ImportError off Windows, selecting the Unix backend
    def __call__(self):
        import msvcrt
        key = msvcrt.getwch()
        # Encode to bytes so both backends return the same type.
        return key.encode('utf-8')
# --- Startup prompt: connect for real, fake the BT link, or abort. --------
bt_usage = True
while True:
    clear()
    confirmation = input("------------------------- \nConnect to Darwin? \n'y' -> yes \n'f' -> yes and fake BT connection, for script testing purposes \n'n' -> no \n------------------------- \n")
    if len(confirmation) > 0:
        if confirmation[0] == "y":
            break
        elif confirmation[0] == "n" or confirmation[0] == "q":
            raise SystemExit
        elif confirmation[0] == "f":
            bt_usage = False  # keep running, but skip the real serial link
            break
    continue
clear()
# --- Shared state read/written by the worker threads below. ---------------
getch = _Getch()
lock = threading.Lock()  # serialises console access between input/output threads
data = False         # True while telemetry printing is enabled
stop = False         # global shutdown flag for all threads
jeff = False         # easter-egg trigger
bt_status = False    # True once the serial port is open (or faked)
bt_complete = False  # True once the connect attempt finished (success or not)
bt_read = ""         # latest raw line received over bluetooth
cycle = 0            # telemetry print counter
print_delay = 2 # 0.5 # print output delay, seconds
arming_delay = 0.05 # 0.05 # slow down arming speed, seconds
raw_data_points = 16 # amount of raw data points being sent by teensy data output
bt_fake_signal = "1;2;3;4;5;6;7;8;9;10;11;12;13;14;15;16;17;18;19;20"
null_data_message = "null"
undefined_data_message = "unreceived value"
port = 'COM6'
baud = 9600
bt = None  # serial.Serial handle once connected
def determine_action(c):
    """Dispatch a console command: toggle telemetry streaming, trigger the
    easter egg, or request a global shutdown."""
    global stop, data, jeff, bt
    if c in ("start data", "stop data"):
        streaming = (c == "start data")
        if bt is not None:
            message = "startData" if streaming else "stopData"
            bt.write(message.encode("utf-8"))
        data = streaming
    elif c == "jeff":
        jeff = True
    elif c == "quit":
        stop = True
def f_input():
    """Console-input thread: 'i' opens the command prompt, 'q' quits.

    Holds `lock` while the prompt is on screen so f_output stays quiet.
    """
    global stop, arming_status
    while not stop:
        try:
            getch_input = getch()
            key = getch_input.decode("utf-8")
            if key == "i":
                arming_status = False
                lock.acquire()
                clear()
                print("--------------------------------- \n'start data' to view data ouput \n'stop data' to close data output \n'quit' to stop the script \n--------------------------------- \nCommand input: \n---------------------------------")
                try:
                    command = input("").lower().strip()
                except:
                    # NOTE(review): when input() fails, `command` is unbound,
                    # so determine_action(command) below raises NameError and
                    # the outer handler shuts the thread down — but the lock
                    # was already released here while the success path
                    # releases it again at the end; review the pairing.
                    lock.release()
                determine_action(command)
                clear()
                lock.release()
            elif key == "q":
                # Deliberately escalate to the outer handler to stop all threads.
                raise Exception
        except:
            stop = True
            print("Input Termination Occured")
            break
    print("Closing Input Thread")
def process_data_packet():
    """Split the latest bluetooth line (global `bt_read`) into at least
    `raw_data_points` fields for display.

    An empty/separator-less packet yields `null_data_message` placeholders;
    a short packet is padded with `undefined_data_message`. The original
    short-packet branch indexed past the end of the packet and raised
    IndexError; extra fields beyond `raw_data_points` are ignored (the only
    consumer, print_data, reads exactly 16 fields).
    """
    global raw_data_points, null_data_message, undefined_data_message
    data_packet = bt_read
    if data_packet == "" or data_packet.count(";") == 0:
        raw_data = [null_data_message] * raw_data_points
    else:
        fields = data_packet.strip(";").split(";")
        raw_data = fields[:raw_data_points]
        # Pad a short packet up to the expected field count.
        raw_data += [undefined_data_message] * (raw_data_points - len(raw_data))
    return raw_data
def print_data():
    """Format and print one telemetry frame, then advance the cycle counter.

    Field order is fixed by the teensy firmware's output (16 values).
    """
    global cycle, bt_read
    raw_data = process_data_packet()
    data_string = "Cycle #" + str(cycle) + "\n----------------------------\n"
    data_string += "Altitude (BMP1): " + raw_data[0] + "\n"
    data_string += "Altitude (BMP2): " + raw_data[1] + "\n"
    data_string += "Altitude (BMP3): " + raw_data[2] + "\n"
    data_string += "Pressure (BMP1): " + raw_data[3] + "\n"
    data_string += "Pressure (BMP2): " + raw_data[4] + "\n"
    data_string += "Pressure (BMP3): " + raw_data[5] + "\n"
    data_string += "Temperature (BMP1): " + raw_data[6] + "\n"
    data_string += "Temperature (BMP2): " + raw_data[7] + "\n"
    data_string += "Temperature (BMP3): " + raw_data[8] + "\n"
    data_string += "Temperature (BNO): " + raw_data[9] + "\n"
    data_string += "Orientation (BNO): " + raw_data[10] + "\n"
    data_string += "Angular Velocity (BNO): " + raw_data[11] + "\n"
    data_string += "Linear Acceleration (BNO): " + raw_data[12] + "\n"
    data_string += "Net Acceleration (BNO): " + raw_data[13] + "\n"
    data_string += "Gravitational Acceleration (BNO): " + raw_data[14] + "\n"
    data_string += "Magnetic Field (BNO): " + raw_data[15]
    print(data_string)
    cycle += 1
def f_output():
    """Console-output thread: periodically repaints the status banner and,
    when enabled, the telemetry frame. Skips painting while f_input holds
    the console lock."""
    global data, stop, jeff, cycle, print_delay, arming_status
    command = ""
    while not stop:
        try:
            if not lock.locked():
                clear()
                if arming_status:
                    print("---------------------------- \nDarwin Connection Successful")
                print("----------------------------\n'i' for input, 'q' for quit \n---------------------------- \nBT: " + bt_read + "\n----------------------------")
            if data and not lock.locked():
                print_data()
            if jeff and not lock.locked():
                # One-shot easter egg; the ASCII/music rendition is disabled.
                jeff = False
                # ascii_path = "jeff.txt"
                # # # uncomment if pygame module is installed and you have a valid sound file in the directory
                # # music_path = "jeff.mp3"
                # # from pygame import mixer # Load the popular external library
                # # mixer.init()
                # # mixer.music.load(music_path)
                # # mixer.music.play()
                # # time.sleep(3.5)
                # f = open(ascii_path)
                # lines = f.read().splitlines()
                # f.close()
                # for line in lines:
                #     print(line)
                print("A curious one I see ;)")
                time.sleep(1)
            time.sleep(print_delay)
        except:
            print("Output Termination Occured")
            break
    print("Closing Output Thread")
def f_bluetooth():
    """Bluetooth thread: opens the serial port (or fakes it), then reads
    newline-terminated telemetry lines into the global `bt_read`."""
    global port, baud, stop, bt, bt_status, bt_read, bt_complete, bt_usage, bt_fake_signal
    while not stop:
        if not bt_status:
            if bt_usage:
                print("Connecting to Bluetooth... ('q' to quit)")
                time.sleep(1)
                try:
                    bt = serial.Serial(port=port, baudrate=baud)
                    if bt.is_open:
                        print("Successfully opened bluetooth serial on " + bt.name)
                        bt_status = True
                    else:
                        print("Could not find " + port)
                    bt_complete = True
                except:
                    print("Bluetooth Initialization Exception Occured")
                    bt_complete = True
                    stop = True
                    break
            else:
                # Fake mode: pretend connection succeeded and serve a canned line.
                bt_complete = True
                bt_status = True
                bt_read = bt_fake_signal
        elif bt_complete and bt_usage:
            try:
                response = ""
                # Skip bytes until the first payload byte, then accumulate
                # until the newline terminator.
                while not stop:
                    c = bt.read(1)
                    if c != b'':
                        response = str(c).strip("b").strip("'")
                        while not stop:
                            char = bt.read(1)
                            if char == b'\n':
                                break
                            response = response + str(char).strip("b").strip("'")
                        # Drop escaped line endings left by the str(bytes) round-trip.
                        response = response.replace("\\n", "").replace("\\r", "")
                        break
                bt_read = response
            except:
                print("Bluetooth Read Exception Occured")
                stop = True
                break
        else: # bluetooth not in use (for testing script)
            pass
    print("Closing Bluetooth Thread")
# --- Thread startup and main loop. ----------------------------------------
# All workers are daemon threads so the process exits with the main thread.
output_thread = threading.Thread(target=f_output, daemon=True)
input_thread = threading.Thread(target=f_input, daemon=True)
bluetooth_thread = threading.Thread(target=f_bluetooth, daemon=True)
bluetooth_thread.start()
bt_connect_timer = 0
while not bt_complete: # check for 'q' input to quit
    while kbfunc():
        if getch().decode("utf-8") == "q":
            raise SystemExit
    #clear()
    #print("------------------------------------------- \nConnecting to Darwin... (" + str(bt_connect_timer) + " cycles since start) ('q' to quit) \n-------------------------------------------")
    bt_connect_timer += 1
    time.sleep(arming_delay)
if bt_status:
    flush_input()
    # First assignment of the flag the input/output threads read as a global.
    arming_status = True
    clear()
    input_thread.start()
    output_thread.start()
    while True:
        try:
            # Busy-wait until the input thread finishes, then shut down.
            if input_thread.is_alive():
                continue
            else:
                lock.acquire()
                raise SystemExit
        except:
            input_thread.join()
            output_thread.join()
            bluetooth_thread.join()
            raise SystemExit
else:
    raise SystemExit
|
#ImportModules
import ShareYourSystem as SYS
#Need to define the attribute first, otherwise it is not set
# NOTE(review): setting an attribute on the built-in `object` raises
# TypeError under plain CPython; presumably ShareYourSystem patches or
# replaces `object` on import — confirm before relying on this example.
object.MyInt=0
#Define an Object
# NOTE(review): built-in object() accepts no keyword arguments; as above,
# this assumes a ShareYourSystem-provided `object`.
MyObject=object(**{'MyInt':4})
#print
print('MyObject is '+str(MyObject))
#print
print('MyObject.__dict__ is '+str(MyObject.__dict__))
|
import asyncio
import requests, json
from src.objs import *
from src.commands.addTorrent import addTorrent
from src.functions.floodControl import floodControl
from src.functions.referralCode import referralCode
from src.functions.keyboard import mainReplyKeyboard, githubAuthKeyboard
# Start handler
# Start handler
@bot.message_handler(commands=['start'])
def start(message):
    """Handle /start: plain greeting, or act on a deep-link parameter
    (github oauth, torrent add via DB key or tinyurl, or a login token)."""
    userId = message.from_user.id
    # Optional deep-link payload: "/start <params>".
    params = message.text.split()[1] if len(message.text.split()) > 1 else None
    userLanguage = dbSql.getSetting(userId, 'language')
    if not params:
        bot.send_message(message.chat.id, text=language['greet'][userLanguage], reply_markup=mainReplyKeyboard(userId, userLanguage))
    #! If start paramater is passed
    if params:
        sent = bot.send_message(message.chat.id, text=language['processing'][userLanguage])
        #! Github oauth
        if params.startswith('oauth'):
            code = params[6:]  # strip the "oauth-" prefix
            params = {'client_id': 'ba5e2296f2bbe59f5097', 'client_secret': config['githubSecret'], 'code':code}
            response = requests.get('https://github.com/login/oauth/access_token', params=params)
            #! Successfully authenticated
            if response.text[:13] == 'access_token=':
                accessToken = response.text[13:].split('&', 1)[0]
                headers = {'Authorization': f'token {accessToken}'}
                response = requests.get('https://api.github.com/user', headers=headers).json()
                if 'login' in response:
                    bot.edit_message_text(language['loggedInAs'][userLanguage].format(f"<a href='https://github.com/{response['login']}'>{response['login'].capitalize()}</a>"), chat_id=sent.chat.id, message_id=sent.id)
                    following = requests.get(f"https://api.github.com/users/{response['login']}/following").json()
                    #! User is following
                    if any(dicT['login'] == 'hemantapkh' for dicT in following):
                        dbSql.setSetting(userId, 'githubId', response['id'])
                        bot.send_message(chat_id=message.chat.id, text=language['thanksGithub'][userLanguage])
                    #! User is not following
                    else:
                        bot.send_message(chat_id=message.chat.id, text=language['ghNotFollowed'][userLanguage], reply_markup=githubAuthKeyboard(userLanguage))
            #! Error
            else:
                bot.edit_message_text(language['processFailed'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
        #! If add torrent paramater is passed via database key
        elif params.startswith('addTorrentDb'):
            key = params[13:]  # strip the "addTorrentDb-" prefix
            magnetLink = dbSql.getMagnet(key)
            asyncio.run(addTorrent(message, userLanguage, magnetLink, messageId=sent.id))
        #! If add torrent paramater is passed via URL
        elif params.startswith('addTorrentURL'):
            # Resolve the tinyurl redirect to recover the magnet link.
            url = f'https://tinyurl.com/{params[14:]}'
            response = requests.get(url, allow_redirects=False)
            magnetLink = response.headers['Location'] if 'Location' in response.headers else None
            asyncio.run(addTorrent(message, userLanguage, magnetLink, messageId=sent.id))
        #! Else, login token is passed
        else:
            data = requests.get(f"https://torrentseedrbot.herokuapp.com/getdata?key={config['databaseKey']}&id={params}")
            data = json.loads(data.content)
            if data['status'] == 'success':
                data = json.loads(data['data'])
                #! Login new account
                if data['type'] == 'login':
                    login(sent, userLanguage, data)
                elif data['type'] == 'refresh':
                    login(sent, userLanguage, data, refresh=True)
            else:
                bot.edit_message_text(language['processFailed'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
#: Account login
def login(sent, userLanguage, data, refresh=False):
userId = sent.chat.id
bot.edit_message_text(language['refreshing'][userLanguage] if refresh else language['loggingIn'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
if refresh:
ac = dbSql.getDefaultAc(userId)
if ac:
email = ac['email']
password = ac['password']
response = seedrAc.login(email, password, data['captchaResponse'])
else:
response = None
bot.edit_message_text(language['noAccount'][userLanguage] if refresh else language['loggingIn'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
else:
email = data['email']
password = data['password']
response = seedrAc.login(email, password, data['captchaResponse'])
if response:
cookies = requests.utils.dict_from_cookiejar(response.cookies)
response = response.json()
#! If account logged in successfully
if 'remember' in cookies:
dbSql.setAccount(userId, accountId=response['user_id'], userName=response['username'], email=email, password=password, cookie=f"remember={cookies['remember']}")
bot.delete_message(chat_id=sent.chat.id, message_id=sent.id)
bot.send_message(chat_id=sent.chat.id, text=language['refreshed'][userLanguage].format(response['username']) if refresh else language['loggedInAs'][userLanguage].format(response['username']), reply_markup=mainReplyKeyboard(userId, userLanguage))
else:
#! Captcha failed
if response['error'] == 'RECAPTCHA_FAILED':
bot.edit_message_text(language['captchaFailled'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
#! Wrong username or password
elif response['error'] == 'INCORRECT_PASSWORD':
if refresh:
dbSql.deleteAccount(userId, ac['id'])
bot.delete_message(chat_id=sent.chat.id, message_id=sent.id)
bot.send_message(text=language['incorrectPassword'][userLanguage], chat_id=sent.chat.id, reply_markup=mainReplyKeyboard(userId, userLanguage))
else:
bot.edit_message_text(language['incorrectPassword'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
#! Unknown error
else:
bot.edit_message_text(language['unknownError'][userLanguage].format(response.text), chat_id=sent.chat.id, message_id=sent.id) |
"""""
Object Oriented Programming
Class:
A class is a template that defines the object
You can create a class for anything
"""""
class Car(object):
    """A simple record of a car's make and model."""
    def __init__(self, make, model):
        # `self` is the instance being initialised; attach both fields to it.
        self.make = make
        self.model = model
c1 = Car('bmw',"5501")
c2 = Car('benz',"E-Class")
# Each instance keeps its own attribute values.
for demo_car in (c1, c2):
    print(demo_car.make)
    print(demo_car.model)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
def original_sequences(k):
    """Yield every length-k string over the alphabet {'G', 'L'}."""
    for letters in itertools.product('GL', repeat=k):
        yield ''.join(letters)
def produce_artwork(os, c):
    """Expand the original sequence through c-1 substitution rounds:
    each 'L' expands to the original sequence `os`, each 'G' to a run of
    'G's of the same length.

    Note: `os` is the puzzle's "original sequence" parameter, not the
    standard-library os module (unused here).
    """
    a = os
    g = 'G' * len(os)
    # range() keeps this working on Python 2 and 3 alike (the original used
    # the Python-2-only xrange()).
    for _ in range(c - 1):
        a = ''.join([os if t == 'L' else g for t in a])
    return a
"""
6 6 1: 1866=6*311
5 1 5: 1 2 3 4 5
5 2 3: 2 14 15
5 3 3: 8 20
5 4 2: 39 40
5 5 1: 195=5*39 or 3125-194
5 6 1: 195
4 1 4: 1 2 3 4
4 2 3: 2 12
4 3 2: 2 12 or 7 8
4 4 1: 28=4*7 or 256-27
4 5 1: 28
4 6 1: 28
3 1 3: 1 2 3
3 2 2: 2 3
3 3 1: 6=3*2 or 27-5
3 4 1: 6
2 1 2: 1 2
2 2 1: 2
2 3 1: 2
"""
def print_table(k, c, s):
    """Render one line per original sequence: "<sequence>: <artwork>"."""
    rows = ['{0}: {1}\n'.format(seq, produce_artwork(seq, c))
            for seq in original_sequences(k)]
    return ''.join(rows)
def print_table_transpose(k, c, s):
    """For each output column of the stacked artworks, report its 1-based
    position, its 'G' count, and the column itself — keeping only the
    columns that tie for the highest 'G' count (sorted descending, the loop
    breaks at the first lower count)."""
    out = ''
    co = 0
    # zip(*artworks) transposes: each `ta` is one column (tuple of chars).
    for t in sorted([(i+1, ta.count('G'), ''.join(ta)) for i, ta in enumerate(zip(*[produce_artwork(os, c) for os in original_sequences(k)]))], key=lambda x: x[1], reverse=True):
        if t[1] < co: break
        co = t[1]
        out += '{0:3} {1:2} {2}\n'.format(t[0], t[1], t[2])
    return out
def solve(k, c, s):
    """Counting-sheep solver: treat the first argument as N and accumulate
    multiples of N until every decimal digit 0-9 has appeared.

    Returns the last multiple reached, or 'INSOMNIA' for N == 0 (no new
    digit ever appears). The original body referenced an undefined name `n`
    (NameError on every call) and shadowed the built-in `sum`; `c` and `s`
    stay in the signature for compatibility with the common solver driver.
    """
    n = k
    if n == 0:
        return 'INSOMNIA'
    total = n
    seen = set(str(total))
    while len(seen) < 10:
        total += n
        seen.update(str(total))
    return total
def solve_small(k, c, s):
    """Return the first k positive integers as a space-separated string."""
    return ' '.join(str(i) for i in range(1, k + 1))
if __name__ == "__main__":
for case in xrange(1, 1+input()):
print "Case #{0}: {1}".format(case, solve_small(*[int(x) for x in raw_input().strip().split()]))
#print "Case #{0}: {1}".format(case, solve(*[int(x) for x in raw_input().strip().split()])),
#print "Case #{0}\n{1}".format(case, print_table(*[int(x) for x in raw_input().strip().split()])),
#print "Case #{0}\n{1}".format(case, print_table_transpose(*[int(x) for x in raw_input().strip().split()])), |
from csv import DictReader
from .TileNode import TileNode
from .NodeSet import NodeSet
# Read in locations.txt and parse data as ImageData class, compile into ImageSet
def parse_locations(locationsFile):
    """Read a space-delimited locations file and wrap each row in a
    TileNode, returning the collection as a NodeSet."""
    with open(locationsFile) as csvFile:
        reader = DictReader(csvFile, delimiter=' ')
        nodes = [
            TileNode(
                file_name=row["#name"],
                lat=row["latitude/Y"],
                longi=row["longitude/X"],
                height=row["height/Z"],
            )
            for row in reader
        ]
    return NodeSet(nodes)
|
# Demonstrate int() parsing the same value from several numeric bases
# (decimal, binary, octal, hexadecimal).
for literal, base in (('23', 10), ('10111', 2), ('27', 8), ('17', 16)):
    a = int(literal, base)
    print(f'Resultado: {a}')
|
# 1. Pedir dos números por teclado e imprimir la suma de ambos.
# (Exercise 1: read two numbers from the keyboard and print their sum.)
print("Ejercicio 1")
def sumar(numero_1, numero_2):
    """Return the sum of the two numbers."""
    return numero_1 + numero_2
# Read both operands from the keyboard and show their sum.
numero_1 = float(input("Introduce el primer número: "))
numero_2 = float(input("Introduce el segundo número: "))
resultado = sumar (numero_1, numero_2)
print ("El resultado es: " + str(resultado))
__author__ = 'Hk4Fun'
__date__ = '2018/1/7 19:02'
'''题目描述:
判断两个无环链表listA和listB是否相交,如果相交,返回相交的节点在listB中的索引(从0开始),否则返回False
'''
'''主要思路:
可以让其中一个链表(不妨设是listA)的尾节点连接到其头部,这样在listB中就一定会出现一个环,
这样就将问题分别转化成了15_3和15_4(交点即为环的入口点)
(参考题37,给出其他算法思路,不要被前面几道题的思路给局限了)
'''
class ListNode:
    """Singly linked list node holding a value and a next pointer."""
    def __init__(self, x):
        # New nodes start detached (no successor).
        self.val = x
        self.next = None
class Solution:
    def ListIntersect(self, headA, headB):
        """Return the index (0-based, within listB) of the intersection node
        of two acyclic lists, False if they do not intersect, or None when
        either list is empty.

        Strategy: link listA's tail to its head, turning a shared node into
        a cycle entry reachable from listB; detect the cycle with
        Floyd's fast/slow pointers, then advance two pointers (one from
        listB's head, one from the meeting point) in lockstep to find the
        entry. Unlike the original, listA's tail link is restored before
        returning, so the input lists are left unmodified.
        """
        if not headA or not headB: return
        tailA = headA
        while tailA.next:  # find listA's tail
            tailA = tailA.next
        tailA.next = headA  # temporarily close listA into a ring
        try:
            # Cycle detection in listB via fast/slow pointers.
            fast = slow = headB
            while fast.next and fast.next.next:
                fast = fast.next.next
                slow = slow.next
                if slow is fast: break
            else:  # loop ran to completion: no cycle, hence no intersection
                return False
            # Locate the cycle entry = intersection node; count steps from headB.
            index = 0
            pHead = headB
            pMeet = fast
            while pHead != pMeet:
                pHead = pHead.next
                pMeet = pMeet.next
                index += 1
            return index
        finally:
            # Undo the temporary ring so listA is unchanged for the caller.
            tailA.next = None
# ================================测试代码================================
from Test import Test
class MyTest(Test):
    def my_test_code(self):
        """Build the test-case table for ListIntersect."""
        # Fill in the test cases here only.
        # Each entry of testArgs is one test in two parts:
        # the arguments for the function under test, followed by (last item)
        # the expected answer.
        testArgs = []
        headA = ListNode(1)
        headA.next = ListNode(2) # listA: 1 -> 2 -> None
        headB = ListNode(1) # listB: 1 -> None
        testArgs.append([headA, headB, False]) # no intersection
        headA = ListNode(1)
        headA.next = ListNode(2) # listA: 1 \
        headB = ListNode(1) #                2 -> None
        headB.next = headA.next # listB:  1 /
        testArgs.append([headA, headB, 1]) # intersect
        headA = ListNode(1)
        headA.next = ListNode(2) # listA: 1 ->2 -> None
        headB = ListNode(1) #                |
        headB.next = headA # listB:          1
        testArgs.append([headA, headB, 1]) # intersect
        headA = ListNode(1) # listA: 1 -> None
        headB = ListNode(1) #        |
        headB.next = headA # listB:  1
        testArgs.append([headA, headB, 1]) # intersect
        testArgs.append([[], [], None]) # empty lists
        return testArgs
if __name__ == '__main__':
    # Run the test table above against Solution.ListIntersect.
    solution = Solution()
    MyTest(solution=solution).start_test()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 22:56:10 2018
@author: Inigo
"""
# =============================================================================
# LIBRARIES
# =============================================================================
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import requests
import os
from datetime import datetime
from datetime import timedelta
from keys import *
import json
# =============================================================================
# DATA IMPORTATION
# =============================================================================
csv_file = "data/2018_09_setembre_bicing_estacions.csv"
df = pd.read_csv(csv_file, sep=';',decimal=',')
date_format = '%d/%m/%y %H:%M:%S'
# =============================================================================
# DATA PREPROCESSING
# =============================================================================
df_electric = df[df['type']=='BIKE-ELECTRIC']
#plt.scatter(df_electric['latitude'],df_electric['longitude'])
df_electric['datetime'] = pd.to_datetime(df_electric['updateTime'],format=date_format)
df_electric['date'] = df_electric['datetime'].dt.date
df_electric = df_electric.assign(total_space = df_electric.slots+df_electric.bikes)
df_electric = df_electric.assign(flux = df_electric.groupby('id')['bikes'].diff(1) )
df_electric = df_electric[df_electric['flux']!=0].dropna()
# =============================================================================
# COST ESTIMATION
# =============================================================================
def route_estimation(waypoint0, waypoint1, app_id='', app_code=''):
    """Query the HERE routing API for the fastest bicycle route between two
    "lat,lon" waypoints.

    Returns [travelTime, distance, trajectory], where trajectory is the list
    of maneuver positions along the route's first leg.
    """
    service = "https://route.api.here.com/routing/7.2/calculateroute.json"
    options = "?app_id={}&app_code={}&waypoint0=geo!{}&waypoint1=geo!{}&mode=fastest;bicycle"
    query = (service+options).format(app_id, app_code, waypoint0, waypoint1)
    r = requests.get(query)
    # Decode the JSON body once; the original re-parsed it for every field.
    leg = r.json()['response']['route'][0]['leg'][0]
    travelTime = leg['travelTime']
    distance = leg['length']
    trajectory = [m['position'] for m in leg['maneuver']]
    return [travelTime, distance, trajectory]
# One representative row per station (its first snapshot) for coordinates.
stations = df_electric.groupby('id').first()#.reset_index()
stations_file = "stations.json"
if not os.path.isfile(stations_file):
    stations.reset_index().to_json(stations_file,'records')
# All ordered station pairs, then deduplicated into unordered pairs.
stations_pairs = [(x, y) for x in stations.index for y in stations.index if x != y]
stations_pairs = list(set((i,j) if i<=j else (j,i) for i,j in stations_pairs))
def get_point(x):
    """Format a station row as 'lat,lon' for the HERE geo! waypoint syntax."""
    lat = x['latitude'].astype(str)
    lon = x['longitude'].astype(str)
    return lat + ',' + lon
cost_file = 'cost.csv'
if not os.path.isfile(cost_file):
    # First run: query the routing API for every unordered station pair.
    cost_list = []
    for i,j in stations_pairs:
        print(i,j)
        waypoint0 = get_point(stations.loc[i])
        waypoint1 = get_point(stations.loc[j])
        [travelTime, distance, trajectory] = route_estimation(waypoint0, waypoint1, app_id, app_code)
        cost_list.append([i,j, travelTime, distance, trajectory])
    cost_cols = ['origin','destination','travelTime','distance','trajectory']
    df_cost = pd.DataFrame(cost_list,columns=cost_cols)
    # Mirror each pair so both directions exist, reversing the trajectory.
    df_cost_inv = df_cost.copy()
    df_cost_inv['origin'] = df_cost['destination']
    df_cost_inv['destination'] = df_cost['origin']
    df_cost_inv['trajectory'] = [list(reversed(x)) for x in df_cost['trajectory']]
    df_cost = pd.concat([df_cost,df_cost_inv])
    # Attach endpoint coordinates for later plotting/export.
    df_cost['origin_latitude'] = stations.loc[df_cost.origin]['latitude'].values
    df_cost['origin_longitude'] = stations.loc[df_cost.origin]['longitude'].values
    df_cost['destination_latitude'] = stations.loc[df_cost.destination]['latitude'].values
    df_cost['destination_longitude'] = stations.loc[df_cost.destination]['longitude'].values
    df_cost.to_csv(cost_file,index=False)
else:
    df_cost = pd.read_csv(cost_file)
    # Trajectories round-trip through CSV as strings; parse them back safely.
    import ast
    df_cost.trajectory = df_cost.trajectory.apply(ast.literal_eval)
# =============================================================================
# POSSIBLE TRIPS
# =============================================================================
col_names = ["origin","origin_time","destination","destination_time","cost"]
origin_names = {"id": col_names[0], "datetime": col_names[1]}
destiny_names = {"id": col_names[2], "datetime": col_names[3]}
def trips_calculation(row):
    """For one station flux event, enumerate candidate trips that could
    explain it: opposite-direction events at other stations within +-1 day
    whose observed duration is close to the routed travel time (relative
    cost below k), plus one 'exception' pseudo-trip with cost k.

    NOTE(review): a row with flux == 0 leaves `trips`/`extra` unbound and
    would raise NameError — upstream filtering drops flux == 0 rows.
    NOTE(review): DataFrame.append was removed in pandas 2.0; this needs
    pd.concat under modern pandas.
    """
    k = 1
    cond1 = (df_electric.id != row.id)
    cond3 = (df_electric.date >= row.date - timedelta(days=1))
    cond4 = (df_electric.date <= row.date + timedelta(days=1))
    if row.flux > 0:
        # Bikes arrived here: candidate origins are earlier events elsewhere.
        cond2 = (df_electric.updateTime < row.updateTime)
        trips = df_electric[cond1 & cond2 & cond3 & cond4][['id','datetime']].rename(columns=origin_names)
        trips[col_names[2]] = row['id']
        trips[col_names[3]] = row['datetime']
        extra = [0,0,row['id'],row['datetime'],k]
    elif row.flux < 0:
        # Bikes left here: candidate destinations are later events elsewhere.
        cond2 = (df_electric.updateTime > row.updateTime)
        trips = df_electric[cond1 & cond2 & cond3 & cond4][['id','datetime']].rename(columns=destiny_names)
        trips[col_names[0]] = row['id']
        trips[col_names[1]] = row['datetime']
        extra = [row['id'],row['datetime'],0,0,k]
    trips = trips.assign(duration=(trips['destination_time']-trips['origin_time']).dt.total_seconds())
    # Compare the observed duration against the routed travel time.
    trips_ext = trips.merge(df_cost,on=['origin','destination'])
    trips_ext['cost'] = round(abs(trips_ext.duration - trips_ext.travelTime)/trips_ext.travelTime,2)
    trips_ext = trips_ext[trips_ext['cost']<k][col_names]
    # The 'exception' pseudo-trip lets the optimiser leave the event unmatched.
    exception = pd.DataFrame([extra], columns=col_names)
    trips_ext = trips_ext.append(exception)
    return(trips_ext)
trips_file = 'trips.csv'
if not os.path.isfile(trips_file):
    # Build candidate trips for every flux event, fanned out over threads.
    tmp = []
    for index, row in df_electric.iterrows():
        tmp.append(row)
    from multiprocessing.dummy import Pool as ThreadPool
    pool = ThreadPool(16)
    trips_list = pool.map(trips_calculation, tmp)
    pool.close()
    pool.join()
    sum([m.shape[0] for m in trips_list])
    df_trips = pd.concat(trips_list)
    df_trips.to_csv(trips_file,index=False)
else:
    df_trips = pd.read_csv(trips_file)
# Assemble the LP inputs (cost vector c, demand vector b, incidence matrix A)
# and save them for the external CPLEX/Matlab solver.
opt_file = "opt.mat"
if not os.path.isfile(opt_file):
    from scipy.sparse import lil_matrix, csr_matrix
    c = csr_matrix(df_trips.cost, dtype=np.double )
    b = csr_matrix(abs(df_electric.flux), dtype=np.int16 )
    A = lil_matrix( (df_electric.shape[0],df_trips.shape[0]), dtype=np.int16 )#.todense()
    cont = 0
    for index, row in df_electric.iterrows():
        if cont % 10==0:
            print(cont)
        # Each constraint row marks the trips that start/end at this event.
        if row.flux < 0:
            A[cont,:] = ((row.id == df_trips.origin) & (row.datetime == df_trips.origin_time))
        if row.flux > 0:
            A[cont,:] = ((row.id == df_trips.destination) & (row.datetime == df_trips.destination_time))
        cont += 1
    from scipy.io import savemat
    savemat(opt_file, {'A':A,'b':b,'c':c})
else:
    from scipy.io import loadmat
    d = loadmat(opt_file)
    A = d['A']; b = d['b']; c = d['c']
# Send to cplex matlab
# Solver output: one flow value per candidate trip.
df_trips['flow'] = pd.read_csv('solution.csv')
###############################################################################
# Prepare Data for P5.js
# Keep only real trips with positive flow (drop the 'exception' pseudo-trips).
df_solution = df_trips[(df_trips['flow']>0) & (df_trips['origin']!=0) & (df_trips['destination']!=0)]
plt.hist(df_solution.flow)
df_solution.groupby('origin').size().sort_values(ascending=False)
df_solution.groupby('destination').size().sort_values(ascending=False)
df_complete = pd.merge(df_cost,df_solution,how="right",on=['origin','destination'])
df_complete['origin_datetime'] = pd.to_datetime(df_complete['origin_time'])
df_complete['destination_datetime'] = pd.to_datetime(df_complete['destination_time'])
df_complete['origin_datetime'].value_counts()
df_complete['destination_datetime'].value_counts()
# Snap trip times onto a common 15-minute grid.
times_origin = pd.Series(df_complete['origin_datetime'].unique()).sort_values().dt.round('15min')
times_destination = pd.Series(df_complete['destination_datetime'].unique()).sort_values().dt.round('15min')
minimum = min([min(times_origin),min(times_destination)])
maximum = max([max(times_origin),max(times_destination)])
timestamps = pd.date_range(minimum,maximum,freq='15T')#.tolist()
def find_nearest(x, elem):
    """Index of the entry of x closest to elem (first one on ties)."""
    deltas = np.abs(x - elem)
    return np.where(deltas == deltas.min())[0][0]
# Map each trip endpoint onto the nearest 15-minute grid timestamp.
df_complete['origin_idx'] = df_complete['origin_datetime'].apply(lambda x: find_nearest(timestamps,x))
df_complete['origin_timestamp'] = timestamps[df_complete['origin_idx']]
df_complete['destination_idx'] = df_complete['destination_datetime'].apply(lambda x: find_nearest(timestamps,x))
df_complete['destination_timestamp'] = timestamps[df_complete['destination_idx']]
print(json.dumps(json.loads(df_complete.iloc[0:1].to_json(orient='index')), indent=2))
# Export one sample day for the P5.js visualisation.
one_day = df_complete[(df_complete['origin_datetime']>"2018-09-09") & (df_complete['origin_datetime']<"2018-09-10")]
with open('one_day.json', 'w') as outfile:
    json.dump(json.loads(one_day.to_json(orient='records')),outfile)
###############################################################################
## DATA PREPARATION FOR KEPLER.GL
df_complete.to_csv('complete.csv', index=False)
###############################################################################
# Prepare Data for Deck.GL
# Rebuild the solution join from scratch for the Deck.GL export.
df_solution = df_trips[(df_trips['flow']>0) & (df_trips['origin']!=0) & (df_trips['destination']!=0)]
df_complete = pd.merge(df_cost,df_solution,how="right",on=['origin','destination'])
df_complete['origin_datetime'] = pd.to_datetime(df_complete['origin_time'])
df_complete['destination_datetime'] = pd.to_datetime(df_complete['destination_time'])
from math import sin, cos, sqrt, atan2, radians
def distance_lat_lon(lat1, lon1, lat2, lon2):
    """Great-circle (haversine) distance in km between two lat/lon points.

    Coordinates are given in degrees; the result uses an approximate Earth
    radius of 6373 km.
    """
    earth_radius_km = 6373.0
    phi1, phi2 = radians(lat1), radians(lat2)
    lam1, lam2 = radians(lon1), radians(lon2)
    # Haversine formula: a is the squared half-chord length between the points.
    half_chord = sin((phi2 - phi1) / 2)**2 + cos(phi1) * cos(phi2) * sin((lam2 - lam1) / 2)**2
    central_angle = 2 * atan2(sqrt(half_chord), sqrt(1 - half_chord))
    return earth_radius_km * central_angle
# Work on trips ordered by departure time (the iloc slice was used for debugging).
selection = df_complete.sort_values('origin_time')#.iloc[500:2000]
# Unix-epoch bounds of the animation window.
tmin = np.min(selection.origin_datetime)
tmin = int(tmin.timestamp())
tmax = np.max(selection.destination_datetime)
tmax = int(tmax.timestamp())
# Real duration in seconds, remapped later onto `loop_deck` deck.gl time units.
loop = tmax-tmin
loop_deck = 2000
# NOTE(review): the 't' column is assigned but never read afterwards in this chunk.
selection['t'] = [x.floor('30min') for x in selection.origin_datetime]
# Hourly grid over the window; count how many trips are in-flight at each tick.
time_resolution = pd.to_datetime(list(range(tmin, tmax, 60*60)), unit='s')
time_resolution_count = np.zeros(len(time_resolution))
for row, item in selection.iterrows():
    for i in range(len(time_resolution)):
        if (time_resolution[i] > item.origin_datetime) & (time_resolution[i] < item.destination_datetime):
            time_resolution_count[i] = time_resolution_count[i] + item.flow;
s = pd.DataFrame({"t":time_resolution, "count":time_resolution_count})
s.t = s.t.astype(str)
# NOTE(review): row[0] is a *positional* lookup, so which column it returns
# depends on the DataFrame's column order (old pandas sorted dict keys; modern
# pandas keeps insertion order, making row[0] the 't' column). This should
# presumably be row['count'] — verify against the pandas version in use.
trips_count = [{"date":row['t'], "count":row[0]} for index, row in s.iterrows()]
with open('trips_count.json', 'w') as outfile:
    json.dump(trips_count, outfile)
# Round the peak concurrent-trip count up to the next multiple of 10.
maxCount = np.ceil(np.max([x['count'] for x in trips_count])/10)*10
trips_init = {"tmin":tmin,"tmax":tmax,"maxCount": maxCount}
with open('trips_init.json', 'w') as outfile:
    json.dump(trips_init, outfile)
# Build the deck.gl "trips" payload: one record per unit of flow, each a list
# of [lon, lat, animation-time] waypoints along the stored trajectory.
to_save = []
for row, item in selection.iterrows():
    # One animated trip per unit of flow on this OD pair.
    for i in range(int(item.flow)):
        # vendor selects the trail colour in deck.gl: 1 for long trips (>1 km).
        vendor = 1 if item.distance > 1000 else 0; #item['flow']
        segments = []
        # time interpolation based on distance
        # Small random jitter (~1e-4 deg) keeps overlapping trips visually distinct.
        segments.append([item['origin_longitude'] + np.random.rand()*1e-4,
                         item['origin_latitude'] + np.random.rand()*1e-4,
                         np.round((int(item['origin_datetime'].timestamp())-tmin + np.random.rand()*30)/loop*loop_deck,4) ])
        t = item['trajectory']
        n = len(t)
        # Cumulative fraction of total path length at each trajectory vertex,
        # used to spread the trip's duration proportionally along the path.
        d = [distance_lat_lon(t[i]['latitude'],t[i]['longitude'],t[i+1]['latitude'],t[i+1]['longitude']) for i in range(0,n-1)]
        d = np.cumsum(d/np.sum(d))
        # segments.append([item['destination_latitude'],
        #                  item['destination_longitude'],
        #                  item['destination_datetime'].timestamp() ])
        tdelta = (item.destination_datetime-item.origin_datetime)
        for i in range(0,n-1):
            # Timestamp of this vertex, linearly interpolated by path fraction.
            pt = item.origin_datetime+d[i]*tdelta
            segments.append([t[i+1]['longitude'] + np.random.rand()*1e-4,
                             t[i+1]['latitude'] + np.random.rand()*1e-4,
                             np.round((int(pt.timestamp())-tmin + np.random.rand()*30)/loop*loop_deck,4)])
        to_save.append({"vendor":vendor,"segments":segments})
with open('trips.json', 'w') as outfile:
    json.dump(to_save, outfile)
|
import os
import urllib.request
class GetPoliticalParty:
    """Download party-affiliation zip archives from the Brazilian electoral
    authority (TSE) for every (party, state) combination.

    Parameters
    ----------
    estados : list of str
        Two-letter state abbreviations used in the TSE file names.
    partidos : list of str
        Party acronyms used in the TSE file names.

    Downloading starts immediately from ``__init__``.
    """

    def __init__(self, estados, partidos):
        # Recreate the working directory from scratch on every run.
        # NOTE(review): successful downloads are later moved into "dados/",
        # not into this directory — confirm which destination is intended.
        os.system("rm -r DadosEleitorais")
        os.system("mkdir DadosEleitorais")
        self.estados = estados
        self.partidos = partidos
        self.get_data()

    def _make_url(self, partido, estado):
        """Build the (url, filename) pair for one (party, state) index pair.

        BUG FIX: the original read the module-level globals ``partidos`` and
        ``estados``, so the class only worked inside the __main__ script;
        it now uses the instance attributes.
        """
        nome = self.partidos[partido] + "_" + self.estados[estado] + ".zip"
        url = (
            "http://agencia.tse.jus.br/estatistica/sead/eleitorado/filiados/uf/filiados_"
            + nome
        )
        return url, nome

    def _request_data(self, url, nome):
        """Download ``url`` into the local file ``nome``.

        Returns True on success and False on any download error. Failures
        are best-effort skipped (matching the original behaviour), but the
        except clause is now narrowed instead of a bare ``except``.
        """
        try:
            urllib.request.urlretrieve(url, nome)
            print("Ok")
            return True
        except (urllib.error.URLError, OSError):
            print("Deu Erro")
            return False

    def get_data(self):
        """Fetch every (party, state) archive and move successes to "dados/"."""
        for partido in range(len(self.partidos)):
            for estado in range(len(self.estados)):
                url, nome = self._make_url(partido=partido, estado=estado)
                if self._request_data(url, nome):
                    os.system("mv " + nome + " dados/")
if __name__ == "__main__":
# Lista de Estados
estados = [
"ac",
"al",
"ap",
"am",
"ba",
"ce",
"df",
"es",
"go",
"ma",
"mt",
"ms",
"mg",
"pa",
"pb",
"pr",
"pe",
"pi",
"rj",
"rn",
"rs",
"ro",
"rr",
"sc",
"sp",
"se",
"to",
]
# Lista de Partidos
partidos = [
"phs",
"ppl",
"prp",
"avante",
"cidadania",
"dc",
"dem",
"mdb",
"novo",
"patriota",
"pcb",
"pcdob ",
"pco",
"pdt",
"pl",
"pmb",
"pmn",
"pode",
"pp",
"pros",
"prtb",
"psb",
"psc",
"psd",
"psdb",
"psl",
"psol",
"pstu",
"pt",
"ptb",
"ptc",
"pv",
"rede",
"republicanos",
"solidariedade",
"up",
]
GetPoliticalParty(estados=estados, partidos=partidos)
|
"""mineral_catalog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from . import views
# URL routes for the mineral catalog app. All list views share the same
# "filter by attribute" shape; the slug route must stay last so it does not
# shadow the literal prefixes above it.
urlpatterns = [
    # Home page: full mineral listing.
    path('', views.mineral_list, name='list'),
    # Minerals filtered by first letter of their name.
    path('by-letter/<letter>/',
         views.mineral_list_by_letter,
         name='mineral_list_by_letter'
         ),
    # Minerals filtered by group.
    path('by-group/<group>/',
         views.mineral_list_by_group,
         name='mineral_list_by_group'
         ),
    # Minerals filtered by color.
    path('by-color/<color>/',
         views.mineral_list_by_color,
         name='mineral_list_by_color'
         ),
    # Free-text search results.
    path('results/',
         views.search_minerals,
         name='search'
         ),
    # Detail page for a random mineral.
    path('random/', views.random_mineral_detail, name='random'),
    # Detail page by primary key (slug-style); catch-all, keep last.
    path('<slug:pk>/', views.mineral_detail, name='detail'),
]
|
"""
Stochastic Gacs-Korner common information. See "Gacs-Korner Common Information
Variational Autoencoder" for details.
"""
import numpy as np
from ...algorithms import BaseAuxVarOptimizer
from ...helpers import normalize_rvs
from ...utils import unitful
__all__ = (
'StochasticGKCommonInformation',
)
class StochasticGKCommonInformation(BaseAuxVarOptimizer):
    """
    Optimizer for the stochastic Gacs-Korner common information.

    It searches for an auxiliary variable W whose conditional distribution
    p(w|x_i) agrees for every random variable x_i (enforced as an equality
    constraint) while maximizing the conditional mutual information between
    one of the `rvs` and W given `crvs`.
    """

    name = ""
    description = ""

    def __init__(self, dist, rvs=None, crvs=None, bound=None, rv_mode=None):
        """
        Initialize the optimizer.

        Parameters
        ----------
        dist : Distribution
            The distribution to compute the auxiliary Markov variable, W, for.
        rvs : list, None
            A list of lists. Each inner list specifies the indexes of the random
            variables to render conditionally independent. If None, then all
            random variables are used, which is equivalent to passing
            `rvs=dist.rvs`.
        crvs : list, None
            A single list of indexes specifying the random variables to
            condition on. If None, then no variables are conditioned on.
        bound : int
            Place an artificial bound on the size of W.
        rv_mode : str, None
            Specifies how to interpret `rvs` and `crvs`. Valid options are:
            {'indices', 'names'}. If equal to 'indices', then the elements of
            `crvs` and `rvs` are interpreted as random variable indices. If
            equal to 'names', the the elements are interpreted as random
            variable names. If `None`, then the value of `dist._rv_mode` is
            consulted, which defaults to 'indices'.
        """
        super().__init__(dist, rvs=rvs, crvs=crvs, rv_mode=rv_mode)

        # Cap the auxiliary alphabet at the theoretical bound, or at the
        # user-supplied bound if it is tighter.
        theoretical_bound = self.compute_bound()
        bound = min(bound, theoretical_bound) if bound else theoretical_bound

        self._construct_auxvars([({0}, bound)])

        # Equality constraint: p(w|x_i) must match p(w|x_j) for all i, j.
        self.constraints += [{'type': 'eq',
                              'fun': self.constraint_match_conditional_distributions,
                              },
                             ]

    def compute_bound(self):
        """
        Return a bound on the cardinality of the auxiliary variable.

        Returns
        -------
        bound : int
            The bound on the size of W.
        """
        return 2 * min(self._shape[:len(self._rvs)]) + 1

    def constraint_match_conditional_distributions(self, x):
        """
        Ensure that p(z|x_i) = p(z|x_j) for all i, j.

        Parameters
        ----------
        x : np.ndarray
            An optimization vector.

        Returns
        -------
        delta : float
            Scaled squared-error penalty; zero exactly when all conditional
            distributions of the auxiliary variable agree on the support.
        """
        joint = self.construct_joint(x)
        # Support of the joint over the `rvs` alone (crvs and aux summed out);
        # the constraint is only evaluated where the joint has mass.
        rv_joint = joint.sum(axis=tuple(self._crvs | self._arvs))
        idxs = [idx for idx, support in np.ndenumerate(~np.isclose(rv_joint, 0.0)) if support]
        marginals = []
        for rv in self._rvs:
            others = tuple(self._rvs - {rv})
            p_xyz = joint.sum(axis=others)
            p_xy = p_xyz.sum(axis=2, keepdims=True)
            # Conditional distribution of the auxiliary variable given x_rv.
            p_z_g_xy = p_xyz / p_xy
            marginals.append(p_z_g_xy)
        delta = 0
        # Compare every other conditional against the first one, pairwise
        # over the supported outcomes.
        target_marginal = marginals[0]
        for idx in idxs:
            for i, m in zip(idx[1:], marginals[1:]):
                delta += ((target_marginal[idx[0]] - m[i])**2).sum()
        # The factor of 100 weights the constraint against the objective.
        return 100 * delta

    def _objective(self):
        """
        The mutual information between the auxiliary random variable and `rvs`.

        Returns
        -------
        obj : func
            The objective function.
        """
        # Closure over the CMI functional; `min(self._rvs)` picks one
        # representative rv (the constraint forces all of them to agree).
        conditional_mutual_information = self._conditional_mutual_information({min(self._rvs)}, self._arvs, self._crvs)

        def objective(self, x):
            """
            Compute I[rv_i : W | crvs]

            Parameters
            ----------
            x : np.ndarray
                An optimization vector.

            Returns
            -------
            obj : float
                The value of the objective.
            """
            pmf = self.construct_joint(x)
            # Negated: the optimizer minimizes, so the CMI is maximized.
            return -conditional_mutual_information(pmf)

        return objective
@unitful
def stochastic_gk_common_information(dist, rvs=None, crvs=None, niter=None, maxiter=1000, polish=1e-6, bound=None, rv_mode=None):
    """
    Compute the stochastic Gacs-Korner common information of `dist`: the
    largest I[X:W|crvs] achievable by an auxiliary variable W whose
    conditional distribution is the same given any single one of the `rvs`.

    Parameters
    ----------
    dist : Distribution
        The distribution from which the common information is computed.
    rvs : list, None
        A list of lists. Each inner list specifies the indexes of the random
        variables used to calculate the common information. If None, all
        random variables are used, which is equivalent to passing
        `rvs=dist.rvs`.
    crvs : list, None
        A single list of indexes specifying the random variables to condition
        on. If None, then no variables are conditioned on.
    niter : int, None
        Number of basin-hopping iterations to perform (optimizer-dependent).
    maxiter : int
        Maximum number of iterations for each optimization pass.
    polish : float
        Threshold below which optimization-vector entries are snapped to zero
        in a final polishing step.
    bound : int, None
        Artificial bound on the alphabet size of the auxiliary variable W.
    rv_mode : str, None
        Specifies how to interpret `rvs` and `crvs`. Valid options are:
        {'indices', 'names'}. If equal to 'indices', then the elements of
        `crvs` and `rvs` are interpreted as random variable indices. If equal
        to 'names', the the elements are interpreted as random variable names.
        If `None`, then the value of `dist._rv_mode` is consulted, which
        defaults to 'indices'.

    Returns
    -------
    F : float
        The stochastic Gacs-Korner common information.
    """
    rvs, crvs, rv_mode = normalize_rvs(dist, rvs, crvs, rv_mode)
    # dtc = dual_total_correlation(dist, rvs, crvs, rv_mode)
    # ent = entropy(dist, rvs, crvs, rv_mode)
    # if np.isclose(dtc, ent):
    #     return dtc
    sgkci = StochasticGKCommonInformation(dist, rvs, crvs, bound, rv_mode)
    sgkci.optimize(niter=niter, maxiter=maxiter, polish=polish)
    # The stored objective is negated (minimizer convention); undo that here.
    return -sgkci.objective(sgkci._optima)
|
# This line is needed here so the Django admin site can display the app's
# localized (Chinese) verbose name.
# If the app is already registered as "users.apps.UsersConfig" in
# INSTALLED_APPS, this line can be omitted.
default_app_config='users.apps.UsersConfig'
class Solution:
    """Combination Sum II: unique combinations of `candidates` summing to
    `target`, each array element usable at most once."""

    # Results accumulator. Kept at class level for interface compatibility,
    # but reset on every combinationSum() call (the original never reset it,
    # so repeated calls accumulated stale results).
    ret = []

    def DFS(self, candidates, target, start, valuelist):
        """Depth-first search over sorted `candidates` from index `start`.

        BUG FIXES vs. original:
        - the parameter was named `candidate` but the body read the module
          global `candidates`; the parameter is now actually used;
        - the duplicate-skip test used `i > 0`, which also skipped the first
          use of a repeated value at each depth and dropped valid
          combinations; the correct test is `i > start`.
        """
        if target == 0:
            Solution.ret.append(valuelist)
            return
        for i in range(start, len(candidates)):
            if target < candidates[i]:
                return  # sorted input: no later candidate can fit either
            if i > start and candidates[i] == candidates[i - 1]:
                continue  # skip duplicate values at the same tree depth
            # i + 1: each element may be used only once.
            self.DFS(candidates, target - candidates[i], i + 1, valuelist + [candidates[i]])

    def combinationSum(self, candidates, target):
        """Return all unique combinations of `candidates` that sum to `target`."""
        candidates.sort()
        Solution.ret = []  # reset between calls
        self.DFS(candidates, target, 0, [])
        return Solution.ret
# Demo run. BUG FIX: the Python 2 `print` statement is replaced with the
# call form, which behaves identically for a single argument under both
# Python 2 and Python 3.
candidates = [1, 2, 2, 3, 2, 1]
target = 6
code = Solution()
print(code.combinationSum(candidates, target))
|
import os
from math import log, floor
from pathlib import Path
import argparse
import tensorflow as tf
from data_pipeline import load_image, preprocess_image_tfRecord, preprocess_label, preprocess_dataset, get_data_generator_single
from optional import count_files_by_extension, create_dirs, str2bool
from configuration import constants
# Command line interface -----------------------
def get_command_line_args():
    """Parse this script's command-line flags and return them as a plain dict."""
    cli = argparse.ArgumentParser(description='Define script params')
    # where the generated tfRecord shards are written
    cli.add_argument("-o", '--output_dir', type=str, default='tfrecord_data/')
    # sharding and dataset selection
    cli.add_argument('-s', '--imgs_per_shard', type=int, default=1000)
    cli.add_argument('-dn', '--dataset_name', type=str, default='imagenet')
    cli.add_argument("-dt", '--dataset_type', type=str, default='val')  # val, test, train
    # optional architecture-specific image preprocessing
    cli.add_argument('-p', '--data_preprocessing', type=str2bool, default=True)
    cli.add_argument('-a', '--architecture', type=str, default='VGG16')  # only necessary if preprocessing is used
    return vars(cli.parse_args())
# functions to create tfRecords ------------------------------
def _bytes_feature(value):
    """Return *value* wrapped in a ``bytes_list`` tf.train.Feature."""
    # Eager tensors must be unwrapped to their raw bytes first.
    if isinstance(value, type(tf.constant(0))):
        value = value.numpy()
    wrapped = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=wrapped)
def _int64_feature(value):
    """Return *value* wrapped in an ``int64_list`` tf.train.Feature."""
    # Eager tensors must be unwrapped to a plain Python/NumPy scalar first.
    if isinstance(value, type(tf.constant(0))):
        value = value.numpy()
    wrapped = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=wrapped)
def map_serializaton(img, label):
    """Graph-compatible wrapper that serializes one (img, label) pair so it
    can be used with ``tf.data.Dataset.map``."""

    def _serialize(img, label):
        """Serialize a single datapoint into a tf.train.Example string."""
        feats = tf.train.Features(
            feature={'img_raw': _bytes_feature(tf.io.serialize_tensor(img)),
                     'label': _int64_feature(label)})
        example = tf.train.Example(features=feats)
        return example.SerializeToString()

    # py_function bridges the eager serialization code into the tf.data graph.
    return tf.py_function(_serialize, (img, label), tf.string)
def get_dataset_path(dataset_type, dataset):
    """
    Resolve and validate the data directory for the requested dataset split.

    Parameters
    ----------
    dataset_type : str
        One of 'val', 'test' or 'train'.
    dataset : str
        Key into ``constants.DATASETS``.

    Returns
    -------
    pathlib.Path
        The existing dataset directory.

    Raises
    ------
    ValueError
        If `dataset_type` is unknown or the configured directory does not exist.
    """
    from configuration import constants
    dataset_config = constants.DATASETS[dataset]
    path_keys = {'val': 'val_path', 'test': 'test_path', 'train': 'train_path'}
    # BUG FIX: an unknown dataset_type previously fell through and raised
    # UnboundLocalError on `data_dir`; now it raises a clear ValueError.
    if dataset_type not in path_keys:
        raise ValueError(f"Unknown dataset_type '{dataset_type}'; expected 'val', 'test' or 'train'.")
    data_dir = Path(dataset_config[path_keys[dataset_type]])
    if not data_dir.is_dir():
        # BUG FIX: the original referenced the undefined name `path` here,
        # turning the intended ValueError into a NameError.
        raise ValueError(f'The provided data set directory {data_dir} is no valid path.')
    return data_dir
def create_tfRecord_from_dataset(ds, n_datapoints):
    """
    Based on a tf.dataset.Dataset object, whose content is the serialized dataset,
    tfRecord files are created.

    n_datapoints: The amount of datapoints within the dataset

    NOTE(review): this function reads the module-level globals `args`
    (for 'imgs_per_shard') and `out_dir` (shard destination) set in the
    __main__ block, so it only works when run as a script. If `ds` is empty,
    `writer` is never bound and the final close() raises — confirm that an
    empty dataset is impossible here.
    """
    # Number of shard files needed; the +1 over-count is corrected below when
    # the dataset size divides evenly.
    n_shards = (n_datapoints//args['imgs_per_shard']) + 1
    if n_datapoints%args['imgs_per_shard'] == 0:
        n_shards -= 1
    # create shards
    shard_power_10 = floor(log(n_shards, 10)) + 1  # digits needed for zero-padded file names
    shard_count = 0
    file_count = 0
    for serialized_data in ds:
        # instantiate new writer
        if file_count % args['imgs_per_shard'] == 0:
            # Close the previous shard before opening the next one.
            if shard_count != 0:
                writer.close()
            print(f'Currently at file {file_count:0>2d} of {n_datapoints}, generating shard {shard_count} of {n_shards-1}.')
            tmp_shard_filename = f'{shard_count:0>{shard_power_10}d}_{n_shards-1}-data.tfrecords'
            tmp_shard_filepath = out_dir.joinpath(tmp_shard_filename)
            writer = tf.io.TFRecordWriter(str(tmp_shard_filepath))
            shard_count += 1
        # write serialized data to the shard
        writer.write(serialized_data.numpy())
        file_count += 1
    writer.close()
# functions to extract tfRecords --------------
def _parse_function(example_proto):
    """
    Map-fn for tf.data.Dataset: decode one serialized Example from the
    tfRecord files back into an (img, label) pair.
    """
    schema = {'img_raw': tf.io.FixedLenFeature([], tf.string),
              'label': tf.io.FixedLenFeature([], tf.int64)}
    parsed = tf.io.parse_single_example(example_proto, schema)
    image = tf.io.parse_tensor(parsed['img_raw'], out_type=tf.float32)
    return image, parsed['label']
def create_dataset_from_tfRecords(filepaths: list, n_classes, batch_size, interleave_cycle=4, shuffle_buffer=8192):
    """
    Creates a tf.dataset.Dataset object from the passed list of tfRecords files.
    As the labels were not one-hot encoded before saving, this is also taken
    care of.

    filepaths: list of relative or absolute paths to each tfRecord file
    n_classes: number of classes (used for one-hot encoding the labels)
    batch_size: number of datapoints per batch
    interleave_cycle: number of shard files read in parallel
    shuffle_buffer: buffer size for shuffling the serialized records
    """
    # 1. create dataset from the filepaths and shuffle them
    fn_dataset = tf.data.Dataset.from_tensor_slices(filepaths).shuffle(len(filepaths))
    # 2. create dataset of the serialized data and shuffle again
    serialized_dataset = fn_dataset.interleave(tf.data.TFRecordDataset,
                                               cycle_length=interleave_cycle,
                                               num_parallel_calls=tf.data.AUTOTUNE)
    serialized_dataset = serialized_dataset.shuffle(buffer_size=shuffle_buffer)
    # 3. Parse and preprocess the serialized dataset
    parsed_dataset = serialized_dataset.map(_parse_function)
    parsed_dataset = parsed_dataset.map(lambda img, label: (img, preprocess_label(label, n_classes)))
    # BUG FIX: the batch size was hard-coded to 6 and the `batch_size`
    # parameter was silently ignored.
    parsed_dataset = parsed_dataset.batch(batch_size).prefetch(tf.data.AUTOTUNE)
    return parsed_dataset
if __name__ == '__main__':
    args = get_command_line_args()
    # 1. check directories
    data_dir = get_dataset_path(dataset_type=args['dataset_type'],
                                dataset=args['dataset_name'])
    # define the output dir (one subdirectory per dataset split)
    out_dir = Path(args['output_dir']).joinpath(args['dataset_type'])
    out_dir.mkdir(parents=True, exist_ok=True)
    # 2. create generator yielding (HxWx3 float image, int label) pairs
    gen_output_signature = (tf.TensorSpec([None, None, 3], dtype=tf.float32),
                            tf.TensorSpec((), dtype=tf.int32))
    data_gen = get_data_generator_single(data_directory=data_dir,
                                         n_classes=constants.DATASETS[args['dataset_name']]['n_classes'])
    ds = tf.data.Dataset.from_generator(generator=data_gen,
                                        output_signature=gen_output_signature)
    # 3. preprocess images (architecture-specific; only when enabled via CLI)
    if args['data_preprocessing']:
        ds = ds.map(lambda img, label: (preprocess_image_tfRecord(img,
                                                                  dataset=args['dataset_name'],
                                                                  architecture=args['architecture']),
                                        label),
                    num_parallel_calls=tf.data.AUTOTUNE)
    # 4. serialize data
    ds = ds.map(lambda img, label: map_serializaton(img, label))
    # 5. save serialized data to tfRecords
    create_tfRecord_from_dataset(ds=ds,
                                 n_datapoints=count_files_by_extension(data_dir, 'JPEG'))
|
import pyDes
from PIL import Image
with open("twitter.bmp", "rb") as imageFile:
f = imageFile.read(138)
b = bytearray(f)
a = imageFile.read(1441910)#a = imageFile.read(363049472)para la del pinguino
imageFile.close()
data=a
k = pyDes.des(b"Asegurar", pyDes.ECB, "\0\0\0\0\0\0\0\0", pad=None, padmode=pyDes.PAD_PKCS5)
d = k.encrypt(data)
with open("DES-ECB-encrypt.bmp","ab") as f2:
f2.write(b)
f2.write(d)
f2.close()
c=k.decrypt(d)
with open("DES-ECB-decrypt.bmp","ab") as f3:
f3.write(b)
f3.write(c)
f3.close()
|
"""
演示文件的读操作
"""
# file1 = open('hello.txt','r')
# data = file1.read()
# print(data)
# file1.close()
# 如果一个文件里面的内容太多了,read() 会将里面的内容一次性读取出来,负荷太大
# 第一种方式 read(参数) # 每次读取参数大小的内容
# file1 = open('hello.txt','r')
# while True: # 循环取值, 把里面的内容都读取出来,, 每次都多少由参数决定
# data = file1.read(5) # 字符来作为单位 一次读取出来的大小 5 hello 10 hello hell 空格也属于一个字符
# print(data)
# if len(data) == 0: # 没有内容了
# break
# file1.close()
# file1 = open('hello.txt','r')
# while True: # 循环取值, 把里面的内容都读取出来,, 每次都多少由参数决定
# data = file1.read(5) # 字符来作为单位 一次读取出来的大小 5 hello 10 hello hell 空格也属于一个字符
# print(data,end = '')
# if len(data) == 0: # 没有内容了
# break
# file1.close()
# 第二种方式 readline>>> 每次读取一行
# file1 = open('hello.txt','r')
# while True:
# data = file1.readline()
# print(data)
# if len(data) == 0:
# break
# file1.close()
# 第三种方式 readlines >>> 将文件中所有字符数据读取成为一个列表,每一行字符数据都是一个元素
# file1 = open('hello.txt','r')
# data = file1.readlines()
# print(data)
# file1.close()
|
from apps.forms import BaseForm
from wtforms import StringField, IntegerField
from wtforms.validators import Email,InputRequired,Length,EqualTo
from utils import rmcache
from wtforms import ValidationError
from flask import g
class LoginForm(BaseForm):
    """CMS login form: e-mail + password, plus an optional remember flag."""
    email = StringField(validators=[Email(message='邮箱格式不正确'),InputRequired(message='请输入邮箱')])
    password = StringField(validators=[Length(6,12,message='密码格式不正确'),InputRequired(message='请输入密码')])
    # "Remember me" flag; presumably 1 keeps the session alive — TODO confirm with the view.
    remember = IntegerField()
class ResetPwdForm(BaseForm):
    """Password change form: old password plus new password entered twice."""
    oldpwd = StringField(validators=[Length(6,12,message='原密码格式不正确'),InputRequired(message='请输入原密码')])
    newpwd = StringField(validators=[Length(6,12,message='新密码格式不正确'),InputRequired(message='请输入新密码')])
    # Confirmation field: must match newpwd exactly.
    newpwd2 = StringField(validators=[EqualTo("newpwd",message="密码不一致")])
class ResetEmailForm(BaseForm):
    """E-mail change form: new address plus a 4-character captcha sent to it."""
    newemail = StringField(validators=[Email(message='邮箱格式不正确'),InputRequired(message='请输入邮箱')])
    captcha = StringField(validators=[Length(4,4,message='验证码格式不正确'),InputRequired(message='请输入验证码')])

    def validate_captcha(self, field):
        """Check the submitted captcha against the cached one for this e-mail
        (case-insensitive); raise if it is expired or wrong."""
        captcha = field.data
        email = self.newemail.data
        captcha_cache = "".join(rmcache.get(email) or [])
        if not captcha_cache:
            raise ValidationError('验证码已过期,请重新获取!')
        elif captcha.lower() != captcha_cache.lower():
            raise ValidationError('验证码错误!')

    def validate_newemail(self, field):
        """Reject the address when it is the current user's own e-mail.
        NOTE(review): the message says "already registered", but the check
        only compares against g.cms_user's current address — other users'
        addresses are not checked here. Confirm this is intended."""
        email = field.data
        user = g.cms_user
        if user.email == email:
            raise ValidationError('此邮箱已被注册!')
class AddBannerForm(BaseForm):
    """Form for creating a carousel banner: name, image, link, and priority
    (higher priority presumably sorts first — confirm with the view)."""
    name = StringField(validators=[InputRequired(message='请输入图片名称')])
    image_url = StringField(validators=[InputRequired(message='请输入图片链接')])
    link_url = StringField(validators=[InputRequired(message='请输入跳转链接')])
    priority = IntegerField(validators=[InputRequired(message='请输入权重')])
class UpdateBannerForm(AddBannerForm):
    """Same fields as AddBannerForm plus the id of the banner being edited."""
    banner_id = IntegerField()
class AddBoardForm(BaseForm):
    """Form for creating a forum board: only the board name is required."""
    name = StringField(validators=[InputRequired(message='请输入版块名称')])
class UpdateBoardForm(AddBoardForm):
    """Same fields as AddBoardForm plus the id of the board being edited."""
    board_id = IntegerField()
|
import re
class LocationIndex:
    """
    Builds chatbot replies pointing at JCU Townsville campus-map URLs for
    buildings, the pool, and staff offices. The reply text is accumulated in
    ``self.location_str`` and read back via ``str()``.

    BUG FIX vs. original: the two Python-2 ``print`` statements in
    ``office_passer_with_name`` are replaced with the call form, which
    behaves identically for a single argument on Python 2 and also parses on
    Python 3. All other logic is unchanged; suspect spots are flagged with
    NOTE(review) comments instead.
    """

    def __init__(self, location_str):
        self.location_str = location_str

    def __str__(self):
        return "{}".format(self.location_str)

    def location_passer(self, message_text):
        """Main location function, gives response to building number, pool and map."""
        if re.match(r'.*map', message_text, re.I):
            self.location_str = "Here's a map! https://maps.jcu.edu.au/campus/townsville/"
        elif re.match(r'.*[0-354]', message_text, re.I):
            # NOTE(review): the character class [0-354] matches any single
            # digit 0-5 (ranges 0-3, plus 5 and 4), not the numbers 0..354 —
            # confirm the intended range.
            message_text_number = re.findall('\d+', message_text)
            message_text_number = ("".join(message_text_number))
            self.location_str = "Are you looking for this building? \nhttps://maps.jcu.edu.au/campus/townsville/?location={}".format(message_text_number)
        elif re.match(r'.*pool|swim|swimming', message_text, re.I):
            self.location_str = "Are you looking for the pool man?\nhttps://maps.jcu.edu.au/campus/townsville/?location=241"
        else:
            with open("buildinglist.csv") as buildinglist:
                for line in buildinglist:
                    # NOTE(review): line[0] is only the FIRST CHARACTER of the
                    # row, so this membership test can almost never succeed,
                    # and the else-branch overwrites the result on every
                    # subsequent row — probably meant `line` and a `break`.
                    if message_text in line[0]:
                        building_number = line.title().split(", ")[1]
                        self.location_str = "Are you looking for this building? \nhttps://maps.jcu.edu.au/campus/townsville/?location={}".format(building_number)
                    else:
                        self.location_str = "idk what '{}' means".format(message_text)

    def location_name_passer(self, message_text):
        """Gets building name based off building number."""
        with open("buildinglist.csv") as buildinglist:
            for line in buildinglist:
                if message_text in line.split(", ")[1]:
                    # NOTE(review): both format slots use column [0]; the URL
                    # location probably should come from column [1] — confirm
                    # against buildinglist.csv's layout.
                    self.location_str = "are you looking for building {}?\n\nhttps://maps.jcu.edu.au/campus/townsville/?location={}".format(line.title().split(", ")[0], line.title().split(", ")[0])
                else:
                    # No-op placeholder kept from the original (a bare string
                    # expression has no effect).
                    "thingihfaeihf"

    def office_passer(self, last_name_message):
        """Returns appropriate office number based off name."""
        with open("peoplelist.csv") as peoplelist:
            for line in peoplelist:
                if last_name_message in line:
                    # Column 2 holds "building-room"; "0" means no office.
                    if line.split(', ')[2] == '0\n':
                        self.location_str = "{} does not have an office".format(line.split(', ')[0])
                    else:
                        building_number = line.split(", ")[2]
                        building_number_final = "{}".format(building_number.split("-")[0])
                        self.location_str = building_number_final
                else:
                    # NOTE(review): overwrites the result on every non-matching
                    # row, so a match only "wins" if it is the last row read.
                    self.location_str = "im not sure who you are talking about"

    def office_passer_new(self, last_name_message):
        """Like office_passer, but formats the full building/room reply and
        leaves self.location_str untouched when no row matches."""
        with open("peoplelist.csv") as peoplelist:
            for line in peoplelist:
                if last_name_message in line:
                    if line.split(", ")[2] == "0\n":
                        self.location_str = "{} does not have an office".format(line.split(", ")[0])
                    else:
                        line = line.split(", ")[2]
                        self.location_str = "their office is in building {}, room {}".format(line.split("-")[0], line.split("-")[1])

    def office_passer_with_name(self, name):
        """Like office_passer_new, but title-cases the person's name in the
        "no office" reply. The prints are debug output."""
        with open("peoplelist.csv") as peoplelist:
            for line in peoplelist:
                if name in line:
                    print(line)
                    print(line.split(", ")[2])
                    if line.split(", ")[2] == '0\n':
                        self.location_str = "{} does not have an office".format(line.split(", ")[0].title())
                    else:
                        line = line.split(", ")[2]
                        self.location_str = "their office is in building {}, room {}".format(line.split("-")[0], line.split("-")[1])
|
# Import Panda3D Modules
import sys, os, math
from pandac.PandaModules import loadPrcFileData, TextNode, NodePath, CardMaker,TextureStage, Texture, VBase3, TransparencyAttrib, WindowProperties, TextProperties, TextPropertiesManager
from direct.actor.Actor import Actor
from direct.interval.LerpInterval import LerpHprInterval
from panda3d.core import loadPrcFile
from direct.gui.OnscreenText import OnscreenText
from direct.gui.OnscreenImage import OnscreenImage
from direct.fsm.FSM import FSM
from direct.interval.IntervalGlobal import *
from direct.interval.LerpInterval import LerpColorScaleInterval, LerpPosHprScaleInterval, LerpHprInterval, LerpPosInterval
from direct.interval.MetaInterval import Sequence
from lib import menuBars, menuOptions, inputHelp
import gameInput
loadPrcFile("../../assets/Config.prc")
# ShowBase
import direct.directbase.DirectStart
from direct.showbase.ShowBase import ShowBase
# Path to game assets.
base.assetPath = "../../assets"
# Ext to use when loading tiles.
base.tileExt = "egg"
# Ext to use when loading character.
base.charExt = "egg"
# Load Character List: every subdirectory of assets/characters is a character.
charDir = os.listdir(base.assetPath + "/characters")
charList = []
for i in charDir:
    if os.path.isdir(base.assetPath + "/characters/" + i):
        charList.append(i)
# Build one stats dict per character from the optional per-character files.
# NOTE(review): the open() calls below never close their file handles —
# harmless at startup, but worth tidying with `with` blocks.
charData = []
for i in charList:
    data = {}
    # Load Name from external file
    name = "None"
    if os.path.exists(base.assetPath + "/characters/" + i + "/name.txt"):
        name = open(base.assetPath + "/characters/" + i + "/name.txt", "r").read()
    # Load Special ability name from external file
    special = "n/a"
    if os.path.exists(base.assetPath + "/characters/" + i + "/special.txt"):
        special = open(base.assetPath + "/characters/" + i + "/special.txt", "r").read()
    # Load stats from external file: one "value ..." line each for speed,
    # power and resist, defaulting to 1.0 when the file is missing.
    moveSpeed = 1.0
    power = 1.0
    resist = 1.0
    if os.path.exists(base.assetPath + "/characters/" + i + "/stats.txt"):
        statFile = open(base.assetPath + "/characters/" + i + "/stats.txt").read().split("\n")
        moveSpeed = float(statFile[0].split(" ")[0])
        power = float(statFile[1].split(" ")[0])
        resist = float(statFile[2].split(" ")[0])
    data['name'] = name.strip()
    data['speed'] = moveSpeed
    data['power'] = power
    data['resist'] = resist
    data['special'] = special
    data['picture'] = base.assetPath + "/characters/" + i + "/picture.jpg"
    charData.append(data)
base.playerProfiles = []
# Character Select Col Size (portraits per row in the selection grid)
CHAR_COL_SIZE = 5
class gameLobby(FSM):
    """
    Pre-game lobby screen: animated background, character-select portrait
    grid, per-player profile slots, and "all ready" prompts. 2-D nodes are
    re-anchored to the window edges on every resize via windowEvent().
    """

    def __init__(self):
        # Init FSM
        FSM.__init__(self, 'MenuFSM')
        # Disable Mouse
        base.disableMouse()
        # Load Fonts
        self.boldFont = loader.loadFont(base.assetPath + "/fonts/DejaVuSansBold.ttf")
        self.regFont = loader.loadFont(base.assetPath + "/fonts/DejaVuSansCondensed.ttf")
        self.pixelFont = loader.loadFont(base.assetPath + "/fonts/04B.egg")
        # Lobby node (3-D root) and 2-D overlay root
        self.node = NodePath("LobbyNode")
        self.node.reparentTo(render)
        self.node2d = NodePath("Lobby2DNode")
        self.node2d.reparentTo(aspect2d)
        # Prepare menu BG: a large colored card behind everything
        menu_frame = CardMaker("Menu_Background")
        menu_frame.setFrame(-4,4,-4,4)
        menu_frame.setColor(0.3843137254901961, 0.8509803921568627, 0.6117647058823529,1)
        menu_frame_node = self.node.attachNewNode(menu_frame.generate())
        menu_frame_node.setY(8)
        # Tiled overlay texture, scrolled forever via a texture projector lerp.
        bgTex = loader.loadTexture(base.assetPath + "/menu/bg_overlay.png")
        bgTex.setWrapU(Texture.WMRepeat)
        bgTex.setWrapV(Texture.WMRepeat)
        #menu_frame_node.setTexture(bgTex)
        ts = TextureStage('PlayerHud_Snapshot_TextureStage')
        ts.setMode(TextureStage.MDecal)
        menu_frame_node.setTexture(ts, bgTex)
        lerper = NodePath("Menu_Background_Lerper")
        menu_frame_node.setTexProjector(ts, NodePath(), lerper)
        menu_lerp = lerper.posInterval(60, VBase3(-1, 1, 0))
        menu_lerp.loop()
        # Window Object: [node, side(-1 left / +1 right), x-offset] triples
        # repositioned on every window resize.
        self.windowObj = []
        # MENU ASSETS
        # ===================
        # Menu Bar
        self.menuBar = menuBars.menuBars([])
        self.menuBar.node.setZ(.7)
        # Menu Title
        TITLE = OnscreenImage(image = base.assetPath + "/menu/title_gamelobby.png", pos = (0, 0, .87), scale=(.4), parent=self.node2d)
        TITLE.setTransparency(TransparencyAttrib.MAlpha)
        self.addWindowNode(TITLE, -1, .4)
        # Sidebar
        SIDEBAR = OnscreenImage(image = base.assetPath + "/menu/lobby_sidebar.png", pos = (0, 0, 0), scale=(1.5), parent=self.node2d)
        SIDEBAR.setTransparency(TransparencyAttrib.MAlpha)
        self.addWindowNode(SIDEBAR, 1, -1.5)
        # Seperators
        charSelect = OnscreenText(text = 'character select', pos = (0, .535), scale = .065, shadow=(.1,.1,.1,.5), fg=(1,1,1,1), font=self.pixelFont, align=TextNode.ALeft, parent=self.node2d)
        self.addWindowNode(charSelect, -1, .1)
        # Show Characters: portrait grid, CHAR_COL_SIZE+1 portraits per row
        x = 0
        y = 0
        self.charSelect = NodePath("CharacterSelectPortraits")
        self.charSelect.reparentTo(self.node2d)
        self.addWindowNode(self.charSelect, -1, .3)
        self.charSelect.setZ(.4)
        base.charSlots = []
        for i in charData:
            port = characterPortrait(i['picture'])
            port.reparentTo(self.charSelect)
            port.setScale(.125)
            port.setPos((x * .25), 0, (y * -.25))
            base.charSlots.append(port)
            # Increment
            x += 1
            if x > CHAR_COL_SIZE:
                x = 0
                y += 1
        # Player Profiles: eight vertical slots on the sidebar
        self.playerProfiles = NodePath("PlayerProfiles")
        self.playerProfiles.reparentTo(self.node2d)
        self.addWindowNode(self.playerProfiles, 1, -.65)
        self.playerProfiles.setZ(.865)
        self.playerSlots = []
        for i in range(8):
            pp = playerProfile(i + 1)
            pp.reparentTo(self.playerProfiles)
            pp.setZ(i * -.25)
            self.playerSlots.append(pp)
        # All Ready prompt plus slide/fade lerps for showing and hiding it
        allRdyTxt = OnscreenText(text = 'All Ready?', pos = (0, .05), scale = .1, shadow=(.1,.1,.1,.5), fg=(1,1,1,1), font=self.boldFont, align=TextNode.ALeft, parent=self.node2d)
        cntTxt = OnscreenText(text = "Press 'button 1' to continue!", pos = (0, -.05), scale = .065, shadow=(.1,.1,.1,.5), fg=(1,1,1,1), font=self.regFont, align=TextNode.ALeft, parent=self.node2d)
        self.rdyLerp = Parallel(
            LerpPosInterval(allRdyTxt, 1.0, (0,0,0), (-.5, 0, 0)),
            LerpPosInterval(cntTxt, 1.0, (0,0,0), (.5, 0, 0)),
            LerpColorScaleInterval(allRdyTxt, 1.0, (1,1,1,1), (1,1,1,0)),
            LerpColorScaleInterval(cntTxt, 1.0, (1,1,1,1), (1,1,1,0))
        )
        self.unRdyLerp = Parallel(
            LerpPosInterval(allRdyTxt, 1.0, (-.5,0,0), (0, 0, 0)),
            LerpPosInterval(cntTxt, 1.0, (.5,0,0), (0, 0, 0)),
            LerpColorScaleInterval(allRdyTxt, 1.0, (1,1,1,0), (1,1,1,1)),
            LerpColorScaleInterval(cntTxt, 1.0, (1,1,1,0), (1,1,1,1))
        )
        self.unRdyLerp.start()
        # Bind Window Event so HUD elements track the window edges
        self.windowEvent()
        self.accept("window-event", self.windowEvent)

    def addWindowNode(self, node, side, xoffset):
        """
        Add NodePath to list of nodes that will be
        repositioned everytime the window is resized.
        """
        self.windowObj.append([node, side, xoffset])

    def rmWindowNode(self, node):
        """
        Remove NodePath from above list.
        """
        for i in self.windowObj:
            if i[0] == node:
                self.windowObj.remove(i)
                break

    def windowEvent(self, window = None):
        """
        Update window size info everytime a window event occurs.
        """
        self.winWidth = base.win.getProperties().getXSize()
        self.winHeight = base.win.getProperties().getYSize()
        self.winAspect = float(self.winWidth) / float(self.winHeight)
        # Update Node: anchor each registered node to the left (-1) or
        # right (+1) window edge plus its fixed offset.
        for i in self.windowObj:
            i[0].setX((self.winAspect * i[1]) + i[2])
# The following handles the portraits for each character...
# Background textures for a portrait in its normal and selected states.
PORTRAIT_BG = base.assetPath + "/menu/character_portrait_bg.png"
PORTRAIT_BG_SELECTED = base.assetPath + "/menu/character_portrait_bg_selected.png"
class characterPortrait(NodePath):
    """
    Selectable character portrait: a normal/highlighted background pair, the
    character's picture, and four hidden player-number labels laid out on a
    2x2 grid.
    """

    def __init__(self, char_image):
        NodePath.__init__(self, "CharacterPortrait")
        self.pixelFont = loader.loadFont(base.assetPath + "/fonts/04B.egg")
        # Two stacked backgrounds; select()/unselect() toggle between them.
        self.bg = OnscreenImage(image = PORTRAIT_BG, pos = (0, 0, 0), scale=(1), parent=self)
        self.bg.setTransparency(TransparencyAttrib.MAlpha)
        self.bg_selected = OnscreenImage(image = PORTRAIT_BG_SELECTED, pos = (0, 0, 0), scale=(1), parent=self)
        self.bg_selected.setTransparency(TransparencyAttrib.MAlpha)
        self.bg_selected.hide()
        # Character picture drawn on top of the background.
        self.char_image = char_image
        self.char = OnscreenImage(image = char_image, pos = (0, 0, 0), scale=(.68), parent=self)
        # Player-number overlays (1-4), all hidden until setNumber() is called.
        self.text = []
        for slot in range(4):
            col = slot % 2
            row = slot // 2
            label = OnscreenText(text = str(slot + 1), pos = ( -.3 + (col * .66), row * -.55), scale = .8, shadow=(.1,.1,.1,.5), fg=(1,1,1,1), font=self.pixelFont, align=TextNode.ACenter, parent=self)
            label.hide()
            self.text.append(label)

    def select(self):
        """Switch to the highlighted background."""
        self.bg_selected.show()
        self.bg.hide()

    def unselect(self):
        """Switch back to the normal background."""
        self.bg_selected.hide()
        self.bg.show()

    def setNumber(self, number, show=True):
        """Show (or hide, with show=False) the 1-based player-number label."""
        label = self.text[number - 1]
        if show:
            label.show()
        else:
            label.hide()

    def setImage(self, char_image):
        """Swap the portrait to a different character picture."""
        self.char_image = char_image
        self.char.setImage(char_image)
# The Following handles the loading of player profiles...
# Global registry of lobby slot widgets, in slot order.
base.playerProfileSelector = []
class playerProfile(NodePath):
    """One lobby slot; a small state machine driven by controller events.

    state: 0 = waiting for a player to join, 1 = profile select,
    2 = character select, 3 = ready.  player holds the controller number
    that claimed this slot (-1 while unclaimed).
    """
    def __init__(self, slotNo = 1):
        NodePath.__init__(self, "Player" + str(slotNo) + "Profile")
        base.playerProfileSelector.append(self)
        self.state = 0
        self.player = -1
        self.pixelFont = loader.loadFont(base.assetPath + "/fonts/04B.egg")
        self.pNumText = OnscreenText(text = str(slotNo), pos = (.55, -.1), scale = .35, fg=(.745,.745,.745,.7), font=self.pixelFont, align=TextNode.ACenter, parent=self)
        # Press Button to Join State
        self.joinText = OnscreenText(text = "press button 1\nto join.", pos = (0, 0), scale = .07, fg=(.694,.031,.031,1), font=self.pixelFont, align=TextNode.ALeft, parent=self)
        self.joinText.hide()
        # Profile Select State
        self.profileSelect = NodePath("ProfileSelect")
        self.profileSelect.reparentTo(self)
        self.profileSelect.hide()
        profileText = OnscreenText(text = "profile", pos = (0, .025), scale = .1, fg=(.694,.031,.031,1), font=self.pixelFont, align=TextNode.ALeft, parent=self.profileSelect)
        self.profileName = OnscreenText(text = "< guest >", pos = (.25, -.065), scale = .1, fg=(1,1,1,1), font=self.pixelFont, align=TextNode.ACenter, parent=self.profileSelect)
        # Character Select State
        self.charSelect = NodePath("CharacterSelect")
        self.charSelect.reparentTo(self)
        self.charSelect.hide()
        self.pPortrait = characterPortrait(charData[0]['picture'])
        self.pPortrait.setScale(.08)
        self.pPortrait.setPos(.06,0,-.04)
        self.pPortrait.reparentTo(self.charSelect)
        self.playerName = OnscreenText(text = "guest", pos = (0, .08), scale = .05, fg=(.694,.031,.031,1), font=self.pixelFont, align=TextNode.ALeft, parent=self.charSelect)
        self.charName = OnscreenText(text = "chompy", pos = (0, .045), scale = .04, fg=(1,1,1,1), font=self.pixelFont, align=TextNode.ALeft, parent=self.charSelect)
        # Character Stats
        OnscreenText(text = "pow", pos = (.15, -.01), scale = .04, fg=(1,1,1,1), font=self.pixelFont, align=TextNode.ALeft, parent=self.charSelect)
        OnscreenText(text = "rst", pos = (.15, -.04), scale = .04, fg=(1,1,1,1), font=self.pixelFont, align=TextNode.ALeft, parent=self.charSelect)
        OnscreenText(text = "spd", pos = (.15, -.07), scale = .04, fg=(1,1,1,1), font=self.pixelFont, align=TextNode.ALeft, parent=self.charSelect)
        OnscreenText(text = "spc", pos = (.15, -.1), scale = .04, fg=(1,1,1,1), font=self.pixelFont, align=TextNode.ALeft, parent=self.charSelect)
        self.spc_text = OnscreenText(text = "chomp tackle", pos = (.25, -.1), scale = .03, fg=(.694,.031,.031,1), font=self.pixelFont, align=TextNode.ALeft, parent=self.charSelect)
        # One CardMaker is reused to generate the three stat bar cards.
        stat_frame = CardMaker("PlayerHud_Snapshot")
        stat_frame.setFrame(0, .2, 0, .02)
        stat_frame.setColor(.694,.031,.031,1)
        self.pow_bar = self.charSelect.attachNewNode(stat_frame.generate())
        self.pow_bar.setPos(.25, 0, -.01)
        self.rst_bar = self.charSelect.attachNewNode(stat_frame.generate())
        self.rst_bar.setPos(.25, 0, -.04)
        self.spd_bar = self.charSelect.attachNewNode(stat_frame.generate())
        self.spd_bar.setPos(.25, 0, -.07)
        # Ready Text: pulses between red and white while shown.
        self.rdy_text = OnscreenText(text = "READY", pos = (0, -.12), scale = .2, fg=(1,1,1,1), shadow=(.8,.8,.8,.6), font=self.pixelFont, align=TextNode.ALeft, parent=self.charSelect)
        self.rdy_text.setR(-8)
        rdyTextLerp = Sequence(
            LerpColorScaleInterval(self.rdy_text, .25, (.694,.031,.031,1)),
            LerpColorScaleInterval(self.rdy_text, .25, (1,1,1,1))
        )
        rdyTextLerp.loop()
        self.rdy_text.hide()
        self.showNextJoinProfile()
        self.charSelected = 0
    def showNextJoinProfile(self):
        """Show the join prompt on the first unclaimed slot and bind the
        join buttons of controllers that have not claimed a slot yet."""
        # See if this one should display Join text
        hasOne = False
        for i in base.playerProfileSelector:
            if i.state == 0 and not hasOne:
                i.joinText.show()
                hasOne = True
                # Accept Player Input: bind "join" for every controller that
                # is not already attached to some slot.
                for x in range(4):
                    isOK = False
                    for y in base.playerProfileSelector:
                        if y.player == x + 1:
                            isOK = True
                            break
                    if not isOK:
                        base.accept("p" + str(x + 1) + "_btna", i.selectProfile, [x + 1])
            else:
                i.joinText.hide()
    def cancelPlayer(self):
        """Release this slot back to the join state."""
        self.player = -1
        self.state = 0
        self.joinText.show()
        self.charSelect.hide()
        self.profileSelect.hide()
        self.showNextJoinProfile()
    def selectProfile(self, playerNo = None):
        """Enter the profile-select state for controller *playerNo*."""
        profile = "guest"
        # Remove instructions for all ready: leaving ready state re-triggers
        # the lobby's "un-ready" fade if everybody had been ready.
        allRdy = True
        noneActive = True
        for i in base.playerProfileSelector:
            if not i.player > 0: continue
            else: noneActive = False
            if not i.state == 3: allRdy = False
        if allRdy and not noneActive:
            base.gameLobby.unRdyLerp.start()
        # Set Player Number; visibleNumber is this slot's 1-based position.
        self.player = playerNo
        self.visibleNumber = 0
        for i in base.playerProfileSelector:
            self.visibleNumber += 1
            if i == self: break
        # Unselect character slot unless some other active slot still has it.
        base.charSlots[self.charSelected].setNumber(self.visibleNumber, False)
        stillSelected = False
        for i in base.playerProfileSelector:
            if i == self: continue
            if i.charSelected == self.charSelected and i.state == 2:
                stillSelected = True
        if not stillSelected:
            base.charSlots[self.charSelected].unselect()
        # Re-bind this controller's buttons for the profile-select state.
        base.accept("p" + str(playerNo) + "_btna", self.selectCharacter, [profile])
        base.accept("p" + str(playerNo) + "_btnb", self.cancelPlayer)
        base.ignore("p" + str(self.player) + "_left")
        base.ignore("p" + str(self.player) + "_right")
        base.ignore("p" + str(self.player) + "_up")
        base.ignore("p" + str(self.player) + "_down")
        self.state = 1
        self.showNextJoinProfile()
        self.joinText.hide()
        self.charSelect.hide()
        self.profileSelect.show()
    def selectCharacter(self, profile):
        """Enter the character-select state (also used to back out of ready)."""
        # Remove instructions for all ready
        allRdy = True
        for i in base.playerProfileSelector:
            if not i.player > 0: continue
            if not i.state == 3: allRdy = False
        if allRdy:
            base.gameLobby.unRdyLerp.start()
        self.state = 2
        self.rdy_text.hide()
        # D-pad moves the selection cursor around the character grid.
        base.accept("p" + str(self.player) + "_btna", self.ready, [profile])
        base.accept("p" + str(self.player) + "_btnb", self.selectProfile, [self.player])
        base.accept("p" + str(self.player) + "_left", self.updateCharSelectGrid, [-1])
        base.accept("p" + str(self.player) + "_right", self.updateCharSelectGrid, [1])
        base.accept("p" + str(self.player) + "_up", self.updateCharSelectGrid, [-CHAR_COL_SIZE])
        base.accept("p" + str(self.player) + "_down", self.updateCharSelectGrid, [CHAR_COL_SIZE])
        self.updateCharSelectGrid(0)
        self.charSelect.show()
        self.joinText.hide()
        self.profileSelect.hide()
    def ready(self, profile):
        """Lock in the current character and mark this slot ready."""
        self.state = 3
        self.rdy_text.show()
        # Show instructions for all ready
        allRdy = True
        for i in base.playerProfileSelector:
            if not i.player > 0: continue
            if not i.state == 3: allRdy = False
        if allRdy:
            base.gameLobby.rdyLerp.start()
        base.accept("p" + str(self.player) + "_btnb", self.selectCharacter, [profile])
        base.ignore("p" + str(self.player) + "_left")
        base.ignore("p" + str(self.player) + "_right")
        base.ignore("p" + str(self.player) + "_up")
        base.ignore("p" + str(self.player) + "_down")
    def updateCharSelectGrid(self, select):
        """Move the character cursor by *select* slots (wraps at both ends)
        and refresh the portrait, name, stat bars and special text."""
        select = self.charSelected + select
        # NOTE(review): a multi-slot move past either end snaps to the first/
        # last slot rather than wrapping by the overshoot -- confirm intended.
        if select > len(base.charSlots) - 1:
            select = 0
        if select < 0: select = len(base.charSlots) - 1
        # Only clear the old slot's highlight if no other active slot has it.
        stillSelected = False
        for i in base.playerProfileSelector:
            if i == self: continue
            if i.charSelected == self.charSelected and i.state >= 2:
                stillSelected = True
        if not stillSelected:
            base.charSlots[self.charSelected].unselect()
        base.charSlots[self.charSelected].setNumber(self.visibleNumber, False)
        self.charSelected = select
        base.charSlots[self.charSelected].select()
        base.charSlots[self.charSelected].setNumber(self.visibleNumber, True)
        self.pPortrait.setImage(charData[self.charSelected]['picture'])
        self.charName.setText(charData[self.charSelected]['name'].lower())
        # Stat bars scale horizontally; stats appear to range over 0..3.
        self.pow_bar.setScale( (charData[self.charSelected]['power'] / 3.0, 1, 1) )
        self.rst_bar.setScale( (charData[self.charSelected]['resist'] / 3.0, 1, 1) )
        self.spd_bar.setScale( (charData[self.charSelected]['speed'] / 3.0, 1, 1) )
        self.spc_text.setText(charData[self.charSelected]['special'].lower())
# Build the lobby singleton at import time.
base.gameLobby = gameLobby()
|
from tkinter import *
from tkinter import messagebox
# Root window: fixed 312x370 pixels, resizing disabled in both directions.
window=Tk()
window.title("SIMPLE CALCULATOR")
window.geometry('312x370')
window.resizable(0,0)
def btn_click(item):
    """Append the pressed button's value to the running expression and
    refresh the display."""
    global expression
    expression = f"{expression}{item}"
    input_text.set(expression)
def btn_clear():
    """Reset both the stored expression and the on-screen display."""
    global expression
    expression = ""
    input_text.set("")
def btn_equal():
    """Evaluate the accumulated expression and display the result.

    On any evaluation failure (e.g. "1//" or division by zero) the display
    shows an error message and the expression is reset.
    """
    global expression
    try:
        # NOTE(security): eval() executes arbitrary Python.  Input here comes
        # only from the calculator buttons, but never reuse this pattern on
        # free-form text.
        result = str(eval(expression))
        expression = ""
        btn_click(result)
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; Exception covers all evaluation errors.
        input_text.set("Error!!!!")
        expression = ""
# Shared calculator state: the expression string currently being built.
expression=""
input_text=StringVar()
# Display strip across the top of the window.
input_frame=Frame(window,height=50,width=312)
input_frame.pack(side=TOP)
input_field=Entry(input_frame,width=50,font=('arial',18,'bold'),textvariable=input_text)
# NOTE(review): grid() and pack() are both called on input_field; mixing
# geometry managers on one widget is fragile -- confirm only pack is needed.
input_field.grid(row=0,column=0)
input_field.pack(ipady=10) #ipady is the internal padding to increase the height of input field
# Button grid: 5 rows of digits/operators on a grey frame.
btn_frame=Frame(window,height=272,width=312,bg='grey')
btn_frame.pack()
# NOTE: grid() returns None, so all of these names hold None; they are kept
# only for readability.
clear=Button(btn_frame,text='C',bg='grey',fg='black',width=33,height=3,command=btn_clear).grid(row=0,columnspan=3)
divide=Button(btn_frame,text='/',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click('/')).grid(row=0,column=3)
seven=Button(btn_frame,text='7',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click(7)).grid(row=1,column=0)
eight=Button(btn_frame,text='8',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click(8)).grid(row=1,column=1)
nine=Button(btn_frame,text='9',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click(9)).grid(row=1,column=2)
multiply=Button(btn_frame,text='*',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click('*')).grid(row=1,column=3)
four=Button(btn_frame,text='4',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click(4)).grid(row=2,column=0)
five=Button(btn_frame,text='5',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click(5)).grid(row=2,column=1)
six=Button(btn_frame,text='6',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click(6)).grid(row=2,column=2)
minus=Button(btn_frame,text='-',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click('-')).grid(row=2,column=3)
one=Button(btn_frame,text='1',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click(1)).grid(row=3,column=0)
two=Button(btn_frame,text='2',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click(2)).grid(row=3,column=1)
three=Button(btn_frame,text='3',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click(3)).grid(row=3,column=2)
plus=Button(btn_frame,text='+',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click('+')).grid(row=3,column=3)
zero=Button(btn_frame,text='0',bg='grey',fg='black',width=21,height=3,command=lambda:btn_click(0)).grid(row=4,columnspan=2)
point=Button(btn_frame,text='.',bg='grey',fg='black',width=10,height=3,command=lambda:btn_click('.')).grid(row=4,column=2)
equalto=Button(btn_frame,text='=',bg='grey',fg='black',width=10,height=3,command=btn_equal).grid(row=4,column=3)
# Enter the Tk event loop; blocks until the window is closed.
window.mainloop()
# -*- coding: utf-8 -*-
# Copyright (c) 2007-2013 NovaReto GmbH
# cklinger@novareto.de
from .interfaces import IPageTop, IFooter, INavigation, IAboveContent, IBelowContent
from five import grok
from grokcore.layout import Layout
from plone import api as ploneapi
from uvc.api import api
from Products.CMFCore.interfaces import ISiteRoot
from uvc.shards.components import ShardsAsViews
from uvc.shards.interface import IShardedView
from zope import interface
from zope.component import getMultiAdapter
from plone.app.folder.nextprevious import NextPrevious
from Products.CMFCore.interfaces import IContentish
from nva.magazinfolder.interfaces import IAnonymousLayer
# Register the 'templates' directory for the grok views declared below.
api.templatedir('templates')
class NPWebMag(NextPrevious):
    """Next/previous provider that honours an 'excludenextprev' marker."""

    def getData(self, obj):
        """ return the expected mapping, see `INextPreviousProvider` """
        # Objects explicitly excluded from next/previous navigation.
        if getattr(obj, 'excludenextprev', False):
            return None
        if not self.security.checkPermission('View', obj):
            return None
        if not IContentish.providedBy(obj):
            # do not return a not contentish object
            # such as a local workflow policy for example (#11234)
            return None
        portal_type = obj.portal_type
        url = obj.absolute_url()
        if portal_type in self.vat:  # "use view action in listings"
            url += '/view'
        return {
            'id': obj.getId(),
            'url': url,
            'title': obj.Title(),
            'description': obj.Description(),
            'portal_type': portal_type,
        }
class NewsPaperLayout(Layout):
    """Layout that derives OpenGraph metadata (title/description/image/url)
    from the current context, falling back to the enclosing Magazinfolder's
    default image."""
    api.context(interface.Interface)
    grok.layer(IAnonymousLayer)

    def getAcquisitionChain(self, context):
        """Yield *context* and its acquisition parents up to and including
        the site root.

        Raises RuntimeError if an object without an acquisition parent is
        reached before the site root.
        """
        node = context.aq_inner
        while node is not None:
            yield node
            if ISiteRoot.providedBy(node):
                break
            if not hasattr(node, "aq_parent"):
                # BUG FIX: the original referenced an undefined name 'parent'
                # here, raising NameError instead of the intended error.
                raise RuntimeError("Parent traversing interrupted by object: " + str(node))
            node = node.aq_parent

    def update(self):
        """Populate self.og_* attributes for the template."""
        self.og_title = ''
        self.og_description = ''
        self.og_image = ''
        self.og_url = self.context.absolute_url() + '/document_view'
        # Title: prefer a news-specific title over the generic one.
        if self.context.title:
            self.og_title = self.context.title
        if getattr(self.context, 'newstitle', None):
            self.og_title = self.context.newstitle
        # Description: prefer the news text over the plain description.
        if self.context.description:
            self.og_description = self.context.description
        if getattr(self.context, 'newstext', None):
            self.og_description = self.context.newstext
        # Image: title image, then news image, then (for Magazinfolder) the
        # folder's default image.
        if getattr(self.context, 'titleimage', None):
            self.og_image = '%s/@@images/image' % self.context.titleimage.to_object.absolute_url()
        if getattr(self.context, 'newsimage', None):
            self.og_image = '%s/@@images/newsimage' % self.context.absolute_url()
        if self.context.portal_type == 'Magazinfolder':
            if getattr(self.context, 'defaultimage', None):
                self.og_image = '%s/@@images/defaultimage' % self.context.absolute_url()
        if not self.og_image:
            # Fall back to the first ancestor Magazinfolder with a default image.
            for ancestor in self.getAcquisitionChain(self.context):
                if ancestor.portal_type == 'Magazinfolder':
                    # Guarded with getattr: chain objects need not define the
                    # attribute at all (the original raised AttributeError).
                    if getattr(ancestor, 'defaultimage', None):
                        self.og_image = '%s/@@images/defaultimage' % ancestor.absolute_url()
        return
class NavigationManager(api.ViewletManager):
    """Viewlet manager for the site navigation slot (anonymous layer only)."""
    api.name('navigation')
    api.implements(INavigation)
    api.context(interface.Interface)
    grok.layer(IAnonymousLayer)
class PageTop(api.ViewletManager):
    """Viewlet manager for the page-top slot; also computes next/previous
    navigation links for the current context."""
    api.implements(IPageTop)
    api.context(interface.Interface)
    grok.layer(IAnonymousLayer)

    def nextprevious(self):
        """Return {'next': ..., 'previous': ...} for self.context.

        Best-effort: any failure (missing path root, provider errors) yields
        {'next': None, 'previous': None} instead of raising.
        """
        portal = ploneapi.portal.get()
        # First path segment below the portal root identifies the container.
        pathroot = self.context.absolute_url_path().split('/')[1]
        try:  # BBB
            nextprev = NPWebMag(portal[pathroot])
            return {'next': nextprev.getNextItem(self.context),
                    'previous': nextprev.getPreviousItem(self.context)}
        except Exception:
            # BUG FIX: narrowed from a bare 'except:' which also swallowed
            # SystemExit/KeyboardInterrupt; behaviour stays best-effort.
            return {'next': None, 'previous': None}
class AboveContent(api.ViewletManager):
    """Viewlet manager rendered above the main content area."""
    api.implements(IAboveContent)
    api.context(interface.Interface)
    grok.layer(IAnonymousLayer)
class BelowContent(api.ViewletManager):
    """Viewlet manager rendered below the main content area."""
    api.implements(IBelowContent)
    api.context(interface.Interface)
    grok.layer(IAnonymousLayer)
class Footer(api.ViewletManager):
    """Viewlet manager for the page footer slot."""
    api.implements(IFooter)
    api.context(interface.Interface)
    grok.layer(IAnonymousLayer)
class BSPage(api.Page):
    """Abstract base page for this theme: sharded rendering combined with
    the newspaper layout."""
    api.implements(IShardedView)
    shards = ShardsAsViews()
    api.baseclass()
    # Layout class instantiated per request; subclasses may override.
    layoutClass = NewsPaperLayout
    def _get_layout(self):
        # Instantiate the configured layout for the current request/context.
        return self.layoutClass(self.request, self.context)
    def application_url(self):
        # Resolve the portal root URL via the plone_portal_state helper view.
        context = self.context.aq_inner
        portal_state = getMultiAdapter(
            (context, self.request), name=u'plone_portal_state')
        return portal_state.portal_url()
|
import rospy, os, sys, time
from sensor_msgs.msg import Image
import cv2, copy
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
from detecto import core, utils, visualize
from PIL import Image as Img
import matplotlib.pyplot as plt
class Digit_Perception:
    """ROS helper: runs an object detector on the chest camera feed and
    maintains a normalized per-object belief over class labels."""
    def __init__(self):
        rospy.Subscriber('/forward_chest_realsense_d435/color/image_raw', Image, self.camera_callback)
        self.camera_feed = None  # latest frame (RGB-converted) for display
        self.obs = None  # latest frame as a PIL image, fed to the detector
        self.obj_list = ['banana', 'cube', 'sponge','lion', 'chlorox','tea']
        # NOTE(review): hard-coded absolute checkpoint path; breaks on any
        # other machine -- consider a ROS param or package-relative path.
        self.model = core.Model.load('/home/alphonsus/research/digit/digit_ws/src/digit_perception/perception_models/detector_v3.pth', self.obj_list)
        self.bridge = CvBridge()
        self.data_ind = 1
    def camera_callback(self, image_data):
        """Cache the newest camera frame in both PIL and OpenCV forms."""
        # Reinterpret the raw byte buffer as (H, W, C); channels inferred.
        img = np.frombuffer(image_data.data, dtype=np.uint8).reshape(image_data.height, image_data.width, -1)
        imge = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.obs = Img.fromarray(img) #for predictions
        self.camera_feed = imge #for display
        self.disp = copy.deepcopy(imge)
    def detect_objects(self):
        """Run the detector on the last frame, cluster overlapping boxes, and
        return a normalized scene dict (or None if no frame arrived yet)."""
        if self.obs is not None:
            predictions = self.model.predict(self.obs)
            labels, boxes, scores = predictions
            boxes = boxes.numpy()
            scores = scores.numpy()
            num_observed = len(labels)
            observed = {}
            preds = []
            idd = 0
            # Wrap each raw detection in a dict; the random colour is only
            # used for display.
            for i in range(num_observed):
                dicts = {'name':labels[i],
                         'id':idd,
                         'coordinates': boxes[i],
                         'confidence':scores[i],
                         'color': (np.random.randint(255),
                                   np.random.randint(255),
                                   np.random.randint(255))
                         }
                preds.append(dicts)
                observed[labels[i]] = dicts
                idd+=1
            # Greedily cluster boxes whose centres fall within 15 px of an
            # existing cluster's key point (encoded as "x_y" strings).
            clusters = {}
            for box in preds:
                fit = False
                mid = ((box['coordinates'][0] + box['coordinates'][2])/2,
                       (box['coordinates'][1] + box['coordinates'][3])/2)
                for key in clusters:
                    kcd = key.split('_')
                    ikcd = [float(i) for i in kcd]
                    dist = np.sqrt((ikcd[0] - mid[0])**2 + (ikcd[1]-mid[1])**2)
                    if dist < 15:
                        clusters[key].append(box)
                        fit = True
                        break
                if not fit:
                    clusters[str(mid[0])+'_'+str(mid[1])] = [box]
            # Keep a cluster only when its best hypothesis passes the
            # confidence (> 0.1) and image-region (y > 70) gates.
            scene = {}
            for key in clusters:
                weights = [b['confidence'] for b in clusters[key]]
                maxind = np.argmax(weights)
                maxbox = clusters[key][maxind]
                if maxbox['confidence']>0.1 and maxbox['coordinates'][1]>70:
                    name = maxbox['name']
                    if name not in scene:
                        scene[name] = [(box['name'], box['confidence'],
                                        [int(b) for b in box['coordinates']]) for box in clusters[key]]
                    else:
                        scene[name].append((name, maxbox['confidence'], [int(b) for b in maxbox['coordinates']]))
            norm_scene = self.normalize_weights(scene)
            self.scene_belief = self.compute_scene_belief(norm_scene)
            return norm_scene
    def compute_scene_belief(self, scene):
        """Collapse each scene item to its [label, weight] hypothesis list."""
        num_objects = len(self.obj_list)
        beliefs = []
        for item in scene:
            hypotheses = []; iih=[]; wih=[]
            cd = scene[item][0][2]
            gx = int((cd[0]+cd[2])/2); gy = int((cd[1]+cd[3])/2)
            # NOTE(review): num_objects, iih, wih, gx and gy are computed but
            # never used -- apparent leftovers from an earlier version.
            for hypothesis in scene[item]:
                s = [hypothesis[0], hypothesis[1]]
                hypotheses.append(s)
            beliefs.append(hypotheses)
        return beliefs
    def normalize_weights(self, scene):
        """Pad each item's weights over the full object list and renormalize
        so the per-item weights sum to one."""
        norm_scene = {}
        for item in scene:
            names=[]; weights=[];coord=[]; ids=[]
            norm_scene[item]=[]
            for name,wt,cd in scene[item]:
                if name not in names:
                    names.append(name)
                    weights.append(wt)
                    coord.append([int(c) for c in cd])
            # Spread the leftover probability mass evenly over unseen labels.
            rest = len(self.obj_list)-len(weights)
            if rest != 0:
                p = np.abs(1-np.sum(weights))/(len(self.obj_list)-len(weights))
                for i in range(rest):
                    weights.append(p)
            summ = np.sum(weights)
            norm_wt = weights/summ
            for n in self.obj_list:
                if n not in names:
                    names.append(n)
                    # NOTE(review): unseen labels reuse the last observed box;
                    # would IndexError if scene[item] were empty -- confirm
                    # upstream always supplies at least one hypothesis.
                    coord.append(coord[-1])
            if len(names) > 0:
                for name, wt, cd in zip(names, norm_wt, coord):
                    norm_scene[item].append((name,wt,cd))
            else:
                norm_scene.pop(item, None)
        return norm_scene
    def annotate_object_beliefs(self, scene):
        """Draw each item's top hypothesis box and label onto the feed."""
        camera_view = self.camera_feed
        for item in scene:
            nm, cf, cd = scene[item][0]
            color = (np.random.randint(255),
                     np.random.randint(125),
                     np.random.randint(100))
            camera_view = cv2.rectangle(camera_view, (int(cd[0]),
                int(cd[1])), (int(cd[2]), int(cd[3])),color , 1)
            cv2.putText(camera_view, nm+':'+str(round(cf,2)), (int(cd[0]),int(cd[1])-10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color,2)
        return camera_view
    def run_detection(self):
        """Generator: yields one normalized scene belief per processed frame
        until ROS shuts down."""
        while not rospy.is_shutdown():
            if self.camera_feed is not None:
                belief = self.detect_objects()
                camera_view = self.annotate_object_beliefs(belief)
                # cv2.imshow('Test',self.disp)
                # if cv2.waitKey(1) & 0xFF == ord('q'):
                #     break
                yield belief
if __name__ == "__main__":
    rospy.init_node('digit_perception')
    try:
        dp = Digit_Perception()
        # BUG FIX: the original called next(dp.run_detection()) every loop
        # iteration, constructing (and discarding) a fresh generator each
        # time.  Create it once and keep pulling from it.
        detections = dp.run_detection()
        while not rospy.is_shutdown():
            bel = next(detections)
            print(bel)
            print('')
    except KeyboardInterrupt:
        rospy.loginfo('Shutting down')
    except StopIteration:
        # run_detection ends when rospy shuts down mid-iteration.
        pass
    rospy.spin()
|
"""
test_generator.py
tests the solution to the generator lab
can be run with py.test or nosetests
"""
import generator as gen
def test_intsum():
    """intsum yields the running triangular numbers 0, 1, 3, 6, ..."""
    g = gen.intsum()
    for expected in (0, 1, 3, 6, 10, 15):
        assert next(g) == expected
def test_doubler():
    """doubler yields powers of two starting from 1."""
    g = gen.doubler()
    for expected in (1, 2, 4, 8, 16, 32):
        assert next(g) == expected
    # Six pulls above plus ten more lands on 2**15.
    for _ in range(10):
        latest = next(g)
    assert latest == 2 ** 15
#
def test_fib():
    """fib yields the Fibonacci sequence starting 1, 1, 2, ..."""
    g = gen.fib()
    expected = [1, 1, 2, 3, 5, 8, 13, 21, 34]
    assert [next(g) for _ in range(9)] == expected
def test_square():
    """square yields the squares of the doubling sequence."""
    g = gen.square()
    expected = (1, 4, 16, 49, 121, 256)
    for value in expected:
        assert next(g) == value
|
import math

# Read an integer and print its square root.
n = int(input("enter num"))
if n < 0:
    # BUG FIX: n ** 0.5 on a negative int silently produced a complex number
    # (e.g. (-4) ** 0.5), which was printed as a confusing "square root".
    print("square root is not defined for negative", n)
else:
    result = math.sqrt(n)
    print("square root of", n, "is : ", result)
# -*- coding: utf-8 -*-
import glob
import os

import matplotlib.image as mpimg
import numpy as np
def EST_NOISE(images):
    """Implementation of EST_NOISE in Chapter 2 of Trucco and Verri.

    Given a stack of frames (first axis indexes the frame), return the
    per-pixel sample standard deviation.
    """
    frame_count = images.shape[0]
    mean_image = images.sum(axis=0) / frame_count
    variance = ((images - mean_image) ** 2).sum(axis=0) / (frame_count - 1)
    return np.sqrt(variance)
def Load_Images(path, imgtype="*.jpg"):
    """Load frame images from a folder.

    Parameters
    ----------
    path: string
        Image directory.  A trailing path separator is no longer required.
    imgtype: string
        Glob pattern of the images to load.

    Returns
    -------
    np.ndarray
        Array stacking every matched image (empty when nothing matches).
    """
    # BUG FIX: the original built the pattern as f"{path}{imgtype}", which
    # silently matched nothing unless 'path' ended with a separator.
    loadpath = os.path.join(path, imgtype)
    all_img_path = glob.glob(loadpath)
    return np.array([mpimg.imread(one_img_path) for one_img_path in all_img_path])
def Gen_Gaussian_Filter(dim, sigma, size=0):
    """Generate a normalized 1D or 2D Gaussian filter.

    Parameters
    ----------
    dim: int
        Dimension of the filter (1 or 2).
    sigma: float
        Standard deviation of the Gaussian.
    size: int
        Minimum filter width; the effective width is
        max(2*ceil(2*sigma) + 1, size), rounded up to the next odd number
        so the kernel covers at least +/- 2 sigma.

    Returns
    -------
    np.ndarray
        A kernel that sums to 1: shape (n,) when dim == 1, (n, n) when
        dim == 2.
    """
    n = int(max(2 * np.ceil(2 * sigma) + 1, size))
    ovrlay = n // 2
    inds = np.arange(-ovrlay, ovrlay + 1)
    gaussian_1d = np.exp(-inds**2 / (2 * sigma**2))
    # Normalize once; the scalar sum needs no reshape (the original called
    # .reshape((-1, 1)) on a scalar, yielding a (1, n)-shaped "1D" kernel).
    gaussian_1d = gaussian_1d / gaussian_1d.sum()
    if dim == 2:
        # BUG FIX: 'gaussian_1d * gaussian_1d.T' squares elementwise because
        # a 1D array's .T is itself; the separable 2D kernel is the outer
        # product of the (already normalized) 1D kernel with itself.
        return np.outer(gaussian_1d, gaussian_1d)
    return gaussian_1d
def Gen_Box_Filter(n):
    """Generate a 2D box (mean) filter.

    Parameters
    ----------
    n: int
        Side length of the square kernel.

    Returns
    -------
    np.ndarray
        An (n, n) kernel whose entries all equal 1/n**2, so it sums to 1.
    """
    # DOC FIX: the original docstring wrongly described this as a Gaussian.
    size = int(n)
    box_mask = np.ones((size, size)) / (size ** 2)
    return box_mask
|
# Compare two whitespace-separated strings position by position and print
# how many positions (up to the longer length) never matched.
st11, st22 = map(str, input().split())
if len(st11) < len(st22):
    s = len(st11)
    zero_pop = len(st22)
else:
    s = len(st22)
    zero_pop = len(st11)
# One flag per position of the longer string; '0' means "no match".
lst1 = ['0'] * zero_pop
# PERF: the substring containment test does not depend on the loop index,
# so it is hoisted out of the loop (the original re-evaluated it each pass).
is_substring = st22 in st11 or st11 in st22
for i1 in range(s):
    if is_substring:
        lst1[i1] = '1'
    if st11[i1] == st22[i1]:
        lst1[i1] = '1'
print(lst1.count('0'))
|
# Lint as: python3
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for the parser that implements token peeking/popping."""
from typing import Text, Sequence, Union, Optional
from xls.dslx.parse_error import ParseError
from xls.dslx.python.cpp_pos import Pos
from xls.dslx.python.cpp_scanner import Keyword
from xls.dslx.python.cpp_scanner import Scanner
from xls.dslx.python.cpp_scanner import Token
from xls.dslx.python.cpp_scanner import TokenKind
class TokenParser(object):
  """Single-token lookahead parser (base class) that wraps a scanner."""

  def __init__(self, scanner: Scanner):
    self._scanner = scanner
    # One-token lookahead buffer; populated lazily by _peekt().
    self._lookahead: Optional[Token] = None

  def _get_pos(self) -> Pos:
    # Position of the buffered token if any, else the scanner's position.
    if self._lookahead:
      return self._lookahead.span.start
    return self._scanner.pos

  def _at_eof(self) -> bool:
    return self._scanner.at_eof()

  def _peekt(self) -> Token:
    """Returns token that has been peeked at (non-destructively) from stream."""
    if self._lookahead is None:
      self._lookahead = self._scanner.pop()
    assert self._lookahead is not None
    return self._lookahead

  def _popt(self) -> Token:
    """Returns a token that has been popped destructively from token stream."""
    tok = self._peekt()
    self._lookahead = None
    return tok

  def _dropt(self) -> None:
    """Wraps _popt() to signify popping a token without needing the value."""
    self._popt()

  def _peekt_is(self, target: TokenKind) -> bool:
    # Guard against the easy mistake of passing a Keyword where a TokenKind
    # is expected (keywords have their own predicate below).
    assert not isinstance(target, Keyword), \
        'Not a token kind: {!r}'.format(target)
    return self._peekt().kind == target

  def _peekt_is_keyword(self, target: Keyword) -> bool:
    return self._peekt().is_keyword(target)

  def _peekt_is_identifier(self, target: Text) -> bool:
    return self._peekt().is_identifier(target)

  def _peekt_in(self, targets: Sequence[Union[TokenKind, Keyword]]) -> bool:
    # True if the peeked token matches any TokenKind or Keyword in targets.
    tok = self._peekt()
    for target in targets:
      if isinstance(target, TokenKind) and tok.kind == target:
        return True
      if isinstance(target, Keyword) and tok.is_keyword(target):
        return True
    return False

  def _try_popt(self, target: TokenKind) -> bool:
    """Pops the next token iff it is of kind 'target'; returns success."""
    tok = self._peekt()
    if tok.kind == target:
      self._dropt()
      return True
    return False

  def _try_pop_keyword(self, target: Keyword) -> bool:
    """Pops the next token iff it is the keyword 'target'; returns success."""
    tok = self._peekt()
    if tok.is_keyword(target):
      self._dropt()
      return True
    return False

  def _try_pop_identifier_token(self, target: Text) -> Optional[Token]:
    """Pops and returns the next token iff it is identifier 'target'."""
    tok = self._peekt()
    if tok.is_identifier(target):
      return self._popt()
    return None

  def _try_pop_identifier(self, target: Text) -> bool:
    return bool(self._try_pop_identifier_token(target))

  def _popt_or_error(self,
                     target: TokenKind,
                     start: Optional[Token] = None,
                     context: Optional[Text] = None) -> Token:
    """Pops a token of the target kind 'target' or raises a ParseError."""
    tok = self._peekt()
    if tok.kind == target:
      return self._popt()
    # Build an error message; 'start' lets us point at the construct that
    # opened the expectation (e.g. the matching open paren).
    if start is None:
      msg = "Expected '{}', got '{}'".format(target.value, tok.to_error_str())
    else:
      msg = ("Expected '{}' for construct starting with '{}' @ {}, "
             "got '{}'").format(target.value, start.to_error_str(), start.span,
                                tok.to_error_str())
    if context:
      msg += ': ' + context
    raise ParseError(tok.span, msg)

  def _dropt_or_error(self,
                      target: TokenKind,
                      start: Optional[Token] = None,
                      context: Optional[Text] = None) -> None:
    """Just a wrapper around _popt_or_error that doesn't return the token.

    This helps to signify that the intent was to drop the token in caller code
    vs 'forgetting' to do something with a popped token.

    Args:
      target: The token kind that we want to pop from the token stream; if this
        is not the kind we observe from the pop, we raise an error.
      start: An optional 'start of the parsing construct' token for use in error
        messages; e.g. pointing out the start of an open paren when we're trying
        to pop a corresponding closing paren.
      context: Context string to be used in reporting an error.

    Raises:
      ParseError: When the popped token is not of kind 'target'.
    """
    self._popt_or_error(target, start=start, context=context)

  def _pop_keyword_or_error(self,
                            keyword: Keyword,
                            context: Optional[Text] = None) -> Token:
    """Pops a token of target keyword and returns it or raises a ParseError."""
    tok = self._popt()
    if tok.is_keyword(keyword):
      return tok
    msg = 'Expected keyword \'{}\', got {}'.format(keyword.value.lower(),
                                                   tok.to_error_str())
    if context:
      msg += ': ' + context
    raise ParseError(tok.span, msg)

  def _drop_keyword_or_error(self, keyword: Keyword) -> None:
    # Keyword analogue of _dropt_or_error: pop-and-discard or raise.
    self._pop_keyword_or_error(keyword)
|
from Bio import SeqIO
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
# Read one ABI (Sanger) chromatogram and pull the raw base-call string.
file_name = 'MATN3_DES136-7_2-1RD072018-02-14-16-04-15_copy.ab1'
record = SeqIO.read(file_name, 'abi')
# 'PBAS1' is the primary base-call entry in the ABIF raw data block.
sequence = record.annotations['abif_raw']['PBAS1']
def abi_trim(seq_record):
    """Locate the high-quality region of a trace via Mott's trimming.

    Builds a cumulative score from the per-base Phred qualities (error
    probability vs. a 0.05 cutoff) and returns [trim_start, trim_finish]
    indices into the sequence.  Sequences no longer than the minimum
    segment length are returned untrimmed as [0, len(seq_record)].
    """
    start = False  # flag for starting position of trimmed sequence
    segment = 20  # minimum sequence length
    trim_start = 0  # init start index
    cutoff = 0.05  # default cutoff value for calculating base score
    if len(seq_record) <= segment:
        # BUG FIX: the original returned the record object here while every
        # other path returns index pairs; callers slice with the result, so
        # short inputs crashed.  Return a no-op trim window instead.
        return [0, len(seq_record)]
    # Positive score = base better than the cutoff quality.
    score_list = [cutoff - (10 ** (qual / -10.0)) for qual in
                  seq_record.letter_annotations['phred_quality']]
    cummul_score = [0]
    for i in range(1, len(score_list)):
        score = cummul_score[-1] + score_list[i]
        if score < 0:
            # Never let the running score go negative (Mott's algorithm).
            cummul_score.append(0)
        else:
            cummul_score.append(score)
            if not start:
                trim_start = i  # first positive score opens the good region
                start = True
    # The good region ends where the cumulative score peaks.
    trim_finish = cummul_score.index(max(cummul_score))
    return [trim_start, trim_finish]
# Trim the trace, slice the raw base calls to the kept window, then align
# the trimmed fragment against the full sequence for visual inspection.
trimmed_position = abi_trim(record)
trimmed_sequence = sequence[trimmed_position[0]:trimmed_position[1]]
alignments = pairwise2.align.globalxx(trimmed_sequence,sequence)
print(format_alignment(*alignments[0]))
|
import os
import zipfile
# Extract every .zip archive in the current directory in place, then delete
# the archive.
for file in os.listdir():
    print(file)
    if(file.endswith(".zip")):
        with zipfile.ZipFile(file, 'r') as zip_ref:
            zip_ref.extractall('.')
        # NOTE(review): indentation was lost in this paste; os.remove is
        # reconstructed inside the .zip branch (after extraction) -- placing
        # it outside would delete every listed file.  Confirm against the
        # original source.
        os.remove(file)
|
Title: Creative Crossroads
Overview: A website dedicated to showcasing a full cross-section of artistic talents.
Capstone Goals:
-Images
-Writings
-Voting system
-User Accounts
-Comments
-Some sort of API integration
Extended Goals:
-Audio works
-Videography (embedded, not hosted)
-Combination entry
-Tiered administrator Accounts
-Private access portfolios (password-protected/shareable)
-Tutorials/Tips and Tricks
-Discussion forums
-Selectable site themes
-Scalable flex site for mobile use
-Keywords and search functionality
|
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
import datetime
from ..models.projects import Project, ProjectSeries
from ..models.authors import Author
from ..models.artifacts import Artifact
from ..forms.forms import ModalAddUserForm, ModalAddGroupForm
class ProjectSeriesTest(TestCase):
    """Testing the project series functionality"""
    def setUp(self):
        # Every test needs a client.
        self.client = Client()
        # path for the queries to the project details
        self.path = "project_series_all"
        # Administrator account reused by the permission tests below.
        self.first_user = User.objects.create_user(
            username="test_series_user", password="test_series_user", email="b@b.com"
        )
        self.author1 = Author.objects.create(
            lastname="1",
            firstname="1f",
            gravatar_email="",
            email="1@1.fr",
            home_page_url="",
        )
        self.project = Project.objects.create(name="test_project")
        # NOTE(review): direct assignment to many-to-many fields only works
        # on older Django releases (pre-1.10); newer ones require .set().
        # Confirm against this project's pinned Django version.
        self.project.authors = [self.author1]
        self.project.administrators = [self.first_user]
    def test_project_series_empty(self):
        """Tests if the project series is ok"""
        # test non existing project: an id one past the only project.
        response = self.client.get(reverse(self.path, args=[self.project.id + 1]))
        self.assertEqual(response.status_code, 404)
        # test existing project
        response = self.client.get(reverse(self.path, args=[self.project.id]))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context["series"]), 0)
    def test_project_series_create_new_no_public(self):
        """Test the creation of a new project series, with non public visibility"""
        new_series = ProjectSeries.objects.create(
            series="1234", project=self.project, release_date=datetime.datetime.now()
        )
        # Anonymous client: a non-public series must not be listed.
        response = self.client.get(reverse(self.path, args=[self.project.id]))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context["series"]), 0)
        self.assertEqual(
            len(response.context["series"]), len(response.context["last_update"])
        )
    def test_project_series_create_new_public(self):
        """Test the creation of a new project series, with public visibility"""
        new_series = ProjectSeries.objects.create(
            series="1234",
            project=self.project,
            release_date=datetime.datetime.now(),
            is_public=True,
        )
        # Public series are visible even without authentication.
        response = self.client.get(reverse(self.path, args=[self.project.id]))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context["series"]), 1)
        self.assertEqual(
            len(response.context["series"]), len(response.context["last_update"])
        )
    def test_project_series_addview(self):
        """Test the view access of an existing project"""
        # Log in as the project administrator created in setUp.
        response = self.client.login(
            username="test_series_user", password="test_series_user"
        )
        self.assertTrue(response)
        response = self.client.get(
            reverse("project_series_add", args=[self.project.id])
        )
        self.assertEqual(response.status_code, 200)
    def test_project_series_addview_non_existing(self):
        """Test the view access of an non existing project"""
        response = self.client.login(
            username="test_series_user", password="test_series_user"
        )
        self.assertTrue(response)
        response = self.client.get(
            reverse("project_series_add", args=[self.project.id + 1])
        )
        # returns unauthorized to avoid the distinction between non existing project
        # spoofing and the authorization.
        self.assertEqual(response.status_code, 401)
    def test_project_series_addview_restrictions(self):
        """Tests if the admins have the right to modify the project configuration,
        and the others don't"""
        # user2 is not admin of this project
        user2 = User.objects.create_user(
            username="user2", password="user2", email="c@c.com"
        )
        response = self.client.login(username="user2", password="user2")
        self.assertTrue(response)
        response = self.client.get(
            reverse("project_series_add", args=[self.project.id])
        )
        self.assertEqual(response.status_code, 401)
def test_series_view_no_restricted_series(self):
"""Test permission on series that has no restriction"""
new_series = ProjectSeries.objects.create(
series="1234",
project=self.project,
release_date=datetime.datetime.now(),
is_public=True,
)
current_permission = "code_doc.series_view"
self.assertTrue(self.first_user.has_perm(current_permission, new_series))
self.assertFalse(self.first_user.has_perm(current_permission))
self.assertIn(
current_permission, self.first_user.get_all_permissions(new_series)
)
self.assertNotIn(current_permission, self.first_user.get_all_permissions())
user2 = User.objects.create_user(
username="user2", password="user2", email="c@c.com"
)
self.assertTrue(user2.has_perm(current_permission, new_series))
self.assertIn(current_permission, user2.get_all_permissions(new_series))
self.assertNotIn(current_permission, user2.get_all_permissions())
def test_series_view_with_restriction_on_series(self):
new_series = ProjectSeries.objects.create(
series="1234", project=self.project, release_date=datetime.datetime.now()
)
new_series.view_users.add(self.first_user)
current_permission = "code_doc.series_view"
self.assertTrue(self.first_user.has_perm(current_permission, new_series))
self.assertFalse(self.first_user.has_perm(current_permission))
self.assertIn(
current_permission, self.first_user.get_all_permissions(new_series)
)
self.assertNotIn(current_permission, self.first_user.get_all_permissions())
user2 = User.objects.create_user(
username="user2", password="user2", email="c@c.com"
)
self.assertFalse(user2.has_perm(current_permission, new_series))
self.assertNotIn(current_permission, user2.get_all_permissions(new_series))
self.assertNotIn(current_permission, user2.get_all_permissions())
def test_series_view_with_restriction_on_series_through_groups(self):
newgroup = Group.objects.create(name="group1")
user2 = User.objects.create_user(
username="user2", password="user2", email="c@c.com"
)
user2.groups.add(newgroup)
new_series = ProjectSeries.objects.create(
series="1234", project=self.project, release_date=datetime.datetime.now()
)
new_series.view_users.add(self.first_user)
current_permission = "code_doc.series_view"
self.assertTrue(self.first_user.has_perm(current_permission, new_series))
self.assertFalse(self.first_user.has_perm(current_permission))
self.assertIn(
current_permission, self.first_user.get_all_permissions(new_series)
)
self.assertNotIn(current_permission, self.first_user.get_all_permissions())
# user2 not yet in the group
self.assertFalse(user2.has_perm(current_permission, new_series))
self.assertNotIn(current_permission, user2.get_all_permissions(new_series))
self.assertNotIn(current_permission, user2.get_all_permissions())
# user2 now in the group
new_series.view_groups.add(newgroup)
self.assertTrue(user2.has_perm(current_permission, new_series))
self.assertIn(current_permission, user2.get_all_permissions(new_series))
self.assertNotIn(current_permission, user2.get_all_permissions())
def test_series_views_with_artifact_without_revision(self):
"""Test the view in case an artifact has no revision."""
new_series = ProjectSeries.objects.create(
series="1234",
project=self.project,
release_date=datetime.datetime.now(),
is_public=True,
)
art = Artifact.objects.create(
project=self.project, md5hash="1", artifactfile="mais_oui!"
)
art.project_series = [new_series]
# Test the series details page (one artifact with no revision)
response = self.client.get(
reverse("project_series", args=[self.project.id, new_series.id])
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["artifacts"]), 1)
self.assertEqual(len(response.context["revisions"]), 1)
self.assertIsNone(response.context["revisions"][0])
def test_modal_add_user_view_access(self):
"""Test the access to the ModalAddUserView."""
new_series = ProjectSeries.objects.create(
series="1234", project=self.project, release_date=datetime.datetime.now()
)
url = reverse("project_series_add_user", args=[self.project.id, new_series.id])
# Anonymous user (no permission)
response = self.client.get(url)
self.assertEqual(response.status_code, 401)
# Logged-in user but without permission
_ = User.objects.create_user(
username="user2", password="user2", email="c@c.com"
)
response = self.client.login(username="user2", password="user2")
self.assertTrue(response)
response = self.client.get(url)
self.assertEqual(response.status_code, 401)
# Superuser, access granted
response = self.client.login(
username="test_series_user", password="test_series_user"
)
self.assertTrue(response)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_modal_add_group_view_access(self):
"""Test the access to the ModalAddGroupView."""
new_series = ProjectSeries.objects.create(
series="1234", project=self.project, release_date=datetime.datetime.now()
)
url = reverse("project_series_add_group", args=[self.project.id, new_series.id])
# Anonymous user (no permission)
response = self.client.get(url)
self.assertEqual(response.status_code, 401)
# Logged-in user but without permission
_ = User.objects.create_user(
username="user2", password="user2", email="c@c.com"
)
response = self.client.login(username="user2", password="user2")
self.assertTrue(response)
response = self.client.get(url)
self.assertEqual(response.status_code, 401)
# Superuser, access granted
response = self.client.login(
username="test_series_user", password="test_series_user"
)
self.assertTrue(response)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_modal_add_user_via_form(self):
"""Test posting forms to add user to view permissions."""
new_series = ProjectSeries.objects.create(
series="1234", project=self.project, release_date=datetime.datetime.now()
)
# Log in as superuser and post forms
response = self.client.login(
username="test_series_user", password="test_series_user"
)
self.assertTrue(response)
path = reverse("project_series_add_user", args=[self.project.id, new_series.id])
# Create user
dirk = User.objects.create_user(
username="dirk", password="41", email="dirk@dirk.com"
)
# Dirk should not have view permissions
self.assertNotIn(dirk, new_series.view_users.all())
# Forms
form1 = ModalAddUserForm(self.project, new_series, data={})
form2 = ModalAddUserForm(
self.project, new_series, data={"username": "fake_user"}
)
form3 = ModalAddUserForm(self.project, new_series, data={"username": "dirk"})
# Form1 is empty
self.assertFalse(form1.is_valid())
response1 = self.client.post(path, form1.data)
self.assertEqual(response1.status_code, 200)
self.assertTemplateUsed(response1, "code_doc/series/modal_add_user_form.html")
self.assertFormError(response1, "form", "username", "This field is required.")
# Form2 is invalid: error
self.assertFalse(form2.is_valid())
response2 = self.client.post(path, form2.data)
self.assertEqual(response2.status_code, 200)
self.assertTemplateUsed(response2, "code_doc/series/modal_add_user_form.html")
self.assertFormError(
response2, "form", "username", "Username fake_user is not registered"
)
# Form 3 is valid: redirect to edit page via success page.
self.assertTrue(form3.is_valid())
response3 = self.client.post(path, form3.data)
self.assertEqual(response3.status_code, 200)
self.assertTemplateUsed(
response3, "code_doc/series/modal_add_user_or_group_form_success.html"
)
# Dirk should now have view permissions
self.assertIn(dirk, new_series.view_users.all())
def test_modal_add_group_via_form(self):
"""Test posting forms to add group to view permissions."""
new_series = ProjectSeries.objects.create(
series="1234", project=self.project, release_date=datetime.datetime.now()
)
# Log in as superuser and post forms
response = self.client.login(
username="test_series_user", password="test_series_user"
)
self.assertTrue(response)
path = reverse(
"project_series_add_group", args=[self.project.id, new_series.id]
)
# Create group
test_group = Group.objects.create(name="test_group")
# Test group should not have view permissions
self.assertNotIn(test_group, new_series.view_groups.all())
# Forms
form1 = ModalAddGroupForm(self.project, new_series, data={})
form2 = ModalAddGroupForm(
self.project, new_series, data={"groupname": "fake_group"}
)
form3 = ModalAddGroupForm(
self.project, new_series, data={"groupname": "test_group"}
)
# Form1 is empty: error
self.assertFalse(form1.is_valid())
response1 = self.client.post(path, form1.data)
self.assertEqual(response1.status_code, 200)
self.assertTemplateUsed(response1, "code_doc/series/modal_add_group_form.html")
self.assertFormError(response1, "form", "groupname", "This field is required.")
# Form2 is invalid: error
self.assertFalse(form2.is_valid())
response2 = self.client.post(path, form2.data)
self.assertEqual(response2.status_code, 200)
self.assertTemplateUsed(response2, "code_doc/series/modal_add_group_form.html")
self.assertFormError(
response2, "form", "groupname", "Group fake_group is not registered"
)
# Form 3 is valid: redirect to edit page via success page.
self.assertTrue(form3.is_valid())
response3 = self.client.post(path, form3.data)
self.assertEqual(response3.status_code, 200)
self.assertTemplateUsed(
response3, "code_doc/series/modal_add_user_or_group_form_success.html"
)
# Test group should now have view permissions
self.assertIn(test_group, new_series.view_groups.all())
    def test_project_series_user_permissions_rendering(self):
        """Test the rendering of the user permissions.

        Checks the 'user_permissions' context variable of the series add and
        edit views: on creation only the current user is shown (pre-checked
        and disabled); on edit, exactly the users holding at least one
        permission are shown with the correct checkbox states.
        """
        from .tests import generate_random_string
        # Number of users to create
        num_xtra_users = 20
        for i in range(num_xtra_users):
            User.objects.create_user(
                username=generate_random_string(),
                password="password_" + str(i),
                email="user_%s@mail.com" % i,
            )
        response = self.client.login(
            username="test_series_user", password="test_series_user"
        )
        self.assertTrue(response)
        # Case 1: creating a series
        # We should see only the current user
        response = self.client.get(
            reverse("project_series_add", args=[self.project.id])
        )
        self.assertEqual(response.status_code, 200)
        all_users = User.objects.all()
        perms = response.context["user_permissions"]
        self.assertEqual(len(perms), 1)
        # The creator's checkboxes are all pre-selected and locked.
        for user, checks in perms:
            self.assertEqual(user.username, self.first_user.username)
            for check in checks:
                self.assertTrue(check.data["selected"])
                self.assertTrue(check.data["attrs"]["disabled"])
        # Case 2: editing a series
        # We should see all the users that have at least one permission
        new_series = ProjectSeries.objects.create(
            series="1234", project=self.project, release_date=datetime.datetime.now()
        )
        # Spread the three permissions over the users so that every
        # combination of view/add/del appears in the rendered form.
        for i, user in enumerate(all_users):
            if i % 2 == 0:
                new_series.view_users.add(user)
                if i % 4 == 0:
                    new_series.perms_users_artifacts_add.add(user)
                else:
                    new_series.perms_users_artifacts_del.add(user)
            else:
                if i % 4 == 1:
                    new_series.perms_users_artifacts_add.add(user)
                    new_series.perms_users_artifacts_del.add(user)
        response = self.client.get(
            reverse("project_series_edit", args=[self.project.id, new_series.id])
        )
        self.assertEqual(response.status_code, 200)
        perms = response.context["user_permissions"]
        # NOTE(review): subscripting zip() only works on Python 2 (zip is lazy
        # on Python 3) — confirm the target interpreter version.
        rendered_users = zip(*perms)[0]
        # Database
        view_users = new_series.view_users.all()
        perm_art_add_users = new_series.perms_users_artifacts_add.all()
        perm_art_del_users = new_series.perms_users_artifacts_del.all()
        union_query = view_users.union(perm_art_add_users, perm_art_del_users)
        # Exactly the users holding at least one permission are rendered.
        for user in all_users:
            if user in union_query:
                self.assertIn(user, rendered_users)
            else:
                self.assertNotIn(user, rendered_users)
        # Each rendered checkbox state matches the database content.
        for user, checks in perms:
            for check in checks:
                # May be there is a better way to do this...
                name = check.data["name"]
                status = check.data["selected"]
                if name == "view_users":
                    self.assertEqual(status, user in view_users)
                elif name == "perms_users_artifacts_add":
                    self.assertEqual(status, user in perm_art_add_users)
                elif name == "perms_users_artifacts_del":
                    self.assertEqual(status, user in perm_art_del_users)
                else:
                    self.fail("Unknown permission name %s" % name)
def test_project_series_handle_user_permissions(self):
"""Test creating and modifying the user permissions."""
# Log in as admin
response = self.client.login(
username="test_series_user", password="test_series_user"
)
self.assertTrue(response)
# Create series
url = reverse("project_series_add", args=[self.project.id])
response_get = self.client.get(url)
self.assertEqual(response_get.status_code, 200)
# hidden field check
self.assertEqual(
response_get.context["form"]["project"].value(), self.project.id
)
release_date = [unicode(datetime.datetime.now().strftime("%Y-%m-%d"))]
data = {
"csrf_token": response_get.context["csrf_token"],
"series": "New series",
"project": response_get.context["project"].id,
"release_date": release_date,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
new_series = ProjectSeries.objects.all()[0]
series_url = new_series.get_absolute_url()
self.assertRedirects(response, series_url)
# First user has all rights because he created the series
self.assertIn(self.first_user, new_series.view_users.all())
self.assertIn(self.first_user, new_series.perms_users_artifacts_add.all())
self.assertIn(self.first_user, new_series.perms_users_artifacts_del.all())
# From now on, we will modify the permissions
url = reverse("project_series_edit", args=[self.project.id, new_series.id])
url_redirect = series_url
response_get = self.client.get(url)
self.assertEqual(response_get.status_code, 200)
# checking back that the user appears when updating the form
form = response_get.context["form"]
self.assertEqual([self.first_user.id], form["view_users"].value())
self.assertEqual(
[self.first_user.id], form["perms_users_artifacts_add"].value()
)
self.assertEqual(
[self.first_user.id], form["perms_users_artifacts_del"].value()
)
self.assertEqual(
response_get.context["form"]["project"].value(), self.project.id
)
# Removing view permissions for all users
data = {
"csrf_token": response_get.context["csrf_token"],
"series": response_get.context["series"].id,
"project": response_get.context["project"].id,
"release_date": release_date,
"view_users": [],
"perms_users_artifacts_add": [self.first_user.id],
"perms_users_artifacts_del": [self.first_user.id],
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, url_redirect)
self.assertNotIn(self.first_user, new_series.view_users.all())
self.assertIn(self.first_user, new_series.perms_users_artifacts_add.all())
self.assertIn(self.first_user, new_series.perms_users_artifacts_del.all())
response_get = self.client.get(url)
self.assertEqual(response_get.status_code, 200)
# checking the coherence of the update view
form = response_get.context["form"]
self.assertEqual([], form["view_users"].value())
self.assertEqual(
[self.first_user.id], form["perms_users_artifacts_add"].value()
)
self.assertEqual(
[self.first_user.id], form["perms_users_artifacts_del"].value()
)
self.assertEqual(
response_get.context["form"]["project"].value(), self.project.id
)
# Remove perms_users_artifacts_add
data = {
"csrf_token": response_get.context["csrf_token"],
"series": response_get.context["series"].id,
"project": response_get.context["project"].id,
"release_date": release_date,
"view_users": [],
"perms_users_artifacts_add": [],
"perms_users_artifacts_del": [self.first_user.id],
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, url_redirect)
self.assertNotIn(self.first_user, new_series.view_users.all())
self.assertNotIn(self.first_user, new_series.perms_users_artifacts_add.all())
self.assertIn(self.first_user, new_series.perms_users_artifacts_del.all())
self.assertEqual(
response_get.context["form"]["project"].value(), self.project.id
)
# checking the content of the returned form for editing (again)
response_get = self.client.get(url)
self.assertEqual(response_get.status_code, 200)
# checking the coherence of the update view
form = response_get.context["form"]
self.assertEqual([], form["view_users"].value())
self.assertEqual([], form["perms_users_artifacts_add"].value())
self.assertEqual(
[self.first_user.id], form["perms_users_artifacts_del"].value()
)
# Remove perms_users_artifacts_del
data = {
"csrf_token": response_get.context["csrf_token"],
"series": response_get.context["series"].id,
"project": response_get.context["project"].id,
"release_date": release_date,
"view_users": [],
"perms_users_artifacts_add": [],
"perms_users_artifacts_del": [],
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, url_redirect)
self.assertNotIn(self.first_user, new_series.view_users.all())
self.assertNotIn(self.first_user, new_series.perms_users_artifacts_add.all())
self.assertNotIn(self.first_user, new_series.perms_users_artifacts_del.all())
self.assertEqual(
response_get.context["form"]["project"].value(), self.project.id
)
response_get = self.client.get(url)
self.assertEqual(response_get.status_code, 200)
# checking the coherence of the update view
form = response_get.context["form"]
self.assertEqual([], form["view_users"].value())
self.assertEqual([], form["perms_users_artifacts_add"].value())
self.assertEqual([], form["perms_users_artifacts_del"].value())
# Let's try to give first_user back all his permissions
# It won't work because the user is not among the available choices anymore
# (one needs to add him through the modal)
data = {
"csrf_token": response_get.context["csrf_token"],
"series": response_get.context["series"].id,
"project": response_get.context["project"].id,
"release_date": release_date,
"view_users": [self.first_user.id],
"perms_users_artifacts_add": [self.first_user.id],
"perms_users_artifacts_del": [self.first_user.id],
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
# Check form errors
for m2m_field in (
"view_users",
"perms_users_artifacts_add",
"perms_users_artifacts_del",
):
self.assertFormError(
response,
"form",
m2m_field,
"Select a valid choice. %s is not one of the available choices."
% self.first_user.id,
)
self.assertNotIn(self.first_user, new_series.view_users.all())
self.assertNotIn(self.first_user, new_series.perms_users_artifacts_add.all())
self.assertNotIn(self.first_user, new_series.perms_users_artifacts_del.all())
# now does the checks with several users
list_users = [] # self.first_user not part of it
for i in range(10):
user = User.objects.create_user(
username="userXXX%d" % i,
password="test_series_user",
email="b%d@b.com" % i,
)
list_users.append(user)
# what we want now is to check that mixing things up with the permissions and several users
# does consistent work on saving
import random
all_users_to_check = random.sample(list_users, 7)
user_permissions = {}
for index, user in enumerate(all_users_to_check):
perm = [
"view_users",
"perms_users_artifacts_add",
"perms_users_artifacts_del",
][
index % 3
] # to make sure we have one of each
getattr(new_series, perm).add(user)
user_permissions[user] = perm
# now rendering the form
# we should see all users
response_get = self.client.get(url)
self.assertEqual(response_get.status_code, 200)
form = response_get.context["form"]
for perm in (
"view_users",
"perms_users_artifacts_add",
"perms_users_artifacts_del",
):
list_users = [
user
for user, permission in user_permissions.items()
if permission == perm
]
self.assertEqual(
[_.id for _ in sorted(list_users, key=lambda x: x.username)],
form[perm].value(),
)
for user in list_users + [self.first_user]:
if user in all_users_to_check:
self.assertContains(response_get, user.username, 1)
else:
if user is self.first_user:
self.assertContains(
response_get, user.username, 1
) # login button only
else:
self.assertContains(response_get, user.username, 0)
# we should see the correct permissions for all of the users
sorted_users = sorted(all_users_to_check, key=lambda x: x.username)
for user in all_users_to_check:
index = sorted_users.index(user)
for perm in [
"view_users",
"perms_users_artifacts_add",
"perms_users_artifacts_del",
]:
if getattr(new_series, perm).filter(id=user.id).count() == 1:
checked = "checked"
else:
checked = ""
self.assertContains(
response_get,
'<input type="checkbox" name="{permission}" value="{userid}" {checked} id="id_{permission}_{index}" />'.format(
permission=perm, index=index, userid=user.id, checked=checked
),
count=1,
html=True,
)
# now we are rotating some permissions and checking that things are correct at save time
users_artifact_add = [
_
for _ in all_users_to_check
if user_permissions[_] == "perms_users_artifacts_add"
]
users_view = [
_ for _ in all_users_to_check if user_permissions[_] == "view_users"
]
user1, user2 = random.choice(users_artifact_add), random.choice(users_view)
form = response_get.context["form"]
data = {
"csrf_token": response_get.context["csrf_token"],
"series": response_get.context["series"].id,
"project": response_get.context["project"].id,
"release_date": release_date,
"view_users": form["view_users"].value() + [user1.id],
"perms_users_artifacts_add": form["perms_users_artifacts_add"].value()
+ [user2.id],
"perms_users_artifacts_del": form["perms_users_artifacts_del"].value(),
}
# cannot handle the ids directly
if 0:
for user in all_users_to_check:
index = sorted_users.index(user)
for perm in [
"view_users",
"perms_users_artifacts_add",
"perms_users_artifacts_del",
]:
data["id_{perm}_{index}".format(perm=perm, index=index)] = True
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, url_redirect)
response_get = self.client.get(url)
print(response_get)
# all previous users should be here
for user in list_users + [self.first_user]:
if user in all_users_to_check:
self.assertContains(response_get, user.username, 1)
else:
if user is self.first_user:
self.assertContains(
response_get, user.username, 1
) # login button only
else:
self.assertContains(response_get, user.username, 0)
#
self.assertNotIn(self.first_user, new_series.view_users.all())
self.assertNotIn(self.first_user, new_series.perms_users_artifacts_add.all())
self.assertNotIn(self.first_user, new_series.perms_users_artifacts_del.all())
self.assertIn(user1, new_series.view_users.all())
self.assertIn(user2, new_series.view_users.all())
self.assertIn(user1, new_series.perms_users_artifacts_add.all())
self.assertIn(user2, new_series.perms_users_artifacts_add.all())
self.assertNotIn(user1, new_series.perms_users_artifacts_del.all())
self.assertNotIn(user2, new_series.perms_users_artifacts_del.all())
    def test_project_series_group_permissions_rendering(self):
        """Test the rendering of the group permissions.

        Checks the 'group_permissions' context variable of the series add and
        edit views: on creation no group is shown; on edit, exactly the groups
        holding at least one permission are shown with the correct checkbox
        states.
        """
        from .tests import generate_random_string
        # Number of groups to create
        num_xtra_groups = 20
        for i in range(num_xtra_groups):
            Group.objects.create(name=generate_random_string())
        response = self.client.login(
            username="test_series_user", password="test_series_user"
        )
        self.assertTrue(response)
        # Case 1: creating a series
        # We should not see any group
        response = self.client.get(
            reverse("project_series_add", args=[self.project.id])
        )
        self.assertEqual(response.status_code, 200)
        perms = response.context["group_permissions"]
        self.assertEqual(len(perms), 0)
        # Case 2: editing a series
        # We should see all the groups that have at least one permission
        new_series = ProjectSeries.objects.create(
            series="1234", project=self.project, release_date=datetime.datetime.now()
        )
        all_groups = Group.objects.all()
        # Spread the three permissions over the groups so that every
        # combination of view/add/del appears in the rendered form.
        for i, group in enumerate(all_groups):
            if i % 2 == 0:
                new_series.view_groups.add(group)
                if i % 4 == 0:
                    new_series.perms_groups_artifacts_add.add(group)
                else:
                    new_series.perms_groups_artifacts_del.add(group)
            else:
                if i % 4 == 1:
                    new_series.perms_groups_artifacts_add.add(group)
                    new_series.perms_groups_artifacts_del.add(group)
        response = self.client.get(
            reverse("project_series_edit", args=[self.project.id, new_series.id])
        )
        self.assertEqual(response.status_code, 200)
        perms = response.context["group_permissions"]
        # NOTE(review): subscripting zip() only works on Python 2 (zip is lazy
        # on Python 3) — confirm the target interpreter version.
        rendered_groups = zip(*perms)[0]
        # Database
        view_groups = new_series.view_groups.all()
        perm_art_add_groups = new_series.perms_groups_artifacts_add.all()
        perm_art_del_groups = new_series.perms_groups_artifacts_del.all()
        union_query = view_groups.union(perm_art_add_groups, perm_art_del_groups)
        # Exactly the groups holding at least one permission are rendered.
        for group in all_groups:
            if group in union_query:
                self.assertIn(group, rendered_groups)
            else:
                self.assertNotIn(group, rendered_groups)
        # Each rendered checkbox state matches the database content.
        for group, checks in perms:
            for check in checks:
                # May be there is a better way to do this...
                name = check.data["name"]
                status = check.data["selected"]
                if name == "view_groups":
                    self.assertEqual(status, group in view_groups)
                elif name == "perms_groups_artifacts_add":
                    self.assertEqual(status, group in perm_art_add_groups)
                elif name == "perms_groups_artifacts_del":
                    self.assertEqual(status, group in perm_art_del_groups)
                else:
                    self.fail("Unknown permission name %s" % name)
    def test_project_series_handle_group_permissions(self):
        """Test creating and modifying the group permissions.

        Creates a series, grants a group all three permissions directly on the
        model, then strips them one at a time through the edit form; finally
        verifies that a removed group cannot be re-added via the form (it is no
        longer among the form's available choices).
        """
        # Log in as admin
        response = self.client.login(
            username="test_series_user", password="test_series_user"
        )
        self.assertTrue(response)
        # Create series
        url = reverse("project_series_add", args=[self.project.id])
        response_get = self.client.get(url)
        self.assertEqual(response_get.status_code, 200)
        release_date = [unicode(datetime.datetime.now().strftime("%Y-%m-%d"))]
        data = {
            "csrf_token": response_get.context["csrf_token"],
            "series": "New series",
            "project": response_get.context["project"].id,
            "release_date": release_date,
        }
        response = self.client.post(url, data)
        new_series = ProjectSeries.objects.all()[0]
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response, reverse("project_series", args=[self.project.id, new_series.id])
        )
        # Create group and give it all permissions
        test_group = Group.objects.create(name="test_group")
        new_series.view_groups.add(test_group)
        new_series.perms_groups_artifacts_add.add(test_group)
        new_series.perms_groups_artifacts_del.add(test_group)
        # Group should have all permissions now
        self.assertIn(test_group, new_series.view_groups.all())
        self.assertIn(test_group, new_series.perms_groups_artifacts_add.all())
        self.assertIn(test_group, new_series.perms_groups_artifacts_del.all())
        # From now on, we will modify the permissions
        url = reverse("project_series_edit", args=[self.project.id, new_series.id])
        url_redirect = reverse("project_series", args=[self.project.id, new_series.id])
        # Removing view permissions
        response_get = self.client.get(url)
        self.assertEqual(response_get.status_code, 200)
        data = {
            "csrf_token": response_get.context["csrf_token"],
            "series": "New series",
            "project": response_get.context["project"].id,
            "release_date": release_date,
            "view_groups": [],
            "perms_groups_artifacts_add": [test_group.id],
            "perms_groups_artifacts_del": [test_group.id],
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, url_redirect)
        self.assertNotIn(test_group, new_series.view_groups.all())
        self.assertIn(test_group, new_series.perms_groups_artifacts_add.all())
        self.assertIn(test_group, new_series.perms_groups_artifacts_del.all())
        # Remove perms_groups_artifacts_add
        response_get = self.client.get(url)
        self.assertEqual(response_get.status_code, 200)
        data = {
            "csrf_token": response_get.context["csrf_token"],
            "series": "New series",
            "project": response_get.context["project"].id,
            "release_date": release_date,
            "view_groups": [],
            "perms_groups_artifacts_add": [],
            "perms_groups_artifacts_del": [test_group.id],
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, url_redirect)
        self.assertNotIn(test_group, new_series.view_groups.all())
        self.assertNotIn(test_group, new_series.perms_groups_artifacts_add.all())
        self.assertIn(test_group, new_series.perms_groups_artifacts_del.all())
        # Remove perms_groups_artifacts_del (the original comment wrongly said "_add")
        response_get = self.client.get(url)
        self.assertEqual(response_get.status_code, 200)
        data = {
            "csrf_token": response_get.context["csrf_token"],
            "series": "New series",
            "project": response_get.context["project"].id,
            "release_date": release_date,
            "view_groups": [],
            "perms_groups_artifacts_add": [],
            "perms_groups_artifacts_del": [],
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, url_redirect)
        self.assertNotIn(test_group, new_series.view_groups.all())
        self.assertNotIn(test_group, new_series.perms_groups_artifacts_add.all())
        self.assertNotIn(test_group, new_series.perms_groups_artifacts_del.all())
        # Let's try to give the group back all his permissions
        # It won't work because it is not among the available choices anymore (one needs to add it through the modal)
        response_get = self.client.get(url)
        self.assertEqual(response_get.status_code, 200)
        data = {
            "csrf_token": response_get.context["csrf_token"],
            "series": "New series",
            "project": response_get.context["project"].id,
            "release_date": release_date,
            "view_groups": [test_group.id],
            "perms_groups_artifacts_add": [test_group.id],
            "perms_groups_artifacts_del": [test_group.id],
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200)
        # Check form errors
        for m2m_field in (
            "view_groups",
            "perms_groups_artifacts_add",
            "perms_groups_artifacts_del",
        ):
            self.assertFormError(
                response,
                "form",
                m2m_field,
                "Select a valid choice. %s is not one of the available choices."
                % test_group.id,
            )
|
# Exercise 6
# Read a sentence from the keyboard and collect, in a set, every word that
# contains the letter chosen by the user.
frase = input("Frase: ").lower()
letra = input("Letra: ").lower()

# Words are separated by single spaces; keep those containing the letter.
palabras_con_letra = {palabra for palabra in frase.split(" ") if letra in palabra}
print(palabras_con_letra)
|
import sys
import traceback
import logging
import logging.config
import importlib
import bson.objectid
import config.global_configuration as global_conf
import database.client
import util.database_helpers as dh
def main(*args):
    """
    Import a dataset into the database from a given folder.

    :param args: First argument is the module containing the import_dataset
    function to use, second argument is the location of the dataset (either
    some file or root folder). Third argument is optionally an experiment to
    give the imported dataset id to.
    :return: None
    """
    # Guard first: the original fell through with fewer than two arguments and
    # then crashed with a NameError on the unassigned variables below.
    if len(args) < 2:
        print("Usage: main(<loader module>, <dataset path> [, <experiment id>])")
        return
    loader_module_name = str(args[0])
    path = str(args[1])
    # Optional third argument: the experiment that should receive the dataset.
    experiment_id = bson.objectid.ObjectId(args[2]) if len(args) >= 3 else None

    config = global_conf.load_global_config('config.yml')
    logging.config.dictConfig(config['logging'])
    log = logging.getLogger(__name__)
    db_client = database.client.DatabaseClient(config=config)

    # Try and import the desired loader module
    try:
        loader_module = importlib.import_module(loader_module_name)
    except ImportError:
        loader_module = None
    if loader_module is None:
        log.error("Could not load module {0} for importing dataset, check it exists".format(loader_module_name))
        return
    if not hasattr(loader_module, 'import_dataset'):
        log.error("Module {0} does not have method 'import_dataset'".format(loader_module_name))
        return

    # It's up to the importer to fail here if the path doesn't exist
    if experiment_id is not None:
        log.info("Importing dataset from {0} using module {1} for experiment {2}".format(path, loader_module_name,
                                                                                         experiment_id))
    else:
        log.info("Importing dataset from {0} using module {1}".format(path, loader_module_name))
    try:
        dataset_id = loader_module.import_dataset(path, db_client)
    except Exception:
        dataset_id = None
        log.error("Exception occurred while importing dataset from {0} with module {1}:\n{2}".format(
            path, loader_module_name, traceback.format_exc()
        ))

    if dataset_id is not None:
        # NOTE(review): load_object is invoked even when experiment_id is None;
        # it is assumed to return None in that case — confirm in util.database_helpers.
        experiment = dh.load_object(db_client, db_client.experiments_collection, experiment_id)
        if experiment is not None:
            log.info("Successfully imported dataset {0}, adding to experiment {1}".format(dataset_id,
                                                                                          experiment_id))
            experiment.add_image_source(dataset_id, path, db_client)
        else:
            log.info("Successfully imported dataset {0}".format(dataset_id))
# Script entry point: forward the command-line arguments (loader module name,
# dataset path, optional experiment id) to main().
if __name__ == '__main__':
    main(*sys.argv[1:])
|
# 13.2_chisq
# Chi-squared test of independence on the smoking / lung-cancer data,
# driven through the embedded R interpreter (rpy2).
import rpy2.robjects as ro

r = ro.r

# Read the tab-separated data file with R's read.table.
df = r("read.table('smoking_and_lung_cancer.txt', header = TRUE, sep = '\t')")
print(r.names(df))

# Cross-tabulate the second and third columns into a contingency table,
# then run R's chisq.test on it and print the result.
contingency = r.table(df[1], df[2])
print(r['chisq.test'](contingency))
## do not change anything in here!!!!
## go to run program, you should not have to save it to run it!
## put the grades you have received already and the grades you hope to get.
# it will print out your grade number and letter grade for the class
print("Welcome to Dr.Dornshuld's Chemistry Calculator")
again = 'y'
while again == 'y':
    print('\n')
    # Collect the averaged homework grade and the four exam scores.
    homework = int(input('What is your averaged homework grade? '))
    test1 = int(input('What is your first test grade? '))
    test2 = int(input('What is your 2nd test grade? '))
    test3 = int(input('What is your 3rd test grade? '))
    final = int(input('What is your final exam grade? '))
    # The final exam counts the same as a regular test in the average.
    test = (test1 + test2 + test3 + final) / 4
    # Weights: 10% homework, 5% attendance (assumed perfect), ~85% tests.
    final_grade = homework * .10 + 100 * .05 + test * .8501
    final_grade = round(final_grade, 0)
    print('This grade is based off of PERFECT Attendance')
    # Cascading upper bounds cover every value, so letter_grade is always
    # assigned (the original range checks left gaps such as 84.5 unbound).
    if final_grade <= 54.0:
        letter_grade = 'F'
    elif final_grade <= 64.0:
        letter_grade = 'D'
    elif final_grade <= 74.0:
        letter_grade = 'C'
    elif final_grade <= 84.0:
        letter_grade = 'B'
    else:
        letter_grade = 'A'
    print('Final Grade: ', final_grade,'Letter Grade: ', letter_grade)
    again = input('Would you like try again?(y/n): ')
    # The loop condition handles exit; the original trailing bare `quit`
    # expression was a no-op and has been removed.
|
#!/usr/bin/env python
import sys
import socket
import os
import json
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
# Load runtime configuration; the context manager closes the file handle
# (the original bare open() leaked it).
with open('config.json') as _config_file:
    config = json.load(_config_file)

# probably no necessary --> remote kv store in use; import registry
import leader, store, store_sharding, udr, bill
from random import randint

# Select the sharded or plain key-value store backend per config.
if config['store'] == 'shard':
    valueStore = store_sharding
else:
    valueStore = store

# Bind address: Cloud Foundry env vars with local-dev defaults,
# overridable below by CLI arguments.
port = int(os.environ.get('VCAP_APP_PORT', '5050'))
host = os.environ.get('VCAP_APP_HOST', 'localhost')

# Leader-election state, updated by the /leader and /newleader routes.
isLeader = False
leaderhost = ''

# Optional CLI override: script.py <host> <port>
if len(sys.argv) > 2:
    host = sys.argv[1]
    port = int(sys.argv[2])
def registerself(host, port):
    """Register this node ("host:port") with the central hosts registry.

    Skips the registry call when the node already appears in the cluster's
    node list. Best-effort: any failure is printed, never raised.
    (The original loop variable shadowed the `host` parameter; renamed.)
    """
    try:
        hostname = host + ':' + str(port)
        # Only register if we are not already in the node list.
        nodes = leader.getNodeList() or []
        already_registered = any(node.strip() == hostname for node in nodes)
        if not already_registered:
            response = urlopen(config['hostsRegistry'] + '/' + hostname)
            response.read()  # drain the response; the body is unused
            print(socket.gethostname())
            print("Node registered as " + hostname)
    except Exception as e:
        print("Error registering: ", e)
def application(environ, start_response):
    """WSGI entry point: dispatch on the request path prefix.

    Routes (matched in order):
      /store/<k>=<v>   store a value, then replicate (leader) or forward
      /store           dump all values held by this node
      /sync/<k>=<v>    apply a replicated value; leader re-broadcasts
      /leader/vote     answer a leader-election vote request
      /leader          run a leader election
      /newleader/<h>   record the newly elected leader host
      /getleader       report the current leader host
      /getvalue/<k>    fetch a single value
      /udr, /billing   trigger UDR / bill generation
    Always responds 200 text/plain.
    """
    global isLeader
    global leaderhost
    global host
    global port
    ctype = 'text/plain'
    status = '200 OK'
    response_body = "It works"
    print('Called : ' + environ['PATH_INFO'])
    if environ['PATH_INFO'].startswith("/store/"):
        # "/store/<key>=<value>" -- skip len("/store/") == 7 characters
        params = environ['PATH_INFO'][7:].split('=')
        response_body = valueStore.storeValues(params[0], params[1], host, port)
        if not isLeader:
            # Followers forward writes to the leader for replication.
            valueStore.notifyLeader(leaderhost, params[0], params[1])
        else:
            # The leader pushes the write to every node.
            valueStore.syncAll(params[0], params[1], host, port)
    elif environ['PATH_INFO'].startswith("/store"):
        response_body = str(valueStore.getValues(host, port))
    elif environ['PATH_INFO'].startswith("/sync/"):
        # "/sync/<key>=<value>" -- skip len("/sync/") == 6 characters
        params = environ['PATH_INFO'][6:].split('=')
        valueStore.syncValue(params[0], params[1])
        if isLeader:
            print('I Am leader')
            valueStore.syncAll(params[0], params[1], host, port)
    elif environ['PATH_INFO'].startswith("/leader"):
        if environ['PATH_INFO'].startswith("/leader/vote"):
            response_body = str(leader.vote())
            print(response_body)
        else:
            # Run an election and record whether this node won it.
            response_body = leader.election(host, port)
            leaderhost = response_body
            isLeader = leaderhost == host.strip() + ':' + str(port)
    elif environ['PATH_INFO'].startswith("/newleader/"):
        # "/newleader/<host:port>" -- skip len("/newleader/") == 11
        leaderhost = environ['PATH_INFO'][11:]
        isLeader = leaderhost == host.strip() + ':' + str(port)
        print('Registered Leader Host:' + leaderhost)
    elif environ['PATH_INFO'].startswith("/getleader"):
        response_body = leaderhost
    elif environ['PATH_INFO'].startswith("/getvalue/"):
        response_body = valueStore.getValue(environ['PATH_INFO'][10:])
    elif environ['PATH_INFO'].startswith("/udr"):
        createUDR()
    elif environ['PATH_INFO'].startswith("/billing"):
        createBill()
    else:
        response_body = 'It Works'
    response_body = response_body.encode('utf-8')
    response_headers = [('Content-Type', ctype), ('Content-Length', str(len(response_body)))]
    start_response(status, response_headers)
    return [response_body]
def createUDR():
    # Thin wrapper: delegate usage-detail-record creation to the udr module.
    udr.create()
def createBill():
    # Thin wrapper: delegate bill generation to the bill module.
    bill.create()
if __name__ == '__main__':
    # Stand up the reference WSGI server on the configured port, announce
    # this node to the registry, then serve requests until interrupted.
    from wsgiref.simple_server import make_server
    httpd = make_server('', port, application)
    registerself(host, port)
    httpd.serve_forever()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.