from ..types import Action, ComponentType, DataElement, DataType, Instance, InstanceElement, Topology
from ..connector import DynizerConnection
from ...common.errors import LoaderError
from typing import Sequence
import xml.etree.ElementTree as ET
import itertools
class XMLAbstractElement:
"""
Abstract XML element
This class represents an abstract XML element, that can be used to build up
Dynizer instance data.
Member Variables
----------------
value : any
The actual value of the xml element
data_type : DataType
The Dynizer data type of the xml element
component : ComponentType
The Dynizer component type of the xml element
label : str
The label for the xml element
Member Functions
----------------
fetch_from_entity
Should be overwritten in concrete elements that fetch the value from the
parsed xml file
apply_variables
Should be overwritten in concrete elements that retrieve the value from
previously parsed variables out of the xml file
"""
def __init__(self, value,
data_type: DataType,
component: ComponentType,
label = ''):
self.value = value
self.data_type = data_type
self.component = component
self.label = label
def fetch_from_entity(self, entity, components, data, labels, ns):
components.append(self.component)
data.append(InstanceElement(value=self.value, datatype=self.data_type))
labels.append(self.label)
return True
def apply_variables(self, combinations):
pass
class XMLFixedElement(XMLAbstractElement):
"""
Fixed element
This class represents a Fixed element, that can be used to build up
Dynizer instance data. A Fixed element acts like a constant and will always
represent the same value. It cannot be altered once provided.
"""
def __init__(self, value,
data_type: DataType,
component: ComponentType,
label = ''):
super().__init__(value, data_type, component, label)
class XMLVariableElement(XMLAbstractElement):
"""
Variable element
This class represents a Variable element. Variable elements are assigned
a value based upon a combination of LoopVariables, which have been fetched
in a pre-parsing loop of the xml file. The full combinatorial product of all loop
variables is created and provided to the variable elements. Based upon the loop_index and
variable_index the correct element is assigned to the variable element.
The loop index specifies the loopvariable to access in the combinatorics matrix.
Since each loop variable can have multiple subvariable references, the variable_index
refers to the sub-index within the loopvariable.
See the XmlLoopVariable class for more information
"""
def __init__(self, loop_index: int,
variable_index: int,
data_type: DataType,
component: ComponentType,
label = '',
transform_funcs = []):
super().__init__(None, data_type, component, label)
self.loop_index = loop_index
self.variable_index = variable_index
self.transform_funcs = list(transform_funcs)
def apply_variables(self, combination):
self.value = combination[self.loop_index][self.variable_index]
for tf in self.transform_funcs:
self.value = tf(self.value)
class XMLExtractionElement(XMLAbstractElement):
def __init__(self, path: str,
data_type: DataType,
component: ComponentType,
label = '',
required = True,
default = None,
allow_void = True,
transform_funcs = []):
super().__init__(None, data_type, component, label)
self.path = path
self.required = required
self.default = default
self.allow_void = allow_void
self.transform_funcs = list(transform_funcs)
def fetch_from_entity(self, entity, components, data, labels, ns):
node = entity.findall(self.path, ns)
if len(node) == 0:
if self.required:
if self.default is not None:
components.append(self.component)
                    data.append(InstanceElement(value=self.default, datatype=self.data_type))
labels.append(self.label)
elif self.allow_void:
components.append(self.component)
data.append(InstanceElement())
labels.append(self.label)
else:
return False
elif len(node) == 1:
value = node[0].text
for tf in self.transform_funcs:
value = tf(value)
components.append(self.component)
data.append(InstanceElement(value=value, datatype=self.data_type))
labels.append(self.label)
else:
for val in node:
value = val.text
for tf in self.transform_funcs:
value = tf(value)
components.append(self.component)
data.append(InstanceElement(value=value, datatype=self.data_type))
labels.append(self.label)
return True
class XMLStringCombinationElement(XMLAbstractElement):
def __init__(self, paths: Sequence[str],
component: ComponentType,
label = '',
combinator_func = None,
required = True,
sequence_join_char = ','):
super().__init__(None, DataType.STRING, component, label)
self.paths = paths
self.combinator_func = combinator_func
        self.required = required
        self.sequence_join_char = sequence_join_char
def fetch_from_entity(self, entity, components, data, labels, ns):
tmp_data=[]
for path in self.paths:
node = entity.findall(path, ns)
val = ''
if len(node) == 1:
val = node[0].text
elif len(node) > 1:
arr = []
for n in node:
arr.append(n.text)
                val = self.sequence_join_char.join(arr)
tmp_data.append(val)
"""
if len(tmp_data) == 0:
if not self.required:
return True
else:
data.append(InstanceElement())
else:
if self.combinator_func is not None:
data.append(InstanceElement(value=self.combinator_func(tmp_data), datatype=DataType.STRING))
else:
data.append(InstanceElement(value=self._default_combinator(tmp_data), datatype=DataType.STRING))
"""
value = self.combinator_func(tmp_data) if self.combinator_func is not None else self._default_combinator(tmp_data)
if len(value) == 0:
if self.required:
data.append(InstanceElement())
else:
return False
else:
data.append(InstanceElement(value=value, datatype=DataType.STRING))
components.append(self.component)
labels.append(self.label)
return True
def _default_combinator(self, tmp_data):
        result = ''
for elem in tmp_data:
if len(elem) > 0:
if len(result) == 0:
                    result = elem
else:
result = '{0} {1}'.format(result, elem)
return result
class XMLLoopVariable:
def __init__(self, path: str, variable_path: Sequence[str]):
self.path = path
self.variable_path = variable_path
class XMLMapping:
def __init__(self, action: Action,
root_path: str,
variables: Sequence[XMLLoopVariable],
elements: Sequence[XMLAbstractElement],
fallback: Sequence[XMLAbstractElement] = [],
batch_size=100):
self.action = action
self.root_path = root_path
self.variables = variables
self.elements = elements
self.fallback = list(fallback)
self.expanded_variables = []
self.batch_size = batch_size
class XMLLoader:
def __init__(self, root_node: ET.Element,
mappings: Sequence[XMLMapping] = [],
namespaces={}):
self.root_node = root_node
self.mappings = list(mappings)
self.ns = namespaces
@classmethod
def parse(cls, xml_file: str, mappings: Sequence[XMLMapping] = [], namespaces={}):
return cls(ET.parse(xml_file).getroot(), mappings, namespaces)
@classmethod
def fromstring(cls, xml_string: str):
return cls(ET.fromstring(xml_string))
def add_mapping(self, mapping: XMLMapping):
        self.mappings.append(mapping)
def run(self, connection: DynizerConnection, debug=False):
try:
if connection is not None:
connection.connect()
for mapping in self.mappings:
self.__run_mapping(connection, mapping, debug)
if connection is not None:
connection.close()
except Exception as e:
if connection is not None:
connection.close()
raise e
def __expand_variables(self, root, mapping, variable):
elements = root.findall(variable.path, self.ns)
values = []
for elem in elements:
v_values = []
for v_path in variable.variable_path:
var_elem = elem.findall(v_path, self.ns)
v_values.append(list(map(lambda x: x.text, var_elem)))
values = values + list(itertools.product(*v_values))
mapping.expanded_variables.append(values)
def __run_mapping(self, connection: DynizerConnection,
mapping: XMLMapping,
debug):
        print('Creating instances for: {0}'.format(mapping.action.name))
action_obj = None
if connection is not None:
try:
action_obj = connection.create(mapping.action)
except Exception as e:
raise LoaderError(XMLLoader, "Failed to create required action: '{0}'".format(mapping.action))
topology_map = {}
loadlist = []
if len(mapping.variables) == 0:
# No loopvariables are present
self.__run_simple_mapping(connection, mapping, mapping.root_path, action_obj, topology_map, loadlist, debug)
else:
# We have loop variables, resolve them
for variable in mapping.variables:
self.__expand_variables(self.root_node, mapping, variable)
# Make the combinations of the various loop variables and iterate over them
var_combinations = list(itertools.product(*mapping.expanded_variables))
for combination in var_combinations:
# Adjust the root path with the requested loop variables
current_root = mapping.root_path.format(*combination)
for elem in mapping.elements:
# Apply the current variables to the XMLVariableElements
elem.apply_variables(combination)
for elem in mapping.fallback:
elem.apply_variables(combination)
self.__run_simple_mapping(connection, mapping, current_root, action_obj, topology_map, loadlist, debug)
if len(loadlist) > 0:
self.__push_batch(connection, loadlist)
def __run_simple_mapping(self, connection: DynizerConnection,
mapping: XMLMapping,
root_path: str,
action_obj, topology_map, loadlist,
debug):
# Fetch the root node
root = None
try:
root = self.root_node.findall(root_path, self.ns)
except Exception as e:
print(root_path)
raise e
if len(root) == 0:
raise LoaderError(XMLLoader, "Invalid xpath specified for root path: '{0}'".format(root_path))
# Loop over all entities in the root node and parse the entities
for entity in root:
status = self.__run_mapping_on_entity(entity, topology_map, loadlist, action_obj, connection, mapping, debug = debug)
if not status:
self.__run_mapping_on_entity(entity, topology_map, loadlist, action_obj, connection, mapping, fallback=True, debug = debug)
if len(loadlist) >= mapping.batch_size:
self.__push_batch(connection, loadlist)
def __run_mapping_on_entity(self, entity, topology_map, loadlist,
action_obj: Action,
connection: DynizerConnection,
mapping: XMLMapping,
fallback = False,
debug = False):
components = []
data = []
labels = []
elements = mapping.fallback if fallback else mapping.elements
if len(elements) == 0:
return False
        # Loop over all elements and fetch them from the entity
for element in elements:
            if not element.fetch_from_entity(entity, components, data, labels, self.ns):
return False
if len(components) < 2:
return False
if debug:
inst = Instance(action_id=0, topology_id=0, data=data)
print(inst.to_json())
if connection is None:
return True
# Build the topology
top_map_key = ','.join(map(str, components))
topology_obj = None
if top_map_key in topology_map:
topology_obj = topology_map[top_map_key]
else:
# Check if we have it in the system
if topology_obj is None:
try:
topology = Topology(components=components, labels=labels)
topology_obj = connection.create(topology)
topology_obj.labels = labels
except Exception as e:
raise LoaderError(XMLLoader, "Failed to create topology: '{0}'".format(top_map_key))
# Also make sure it is linked to the action
try:
connection.link_actiontopology(action_obj, topology_obj)
except Exception as e:
print("Failed to link action and topology.")
topology_map[top_map_key] = topology_obj
# Create the instance and push it onto the load list
inst = Instance(action_id=action_obj.id, topology_id=topology_obj.id, data=data)
loadlist.append(inst)
return True
def __push_batch(self, connection: DynizerConnection,
batch: Sequence[Instance]):
if connection is not None:
print("Writing batch ...")
try:
connection.batch_create(batch)
batch.clear()
except Exception as e:
raise LoaderError(XMLLoader, "Failed to push batch of instances")
import socket
from struct import pack, calcsize
from modules.api import definitions
from modules.data import variables
buffSize = 128
class HIL_socket:
def __init__(self, ip, port):
"""
:param family:
:param type:
"""
self.family = socket.AF_INET
self.type = socket.SOCK_STREAM
self.isconn = False
self.Socket = socket.socket(self.family, self.type)
self.Socket.settimeout(0.5)
self.servicesDict = definitions.servicesDict
self.errorsDict = definitions.errorsDict
self.txMsgStructuresDict = definitions.txMsgStructures
self.rxMsgStructureDict = definitions.rxMsgStructures
self.debug_log = True
self.ip = ip
self.port = port
def newSocket(self, timeout=0.5):
self.Socket = socket.socket(self.family, self.type)
self.Socket.settimeout(timeout)
def is_connected(self):
return self.isconn
def reset_connection(self):
self.isconn = False
def connect(self):
"""
:param IP:
:param port:
:return:
"""
server_address = (self.ip, self.port)
        variables.log2(self.__class__.__name__, 'starting up on %s port %s' % server_address)
try:
self.Socket.connect(server_address)
self.isconn = True
return 0
        except Exception:
if self.debug_log:
variables.log2(self.__class__.__name__,'Error starting connection on %s port %s' % server_address)
variables.print_exception(self.__class__.__name__)
self.isconn = False
return 1
def send(self, message):
"""
:param message:
:return:
"""
if self.debug_log:
# variables.log2(self.__class__.__name__, 'sending to "%s" "%s"' % (self.ip, message))
# variables.log2(self.__class__.__name__, 'sending (parsed) to "%s" "%s"' % (self.ip, "".join("%s " % ("0x%0.2X" % tup) for tup in message)))
pass
try:
self.Socket.sendall(message)
return 0
        except Exception:
if self.debug_log:
variables.log2(self.__class__.__name__,'Error sending "%s"' % message)
variables.print_exception(self.__class__.__name__)
return 1
    def receive(self, bufsize):
        """
        :param bufsize: maximum number of bytes to read
        :return: the received bytes, or None on error
        """
data = None
try:
data = self.Socket.recv(bufsize)
        except Exception:
if self.debug_log:
variables.print_exception(self.__class__.__name__)
return data
def close(self):
"""
:return:
"""
if self.debug_log:
variables.log2(self.__class__.__name__, "closing socket")
try:
self.Socket.shutdown(socket.SHUT_RDWR)
self.Socket.close()
return 0
        except Exception:
if self.debug_log:
variables.print_exception(self.__class__.__name__)
return 1
def request(self, tx_message):
self.send((tx_message+"\n").encode())
rx_message = self.receive(buffSize)
try:
rx_message = rx_message.decode()
rx_data = [int(rm) for rm in rx_message.split(",")]
# get error code and data
return (0, rx_data)
        except Exception:
if self.debug_log:
variables.print_exception(self.__class__.__name__)
return (-1, None)
def request_hex(self, service_def):
if self.debug_log:
variables.log2(self.__class__.__name__, "get data")
format = "!BB"
tx_length = calcsize(format) - 1
try:
tx_message = pack(format, tx_length, self.servicesDict[service_def])
# tx_message = pack(self.txMsgStructuresDict['DATA'], tx_length, self.servicesDict['SERVICE_DATA'])
        except Exception:
if self.debug_log:
variables.print_exception(self.__class__.__name__)
return (-1, None)
self.send(tx_message)
rx_message = self.receive(buffSize)
try:
rx_data = [rm for rm in rx_message]
# get error code and data
return (rx_data[2], rx_data)
        except Exception:
if self.debug_log:
variables.print_exception(self.__class__.__name__)
return (-1, None)
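# Minimal usage sketch (illustrative): the IP address, port, and service string
# below are placeholders and depend on the device under test. request() speaks
# a newline-terminated ASCII protocol, while request_hex() packs a binary
# header from definitions.servicesDict.
if __name__ == '__main__':
    sock = HIL_socket('192.168.0.10', 5025)
    if sock.connect() == 0:
        err, values = sock.request('GET_STATUS')
        print(err, values)
        sock.close()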
import pymysql
connector = pymysql.connect(
host='localhost',
db='sdb',
user='root',
passwd='root',
charset='utf8',
)
cursor = connector.cursor()
sql = "insert into test_table values('1','python')"
cursor.execute(sql)
sql = "insert into test_table values('2','パイソン')"
cursor.execute(sql)
sql = "insert into test_table values('3','ぱいそん')"
cursor.execute(sql)
connector.commit()
cursor.close()
connector.close()
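# The literal INSERT statements above are fine for constants, but values that
# come from user input should go through parameterized queries, which let the
# driver handle quoting and escaping. A sketch against the same (hypothetical)
# two-column test_table:
def insert_rows(rows):
    conn = pymysql.connect(host='localhost', db='sdb', user='root',
                           passwd='root', charset='utf8')
    try:
        with conn.cursor() as cur:
            # %s placeholders are bound by the driver, not by string formatting
            cur.executemany("insert into test_table values(%s, %s)", rows)
        conn.commit()
    finally:
        conn.close()
# insert_rows([('4', 'snake'), ('5', 'py')])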
#coding: utf-8
import numpy as np
'''
Compute the information gain of each word (feature) in a sample matrix:
IG(w) = H(S) - [P(w) * H(S | w present) + P(not w) * H(S | w absent)]
powered by ayonel
'''
class InformationGain:
def __init__(self, X, y):
self.X = X
self.y = y
        self.totalSampleCount = X.shape[0] # total number of samples
        self.totalSystemEntropy = 0 # total entropy of the system
        self.totalClassCountDict = {} # how many samples each class has
        self.nonzeroPosition = X.T.nonzero() # positions of the nonzero values of the transposed X
        self.igResult = [] # list holding the results
self.wordExistSampleCount = 0
self.wordExistClassCountDict = {}
self.iter()
    # return the result list
def get_result(self):
return self.igResult
    # compute the total entropy of the system
    def cal_total_system_entropy(self):
        # count how many samples there are of each class
for label in self.y:
if label not in self.totalClassCountDict:
self.totalClassCountDict[label] = 1
else:
self.totalClassCountDict[label] += 1
for cls in self.totalClassCountDict:
probs = self.totalClassCountDict[cls] / float(self.totalSampleCount)
self.totalSystemEntropy -= probs * np.log(probs)
    # while iterating over nonzeroPosition, incrementally compute the information gain of each word
def iter(self):
self.cal_total_system_entropy()
pre = 0
for i in range(len(self.nonzeroPosition[0])):
if i != 0 and self.nonzeroPosition[0][i] != pre:
                for notappear in range(pre+1, self.nonzeroPosition[0][i]): # a word that never appears in the whole sample set gets an IG of 0
self.igResult.append(0.0)
ig = self.cal_information_gain()
self.igResult.append(ig)
self.wordExistSampleCount = 0
self.wordExistClassCountDict = {}
pre = self.nonzeroPosition[0][i]
self.wordExistSampleCount += 1
            yclass = self.y[self.nonzeroPosition[1][i]] # label of the current sample
if yclass not in self.wordExistClassCountDict:
self.wordExistClassCountDict[yclass] = 1
else:
self.wordExistClassCountDict[yclass] += 1
        # compute the IG of the last word
ig = self.cal_information_gain()
self.igResult.append(ig)
    # main function that computes the information gain
def cal_information_gain(self):
x_exist_entropy = 0
x_nonexist_entropy = 0
for cls in self.wordExistClassCountDict:
probs = self.wordExistClassCountDict[cls] / float(self.wordExistSampleCount)
x_exist_entropy -= probs * np.log(probs)
            probs = (self.totalClassCountDict[cls] - self.wordExistClassCountDict[cls]) / float(self.totalSampleCount - self.wordExistSampleCount)
            if probs != 0: # probs == 0 means every sample of this class contains the word; 0 * log(0) contributes nothing, so skip it
                x_nonexist_entropy -= probs*np.log(probs)
for cls in self.totalClassCountDict:
if cls not in self.wordExistClassCountDict:
probs = self.totalClassCountDict[cls] / float(self.totalSampleCount - self.wordExistSampleCount)
x_nonexist_entropy -= probs*np.log(probs)
        # combine the two terms to obtain the IG
ig = self.totalSystemEntropy - ((self.wordExistSampleCount/float(self.totalSampleCount))*x_exist_entropy +
((self.totalSampleCount-self.wordExistSampleCount)/float(self.totalSampleCount)*x_nonexist_entropy))
return ig
if __name__ == '__main__':
X = np.array([[1,0,0,1],[0,1,1,1],[0,0,1,0]])
y = [0,0,1]
ig = InformationGain(X, y)
    print(ig.get_result())
from __future__ import annotations
from decimal import Decimal
from fava.beans import create
from fava.core.inventory import CounterInventory
def test_add() -> None:
inv = CounterInventory()
key = ("KEY", None)
inv.add(key, Decimal("10"))
assert len(inv) == 1
inv.add(key, Decimal("-10"))
assert inv.is_empty()
def test_add_amount() -> None:
inv = CounterInventory()
inv.add_amount(create.amount("10 USD"))
inv.add_amount(create.amount("30 USD"))
assert len(inv) == 1
inv.add_amount(create.amount("-40 USD"))
assert inv.is_empty()
inv.add_amount(create.amount("10 USD"))
inv.add_amount(create.amount("20 CAD"))
inv.add_amount(create.amount("10 USD"))
assert len(inv) == 2
inv.add_amount(create.amount("-20 CAD"))
assert len(inv) == 1
def test_add_inventory() -> None:
inv = CounterInventory()
inv2 = CounterInventory()
inv3 = CounterInventory()
inv.add_amount(create.amount("10 USD"))
inv2.add_amount(create.amount("30 USD"))
inv3.add_amount(create.amount("-40 USD"))
inv.add_inventory(inv2)
assert len(inv) == 1
inv.add_inventory(inv3)
assert inv.is_empty()
inv = CounterInventory()
inv.add_inventory(inv2)
assert len(inv) == 1
from flask import Blueprint, jsonify, render_template, request
from . import db, app
from .models import Program
from datetime import datetime
import flask_jwt_extended as jwt
from sqlalchemy.exc import IntegrityError, OperationalError
from .auth import decode_identity, gain_access
main = Blueprint('main', __name__)
@main.route('/')
def index():
return render_template("index.html")
# view, add or remove programs
@main.route('/programs', methods=['GET'])
@gain_access
def view_programs():
    # view all of the user's saved programs
try:
token = request.headers.get('Authorization')[7:]
if not token or token == 'null' or token == 'undefined':
token = request.cookies.get('token')
if not token or token == 'null' or token == 'undefined':
return jsonify({"msg": "no token"})
except TypeError:
return jsonify({"msg": "no token"})
    identity = decode_identity(token)
    programs_list = db.session.query(Program).filter_by(user_id=identity['id'])
    programs = []
    for item in programs_list:
        programs.append({
            'program': item.program,
            'username': item.username,
            'password': item.password,
            'id': item.program_id,
            'idx': item.index})
    return jsonify({'programs': programs})
@main.route('/remove-program', methods=['POST'])
@jwt.jwt_required
def remove_program():
try:
program_data = request.get_json()
gain_access(program_data)
db.session.query(Program).filter_by(
program=program_data['program'],
username=program_data['username'],
user_id=program_data['id']).delete()
db.session.commit()
    except (IntegrityError, OperationalError):
        db.session.rollback()
        return jsonify({"err": "error while trying to remove program"}), 500
return jsonify({"msg": "removed"}), 201
@main.route('/add-program', methods=['POST'])
@jwt.jwt_required
def add_program():
try:
program_data = request.get_json()
new_program = Program(program=program_data['program'],
username=program_data['username'],
password=program_data['password'],
user_id=program_data['id'],
index=program_data['index'])
db.session.add(new_program)
db.session.commit()
    except (IntegrityError, OperationalError):
        db.session.rollback()
        return jsonify({"err": "error while trying to add program"}), 500
return jsonify({"msg": f"added {program_data['program']}"}), 201
@main.route('/change-credentials', methods=['POST'])
@jwt.jwt_required
def change_credentials():
try:
program_data = request.get_json()
change_program = Program.query.filter_by(
user_id=program_data['user_id'],
index=program_data['index']).first()
if 'program' in program_data:
change_program.program = program_data['program']
if 'username' in program_data:
change_program.username = program_data['username']
if 'password' in program_data:
change_program.password = program_data['password']
db.session.commit()
    except (IntegrityError, OperationalError):
        db.session.rollback()
        return jsonify({"err": "error while trying to change credentials"}), 500
    except AttributeError:
        return jsonify({"err": "no matching program found for the given user and index"}), 404
return jsonify({'msg': f"credentials changed for {change_program.program}"}), 201
'''
Python has two ways to define a function: the regular def statement, which requires a name, and lambda, which does not and is therefore called a lambda (anonymous) function.
A lambda function is a function without a name. Can a function really have no name? Of course: if a function is only used once and its logic is simple, there is no need to give it a name at all.
The keyword lambda introduces the anonymous function; the x before the colon is the function parameter.
Anonymous functions have one restriction: the body is a single expression. There is no return statement; the value of that expression is the return value.
One benefit of anonymous functions is that, having no name, they cannot clash with other function names. A lambda is also a function object, so it can be assigned to a variable and called through that variable.
'''
print('----------lambda---------')
print(type(lambda x, y: x + y))
add = lambda x, y: x + y
print(add)
print((lambda x, y: x + y)(1, 2))
print(add(1, 2))
print('----------sorted---------')
# A list of integers: sort it in ascending order of the absolute value of its elements
list1 = [3, 5, -4, -1, 0, -2, -6]
print(sorted(list1, key=lambda x: abs(x)))
# filter
# filter() filters a sequence: it drops the elements that fail a condition and keeps those that pass.
# It takes two arguments, a function and a sequence; each element of the sequence is passed to the function, which returns True or False, and the elements for which it returns True make up the result (wrapped in list() below).
print('----------filter---------')
print(list(filter(lambda x: x % 3 == 0, list1)))
# map
# map() applies the given function to every element of the given sequence.
# The first argument, function, is called with each element of the sequence in turn, and the result contains the return value of each call (wrapped in list() below).
print('-----------map----------')
print(list(map(lambda x: x * 2 + 1, list1)))
# reduce - removed from the global namespace in Python 3
# reduce() accumulates the elements of a sequence.
# It applies the given two-argument function to all the items of a collection (list, tuple, etc.): first to items 1 and 2, then to that result and item 3, and so on, until a single result remains.
from functools import reduce
print('----------reduce--------')
print(reduce(lambda x, y: x + y, list1))
# zip
# zip() maps matching indices of multiple containers so that they can be used as a single entity.
# Basic syntax: zip(*iterators); arguments: iterable objects such as lists or strings; return value: a single iterator object with the mapped values from all containers.
print('-----------zip----------')
keys = ['name','age']
values = ['xiaobai',18]
my_dict = dict(zip(keys,values))
print(my_dict)
name = [ "xiaobai", "john", "mike", "alpha" ]
age = [ 4, 1, 3, 2 ]
marks = [ 40, 50, 60, 70 ]
mapped = list(zip(name, age, marks))
print(mapped)
names, ages, marks = zip(*mapped)
print(names)
import json
import unittest
from app import db
from apps.news.patches import (
patch_item, add, copy, move, remove, replace
)
from apps.news.models import News, NewsCategories, NewsCategoriesMapping
from apps.utils.time import get_datetime
class TestNewsPatchesFailures(unittest.TestCase):
def setUp(self):
item1 = News(
Title="Test News 1",
Contents="My news 1",
Author="UnitTester",
Created=get_datetime()
)
db.session.add(item1)
item2 = News(
Title="Test News 2",
Contents="My news 2",
Author="UnitTester",
Created=get_datetime(),
Updated=get_datetime(),
)
db.session.add(item2)
db.session.commit()
self.valid_news_id = [item1.NewsID, item2.NewsID]
# Make sure we have no categories mapped
for catmap in NewsCategoriesMapping.query.all():
db.session.delete(catmap)
for cat in NewsCategories.query.all():
db.session.delete(cat)
db.session.commit()
# Add some valid news categories
cat1 = NewsCategories(Category="UnitTest1")
cat2 = NewsCategories(Category="UnitTest2")
cat3 = NewsCategories(Category="UnitTest3")
db.session.add(cat1)
db.session.add(cat2)
db.session.add(cat3)
db.session.commit()
# And map the first category to both News items
newscat1 = NewsCategoriesMapping(
NewsID=item1.NewsID,
NewsCategoryID=cat1.NewsCategoryID
)
newscat2 = NewsCategoriesMapping(
NewsID=item2.NewsID,
NewsCategoryID=cat1.NewsCategoryID
)
db.session.add(newscat1)
db.session.add(newscat2)
db.session.commit()
self.valid_cat_ids = [
cat1.NewsCategoryID, cat2.NewsCategoryID, cat3.NewsCategoryID
]
def tearDown(self):
for news in News.query.all():
db.session.delete(news)
        for catmap in NewsCategoriesMapping.query.all():
            db.session.delete(catmap)
        for cat in NewsCategories.query.all():
            db.session.delete(cat)
db.session.commit()
def test_news_categories_with_invalid_data(self):
        payload = json.dumps([{"op": "add", "path": "/categories", "value": "invalid"}])
result = patch_item(self.valid_news_id[0], payload)
self.assertFalse(result["success"])
self.assertEqual("Only lists are allowed in categories", result["message"])
def test_db_constraint(self):
"""When attempting an operation that is against the DB constraints."""
payload = json.dumps([
{"op": "move", "from": "/author", "path": "/title"},
])
result = patch_item(self.valid_news_id[1], payload)
        news_item = News.query.filter_by(NewsID=self.valid_news_id[1]).first()
self.assertNotEqual(None, news_item.Author)
self.assertFalse(result["success"])
self.assertEqual("The defined source cannot be nullified (NOT NULL)", result["message"])
def test_failing_test_op(self):
"""When the test OP comparison fails, should cancel the patch and rollback any
commits that were created before the OP."""
payload = json.dumps([
{"op": "add", "path": "/contents", "value": "Should not change"},
{"op": "replace", "path": "/author", "value": "Should not change"},
{"op": "test", "path": "/title", "value": "not the same value"},
{"op": "move", "from": "/updated", "path": "/title"},
])
result = patch_item(self.valid_news_id[1], payload)
news_item = News.query.filter_by(NewsID=self.valid_news_id[1]).first()
self.assertNotEqual("Should not change", news_item.Contents)
self.assertNotEqual("Should not change", news_item.Author)
self.assertEqual("Test News 2", news_item.Title)
self.assertFalse(result["success"])
self.assertEqual("Comparison test failed in the patch", result["message"])
def test_invalid_op(self):
"""When an invalid OP is in the payload, should cancel the patch and rollback any
commits that were created before the OP."""
payload = json.dumps([
{"op": "replace", "path": "/author", "value": "Should not change"},
{"op": "invalid", "from": "/updated", "path": "/contents"}
])
result = patch_item(self.valid_news_id[0], payload)
news_item = News.query.filter_by(NewsID=self.valid_news_id[0]).first()
self.assertNotEqual("Should not change", news_item.Author)
self.assertFalse(result["success"])
self.assertEqual("Invalid operation in patch", result["message"])
def test_add_function_with_non_list(self):
"""Should raise a ValueError."""
with self.assertRaises(ValueError):
add(None, "/categories", "Not-List", {})
def test_copying_to_categories_raises(self):
"""Technically it is a valid operation, but we prevent it."""
with self.assertRaises(ValueError):
copy(None, "/author", "/categories", {})
def test_moving_to_categories_raises(self):
"""Technically it is a valid operation, but we prevent it."""
with self.assertRaises(Exception):
move(None, "/author", "/categories", {})
def test_removing_constrainted_items_raises(self):
"""Technically it is a valid operation, but we prevent it."""
with self.assertRaises(ValueError):
remove(None, "/author", {})
def test_replacing_categories_with_non_list_raises(self):
"""Technically it is a valid operation, but we prevent it."""
with self.assertRaises(ValueError):
replace(None, "/categories", "not-allowed", {})
def test_test_op_with_invalid_path(self):
"""The test op returns a boolean whether the test value matches target."""
payload = json.dumps([
{"op": "test", "path": "/doesnotexist", "value": "Test News 2"},
])
result = patch_item(self.valid_news_id[1], payload)
self.assertEqual(False, result["success"])
self.assertEqual("Comparison test failed in the patch", result["message"])
def test_copy_to_unknown_target_raises(self):
payload = json.dumps([
{"op": "copy", "from": "/created", "path": "/doesnotexist"},
])
result = patch_item(self.valid_news_id[1], payload)
self.assertEqual(False, result["success"])
self.assertEqual("Invalid operation in patch", result["message"])
class UnionFind:
def __init__(self, n):
self.parents = [-1] * n
self.n = n
self.rank = [0] * n
def find(self, a):
if self.parents[a] < 0:
return a
self.parents[a] = self.find(self.parents[a])
return self.parents[a]
def union(self, a, b):
a = self.find(a)
b = self.find(b)
if a == b:
return
if self.rank[a] < self.rank[b]:
self.parents[b] += self.parents[a]
self.parents[a] = b
else:
self.parents[a] += self.parents[b]
self.parents[b] = a
if self.rank[a] == self.rank[b]:
self.rank[a] += 1
def size(self, a):
return -self.parents[self.find(a)]
def group_num(self):
return len(list(filter(lambda x: x < 0, self.parents)))
def all_size(self):
ans = []
for i in range(self.n):
if self.parents[i] < 0:
ans.append(-self.parents[i])
return ans
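# The driver below solves a minimum-relabeling problem: A must become a
# palindrome, and all occurrences of a value are relabeled together. Each
# mismatched pair (A[i], A[n-i-1]) is unioned; a component of k distinct
# values then needs k - 1 relabelings, so the answer is sum(size) - len(size).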
n = int(input())
A = list(map(int, input().split()))
error = []
v = {}
num = 0
for i in range(n//2):
if A[i] != A[n-i-1]:
error.append((A[i], A[n-i-1]))
if A[i] not in v:
v[A[i]] = num
num += 1
if A[n-i-1] not in v:
v[A[n-i-1]] = num
num += 1
uf = UnionFind(num)
for i, j in error:
a, b = v[i], v[j]
uf.union(a, b)
size = uf.all_size()
print(sum(size) - len(size))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 9 22:03:53 2018
@author: raja
"""
from nltk.tree import ParentedTree
def find_path1(text, e1_from, e2_to):
    e1_from = e1_from.split()[0]
    e2_to = e2_to.split()[0]
def get_lca_length(location1, location2):
i = 0
while i < len(location1) and i < len(location2) and location1[i] == location2[i]:
i+=1
return i
def get_labels_from_lca(ptree, lca_len, location):
labels = []
for i in range(lca_len, len(location)):
labels.append(ptree[location[:i]].label())
return labels
def findPath(ptree, text1, text2):
leaf_values = ptree.leaves()
leaf_index1 = leaf_values.index(text1)
leaf_index2 = leaf_values.index(text2)
location1 = ptree.leaf_treeposition(leaf_index1)
location2 = ptree.leaf_treeposition(leaf_index2)
#find length of least common ancestor (lca)
lca_len = get_lca_length(location1, location2)
#find path from the node1 to lca
labels1 = get_labels_from_lca(ptree, lca_len, location1)
#ignore the first element, because it will be counted in the second part of the path
result = labels1[1:]
#inverse, because we want to go from the node to least common ancestor
result = result[::-1]
#add path from lca to node2
result = result + get_labels_from_lca(ptree, lca_len, location2)
return result
#text="(VP (VERB saw) (NP (DET the) (NOUN dog)))"
#e1_from = 'the'
#e2_to = 'dog'
ptree = ParentedTree.fromstring(text)
final=findPath(ptree, e1_from, e2_to)
    return final
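# Minimal usage sketch, reusing the bracketed parse from the commented example
# above; the tree string and the two leaf words are illustrative.
if __name__ == '__main__':
    sample = "(VP (VERB saw) (NP (DET the) (NOUN dog)))"
    # prints the constituent labels on the path from 'the' up to the lowest
    # common ancestor and back down to 'dog'
    print(find_path1(sample, 'the', 'dog'))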
#!/usr/bin/env python3
# Written by Min-Su Shin in Department of Astronomy, University of Michigan.
# Feel free to use or revise the code.
import sys
import urllib.parse
import urllib.request
import xml.dom.minidom
wsid = 0 # parameter : wsid of your account on CASJobs
pw = "00000" # parameter : password for your account on CASJobs
tb_name = sys.argv[1]
print("# ", tb_name)
# 6 arcsec search
sql_query = """
CREATE TABLE %s (
nsvs_id int not null,
objid bigint not null,
distance float not null,
type int not null,
psfMag_u real not null,
psfMagErr_u real not null,
flags_u bigint not null,
psfMag_g real not null,
psfMagErr_g real not null,
flags_g bigint not null,
psfMag_r real not null,
psfMagErr_r real not null,
flags_r bigint not null,
psfMag_i real not null,
psfMagErr_i real not null,
flags_i bigint not null,
psfMag_z real not null,
psfMagErr_z real not null,
flags_z bigint not null,
specObjID bigint not null
)
""" % (tb_name)
# Please, check the following web page for SOAP messages and relevant information
# http://casjobs.sdss.org/CasJobs/services/jobs.asmx?op=SubmitJob
# parameter : a default context is DR7
# parameter : taskname
# parameter : estimate of execution time (min)
params = urllib.parse.urlencode({'wsid': wsid, 'pw': pw, 'qry': sql_query, 'context': "DR7", 'taskname': tb_name, 'estimate': 30}).encode()
f = urllib.request.urlopen("http://casjobs.sdss.org/CasJobs/services/jobs.asmx/SubmitJob", params) # POST method
# get the response
dom = xml.dom.minidom.parse(f)
job_id = dom.getElementsByTagName("long")[0]
job_id_str = job_id.childNodes[0].data
print(job_id_str) # job id
# file deepcode ignore NoHardcodedCredentials/test: Secrets are all just examples for tests. # noqa: E501
import logging
import warnings
from collections import deque
from contextlib import contextmanager
from pathlib import Path
import fakeredis
import pytest
import sqlalchemy
import uuid
from _pytest.capture import CaptureFixture
from _pytest.logging import LogCaptureFixture
from _pytest.monkeypatch import MonkeyPatch
from moto import mock_dynamodb
from pymongo.errors import OperationFailure
from rasa.core.agent import Agent
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.shared.constants import DEFAULT_SENDER_ID
from sqlalchemy.dialects.postgresql.base import PGDialect
from sqlalchemy.dialects.sqlite.base import SQLiteDialect
from sqlalchemy.dialects.oracle.base import OracleDialect
from sqlalchemy.engine.url import URL
from typing import Any, Tuple, Text, Type, Dict, List, Union, Optional, ContextManager
from unittest.mock import MagicMock, Mock
import rasa.core.tracker_store
from rasa.shared.core.constants import (
ACTION_LISTEN_NAME,
ACTION_RESTART_NAME,
ACTION_SESSION_START_NAME,
)
from rasa.core.constants import POSTGRESQL_SCHEMA
from rasa.shared.core.domain import Domain
from rasa.shared.core.events import (
SlotSet,
ActionExecuted,
Restarted,
UserUttered,
SessionStarted,
BotUttered,
Event,
)
from rasa.shared.exceptions import ConnectionException, RasaException
from rasa.core.tracker_store import (
TrackerStore,
InMemoryTrackerStore,
RedisTrackerStore,
DEFAULT_REDIS_TRACKER_STORE_KEY_PREFIX,
SQLTrackerStore,
DynamoTrackerStore,
FailSafeTrackerStore,
AwaitableTrackerStore,
)
from rasa.shared.core.trackers import DialogueStateTracker, TrackerEventDiffEngine
from rasa.shared.nlu.training_data.message import Message
from rasa.utils.endpoints import EndpointConfig, read_endpoint_config
from tests.conftest import AsyncMock
from tests.core.conftest import MockedMongoTrackerStore
test_domain = Domain.load("data/test_domains/default.yml")
async def get_or_create_tracker_store(store: TrackerStore) -> None:
slot_key = "location"
slot_val = "Easter Island"
tracker = await store.get_or_create_tracker(DEFAULT_SENDER_ID)
ev = SlotSet(slot_key, slot_val)
tracker.update(ev)
assert tracker.get_slot(slot_key) == slot_val
await store.save(tracker)
again = await store.get_or_create_tracker(DEFAULT_SENDER_ID)
assert again.get_slot(slot_key) == slot_val
async def test_get_or_create():
    await get_or_create_tracker_store(InMemoryTrackerStore(test_domain))
# noinspection PyPep8Naming
@mock_dynamodb
async def test_dynamo_get_or_create():
    await get_or_create_tracker_store(DynamoTrackerStore(test_domain))
@mock_dynamodb
async def test_dynamo_tracker_floats():
conversation_id = uuid.uuid4().hex
tracker_store = DynamoTrackerStore(test_domain)
tracker = await tracker_store.get_or_create_tracker(
conversation_id, append_action_listen=False
)
# save `slot` event with known `float`-type timestamp
timestamp = 13423.23434623
tracker.update(SlotSet("key", "val", timestamp=timestamp))
await tracker_store.save(tracker)
# retrieve tracker and the event timestamp is retrieved as a `float`
tracker = await tracker_store.get_or_create_tracker(conversation_id)
retrieved_timestamp = tracker.events[0].timestamp
assert isinstance(retrieved_timestamp, float)
assert retrieved_timestamp == timestamp
async def test_restart_after_retrieval_from_tracker_store(domain: Domain):
store = InMemoryTrackerStore(domain)
tr = await store.get_or_create_tracker("myuser")
synth = [ActionExecuted("action_listen") for _ in range(4)]
for e in synth:
tr.update(e)
tr.update(Restarted())
latest_restart = tr.idx_after_latest_restart()
await store.save(tr)
tr2 = await store.retrieve("myuser")
latest_restart_after_loading = tr2.idx_after_latest_restart()
assert latest_restart == latest_restart_after_loading
async def test_tracker_store_remembers_max_history(domain: Domain):
store = InMemoryTrackerStore(domain)
tr = await store.get_or_create_tracker("myuser", max_event_history=42)
tr.update(Restarted())
await store.save(tr)
tr2 = await store.retrieve("myuser")
assert tr._max_event_history == tr2._max_event_history == 42
def test_tracker_store_endpoint_config_loading(endpoints_path: Text):
cfg = read_endpoint_config(endpoints_path, "tracker_store")
assert cfg == EndpointConfig.from_dict(
{
"type": "redis",
"url": "localhost",
"port": 6379,
"db": 0,
"password": "password",
"timeout": 30000,
"use_ssl": True,
"ssl_keyfile": "keyfile.key",
"ssl_certfile": "certfile.crt",
"ssl_ca_certs": "my-bundle.ca-bundle",
}
)
def test_create_tracker_store_from_endpoint_config(
domain: Domain, endpoints_path: Text
):
store = read_endpoint_config(endpoints_path, "tracker_store")
tracker_store = RedisTrackerStore(
domain=domain,
host="localhost",
port=6379,
db=0,
password="password",
record_exp=3000,
use_ssl=True,
ssl_keyfile="keyfile.key",
ssl_certfile="certfile.crt",
ssl_ca_certs="my-bundle.ca-bundle",
)
assert isinstance(tracker_store, type(TrackerStore.create(store, domain)))
def test_redis_tracker_store_invalid_key_prefix(domain: Domain):
test_invalid_key_prefix = "$$ &!"
tracker_store = RedisTrackerStore(
domain=domain,
host="localhost",
port=6379,
db=0,
password="password",
key_prefix=test_invalid_key_prefix,
record_exp=3000,
)
assert tracker_store._get_key_prefix() == DEFAULT_REDIS_TRACKER_STORE_KEY_PREFIX
def test_redis_tracker_store_valid_key_prefix(domain: Domain):
test_valid_key_prefix = "spanish"
tracker_store = RedisTrackerStore(
domain=domain,
host="localhost",
port=6379,
db=0,
password="password",
key_prefix=test_valid_key_prefix,
record_exp=3000,
)
assert (
tracker_store._get_key_prefix()
== f"{test_valid_key_prefix}:{DEFAULT_REDIS_TRACKER_STORE_KEY_PREFIX}"
)
def test_exception_tracker_store_from_endpoint_config(
domain: Domain, monkeypatch: MonkeyPatch, endpoints_path: Text
):
"""Check if tracker store properly handles exceptions.
    If we cannot create a tracker store by instantiating the
    expected type (e.g. due to an exception) we should fall back to
    the default `InMemoryTrackerStore`."""
store = read_endpoint_config(endpoints_path, "tracker_store")
mock = Mock(side_effect=Exception("test exception"))
monkeypatch.setattr(rasa.core.tracker_store, "RedisTrackerStore", mock)
with pytest.raises(Exception) as e:
TrackerStore.create(store, domain)
assert "test exception" in str(e.value)
def test_raise_connection_exception_redis_tracker_store_creation(
domain: Domain, monkeypatch: MonkeyPatch, endpoints_path: Text
):
store = read_endpoint_config(endpoints_path, "tracker_store")
monkeypatch.setattr(
rasa.core.tracker_store,
"RedisTrackerStore",
Mock(side_effect=ConnectionError()),
)
with pytest.raises(ConnectionException):
TrackerStore.create(store, domain)
def test_mongo_tracker_store_raise_exception(domain: Domain, monkeypatch: MonkeyPatch):
monkeypatch.setattr(
rasa.core.tracker_store,
"MongoTrackerStore",
Mock(
side_effect=OperationFailure("not authorized on logs to execute command.")
),
)
with pytest.raises(ConnectionException) as error:
TrackerStore.create(
EndpointConfig(username="username", password="password", type="mongod"),
domain,
)
assert "not authorized on logs to execute command." in str(error.value)
class HostExampleTrackerStore(RedisTrackerStore):
pass
class NonAsyncTrackerStore(TrackerStore):
def retrieve(self, sender_id: Text) -> Optional[DialogueStateTracker]:
pass
def save(self, tracker: DialogueStateTracker) -> None:
pass
def test_tracker_store_with_host_argument_from_string(domain: Domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
store_config.type = "tests.core.test_tracker_stores.HostExampleTrackerStore"
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter("error")
tracker_store = TrackerStore.create(store_config, domain)
assert len(record) == 0
assert isinstance(tracker_store, HostExampleTrackerStore)
def test_tracker_store_from_invalid_module(domain: Domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
store_config.type = "a.module.which.cannot.be.found"
with pytest.warns(UserWarning):
tracker_store = TrackerStore.create(store_config, domain)
assert isinstance(tracker_store, InMemoryTrackerStore)
def test_tracker_store_from_invalid_string(domain: Domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
store_config.type = "any string"
with pytest.warns(UserWarning):
tracker_store = TrackerStore.create(store_config, domain)
assert isinstance(tracker_store, InMemoryTrackerStore)
async def _tracker_store_and_tracker_with_slot_set() -> Tuple[
InMemoryTrackerStore, DialogueStateTracker
]:
# returns an InMemoryTrackerStore containing a tracker with a slot set
slot_key = "cuisine"
slot_val = "French"
store = InMemoryTrackerStore(test_domain)
tracker = await store.get_or_create_tracker(DEFAULT_SENDER_ID)
ev = SlotSet(slot_key, slot_val)
tracker.update(ev)
return store, tracker
async def test_tracker_serialisation():
store, tracker = await _tracker_store_and_tracker_with_slot_set()
serialised = store.serialise_tracker(tracker)
assert tracker == store.deserialise_tracker(DEFAULT_SENDER_ID, serialised)
@pytest.mark.parametrize(
"full_url",
[
"postgresql://localhost",
"postgresql://localhost:5432",
"postgresql://user:secret@localhost",
"sqlite:///",
],
)
def test_get_db_url_with_fully_specified_url(full_url: Text):
assert SQLTrackerStore.get_db_url(host=full_url) == full_url
def test_get_db_url_with_port_in_host():
host = "localhost:1234"
dialect = "postgresql"
db = "mydb"
expected = f"{dialect}://{host}/{db}"
assert (
str(SQLTrackerStore.get_db_url(dialect=dialect, host=host, db=db)) == expected
)
def test_db_get_url_with_sqlite():
expected = "sqlite:///rasa.db"
assert str(SQLTrackerStore.get_db_url(dialect="sqlite", db="rasa.db")) == expected
def test_get_db_url_with_correct_host():
expected = "postgresql://localhost:5005/mydb"
assert (
str(
SQLTrackerStore.get_db_url(
dialect="postgresql", host="localhost", port=5005, db="mydb"
)
)
== expected
)
def test_get_db_url_with_query():
expected = "postgresql://localhost:5005/mydb?driver=my-driver"
assert (
str(
SQLTrackerStore.get_db_url(
dialect="postgresql",
host="localhost",
port=5005,
db="mydb",
query={"driver": "my-driver"},
)
)
== expected
)
def test_sql_tracker_store_logs_do_not_show_password(caplog: LogCaptureFixture):
dialect = "postgresql"
host = "localhost"
port = 9901
db = "some-database"
username = "db-user"
# deepcode ignore NoHardcodedPasswords/test: Test credential
password = "some-password"
with caplog.at_level(logging.DEBUG):
_ = SQLTrackerStore(None, dialect, host, port, db, username, password)
# the URL in the logs does not contain the password
assert password not in caplog.text
# instead the password is displayed as '***'
assert f"postgresql://{username}:***@{host}:{port}/{db}" in caplog.text
def test_db_url_with_query_from_endpoint_config(tmp_path: Path):
endpoint_config = """
tracker_store:
dialect: postgresql
url: localhost
port: 5123
username: user
password: pw
login_db: login-db
query:
driver: my-driver
another: query
"""
f = tmp_path / "tmp_config_file.yml"
f.write_text(endpoint_config)
store_config = read_endpoint_config(str(f), "tracker_store")
url = SQLTrackerStore.get_db_url(**store_config.kwargs)
import itertools
# order of query dictionary in yaml is random, test against both permutations
connection_url = "postgresql://user:pw@:5123/login-db?"
assert any(
str(url) == connection_url + "&".join(permutation)
for permutation in (
itertools.permutations(("another=query", "driver=my-driver"))
)
)
async def test_fail_safe_tracker_store_if_no_errors():
mocked_tracker_store = Mock()
tracker_store = FailSafeTrackerStore(mocked_tracker_store, None)
# test save
mocked_tracker_store.save = AsyncMock()
await tracker_store.save(None)
mocked_tracker_store.save.assert_called_once()
# test retrieve
expected = [1]
mocked_tracker_store.retrieve = AsyncMock(return_value=expected)
sender_id = "10"
assert await tracker_store.retrieve(sender_id) == expected
mocked_tracker_store.retrieve.assert_called_once_with(sender_id)
# test keys
expected = ["sender 1", "sender 2"]
mocked_tracker_store.keys = AsyncMock(return_value=expected)
assert await tracker_store.keys() == expected
mocked_tracker_store.keys.assert_called_once()
async def test_fail_safe_tracker_store_with_save_error():
mocked_tracker_store = Mock()
mocked_tracker_store.save = Mock(side_effect=Exception())
fallback_tracker_store = Mock()
fallback_tracker_store.save = AsyncMock()
on_error_callback = Mock()
tracker_store = FailSafeTrackerStore(
mocked_tracker_store, on_error_callback, fallback_tracker_store
)
await tracker_store.save(None)
fallback_tracker_store.save.assert_called_once()
on_error_callback.assert_called_once()
async def test_fail_safe_tracker_store_with_keys_error():
mocked_tracker_store = Mock()
mocked_tracker_store.keys = Mock(side_effect=Exception())
on_error_callback = Mock()
tracker_store = FailSafeTrackerStore(mocked_tracker_store, on_error_callback)
assert await tracker_store.keys() == []
on_error_callback.assert_called_once()
async def test_fail_safe_tracker_store_with_retrieve_error():
mocked_tracker_store = Mock()
mocked_tracker_store.retrieve = Mock(side_effect=Exception())
fallback_tracker_store = Mock()
on_error_callback = Mock()
tracker_store = FailSafeTrackerStore(
mocked_tracker_store, on_error_callback, fallback_tracker_store
)
assert await tracker_store.retrieve("sender_id") is None
on_error_callback.assert_called_once()
def test_set_fail_safe_tracker_store_domain(domain: Domain):
tracker_store = InMemoryTrackerStore(domain)
fallback_tracker_store = InMemoryTrackerStore(None)
failsafe_store = FailSafeTrackerStore(tracker_store, None, fallback_tracker_store)
failsafe_store.domain = domain
assert failsafe_store.domain is domain
assert tracker_store.domain is failsafe_store.domain
assert fallback_tracker_store.domain is failsafe_store.domain
async def create_tracker_with_partially_saved_events(
tracker_store: TrackerStore,
) -> Tuple[List[Event], DialogueStateTracker]:
# creates a tracker with two events and saved it to the tracker store
# following that, it adds three more events that are not saved to the tracker store
sender_id = uuid.uuid4().hex
# create tracker with two events and save it
events = [UserUttered("hello"), BotUttered("what")]
tracker = DialogueStateTracker.from_events(sender_id, events)
await tracker_store.save(tracker)
# add more events to the tracker, do not yet save it
events = [ActionExecuted(ACTION_LISTEN_NAME), UserUttered("123"), BotUttered("yes")]
for event in events:
tracker.update(event)
return events, tracker
async def _saved_tracker_with_multiple_session_starts(
tracker_store: TrackerStore, sender_id: Text
) -> DialogueStateTracker:
tracker = DialogueStateTracker.from_events(
sender_id,
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
],
)
await tracker_store.save(tracker)
return await tracker_store.retrieve(sender_id)
async def test_mongo_additional_events(domain: Domain):
tracker_store = MockedMongoTrackerStore(domain)
events, tracker = await create_tracker_with_partially_saved_events(tracker_store)
# make sure only new events are returned
# noinspection PyProtectedMember
assert list(tracker_store._additional_events(tracker)) == events
async def test_mongo_additional_events_with_session_start(domain: Domain):
sender = "test_mongo_additional_events_with_session_start"
tracker_store = MockedMongoTrackerStore(domain)
tracker = await _saved_tracker_with_multiple_session_starts(tracker_store, sender)
tracker.update(UserUttered("hi2"))
# noinspection PyProtectedMember
additional_events = list(tracker_store._additional_events(tracker))
assert len(additional_events) == 1
assert isinstance(additional_events[0], UserUttered)
# we cannot parametrise over this and the previous test due to the different ways of
# calling _additional_events()
async def test_sql_additional_events(domain: Domain):
tracker_store = SQLTrackerStore(domain)
additional_events, tracker = await create_tracker_with_partially_saved_events(
tracker_store
)
# make sure only new events are returned
with tracker_store.session_scope() as session:
# noinspection PyProtectedMember
assert (
list(tracker_store._additional_events(session, tracker))
== additional_events
)
async def test_sql_additional_events_with_session_start(domain: Domain):
sender = "test_sql_additional_events_with_session_start"
tracker_store = SQLTrackerStore(domain)
tracker = await _saved_tracker_with_multiple_session_starts(tracker_store, sender)
tracker.update(UserUttered("hi2"), domain)
# make sure only new events are returned
with tracker_store.session_scope() as session:
# noinspection PyProtectedMember
additional_events = list(tracker_store._additional_events(session, tracker))
assert len(additional_events) == 1
assert isinstance(additional_events[0], UserUttered)
@pytest.mark.parametrize(
"tracker_store_type,tracker_store_kwargs",
[(MockedMongoTrackerStore, {}), (SQLTrackerStore, {"host": "sqlite:///"})],
)
async def test_tracker_store_retrieve_with_session_started_events(
tracker_store_type: Type[TrackerStore],
tracker_store_kwargs: Dict,
domain: Domain,
):
tracker_store = tracker_store_type(domain, **tracker_store_kwargs)
events = [
UserUttered("Hola", {"name": "greet"}, timestamp=1),
BotUttered("Hi", timestamp=2),
SessionStarted(timestamp=3),
UserUttered("Ciao", {"name": "greet"}, timestamp=4),
]
sender_id = "test_sql_tracker_store_with_session_events"
tracker = DialogueStateTracker.from_events(sender_id, events)
await tracker_store.save(tracker)
# Save other tracker to ensure that we don't run into problems with other senders
other_tracker = DialogueStateTracker.from_events("other-sender", [SessionStarted()])
await tracker_store.save(other_tracker)
# Retrieve tracker with events since latest SessionStarted
tracker = await tracker_store.retrieve(sender_id)
assert len(tracker.events) == 2
assert all((event == tracker.events[i] for i, event in enumerate(events[2:])))
@pytest.mark.parametrize(
"tracker_store_type,tracker_store_kwargs",
[(MockedMongoTrackerStore, {}), (SQLTrackerStore, {"host": "sqlite:///"})],
)
async def test_tracker_store_retrieve_without_session_started_events(
tracker_store_type: Type[TrackerStore],
tracker_store_kwargs: Dict,
domain,
):
tracker_store = tracker_store_type(domain, **tracker_store_kwargs)
# Create tracker with a SessionStarted event
events = [
UserUttered("Hola", {"name": "greet"}),
BotUttered("Hi"),
UserUttered("Ciao", {"name": "greet"}),
BotUttered("Hi2"),
]
sender_id = "test_sql_tracker_store_retrieve_without_session_started_events"
tracker = DialogueStateTracker.from_events(sender_id, events)
await tracker_store.save(tracker)
# Save other tracker to ensure that we don't run into problems with other senders
other_tracker = DialogueStateTracker.from_events("other-sender", [SessionStarted()])
await tracker_store.save(other_tracker)
tracker = await tracker_store.retrieve(sender_id)
assert len(tracker.events) == 4
assert all(event == tracker.events[i] for i, event in enumerate(events))
@pytest.mark.parametrize(
"tracker_store_type,tracker_store_kwargs",
[
(MockedMongoTrackerStore, {}),
(SQLTrackerStore, {"host": "sqlite:///"}),
(InMemoryTrackerStore, {}),
],
)
async def test_tracker_store_retrieve_with_events_from_previous_sessions(
tracker_store_type: Type[TrackerStore], tracker_store_kwargs: Dict
):
tracker_store = tracker_store_type(Domain.empty(), **tracker_store_kwargs)
conversation_id = uuid.uuid4().hex
tracker = DialogueStateTracker.from_events(
conversation_id,
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
],
)
await tracker_store.save(tracker)
actual = await tracker_store.retrieve_full_tracker(conversation_id)
assert len(actual.events) == len(tracker.events)
def test_session_scope_error(
monkeypatch: MonkeyPatch, capsys: CaptureFixture, domain: Domain
):
tracker_store = SQLTrackerStore(domain)
tracker_store.sessionmaker = Mock()
requested_schema = uuid.uuid4().hex
# `ensure_schema_exists()` raises `ValueError`
mocked_ensure_schema_exists = Mock(side_effect=ValueError(requested_schema))
monkeypatch.setattr(
rasa.core.tracker_store, "ensure_schema_exists", mocked_ensure_schema_exists
)
# `SystemExit` is triggered by failing `ensure_schema_exists()`
with pytest.raises(SystemExit):
with tracker_store.session_scope() as _:
pass
# error message is printed
assert (
f"Requested PostgreSQL schema '{requested_schema}' was not found in the "
f"database." in capsys.readouterr()[0]
)
@pytest.mark.parametrize(
"url,is_postgres_url",
[
(f"{PGDialect.name}://admin:pw@localhost:5432/rasa", True),
(f"{SQLiteDialect.name}:///", False),
(URL(PGDialect.name), True),
(URL(SQLiteDialect.name), False),
],
)
def test_is_postgres_url(url: Union[Text, URL], is_postgres_url: bool):
assert rasa.core.tracker_store.is_postgresql_url(url) == is_postgres_url
def set_or_delete_postgresql_schema_env_var(
monkeypatch: MonkeyPatch, value: Optional[Text]
) -> None:
"""Set `POSTGRESQL_SCHEMA` environment variable using `MonkeyPatch`.
Args:
monkeypatch: Instance of `MonkeyPatch` to use for patching.
value: Value of the `POSTGRESQL_SCHEMA` environment variable to set.
"""
if value is None:
monkeypatch.delenv(POSTGRESQL_SCHEMA, raising=False)
else:
monkeypatch.setenv(POSTGRESQL_SCHEMA, value)
@pytest.mark.parametrize(
"url,schema_env,kwargs",
[
# postgres without schema
(
f"{PGDialect.name}://admin:pw@localhost:5432/rasa",
None,
{
"pool_size": rasa.core.tracker_store.POSTGRESQL_DEFAULT_POOL_SIZE,
"max_overflow": rasa.core.tracker_store.POSTGRESQL_DEFAULT_MAX_OVERFLOW,
},
),
# postgres with schema
(
f"{PGDialect.name}://admin:pw@localhost:5432/rasa",
"schema1",
{
"connect_args": {"options": "-csearch_path=schema1"},
"pool_size": rasa.core.tracker_store.POSTGRESQL_DEFAULT_POOL_SIZE,
"max_overflow": rasa.core.tracker_store.POSTGRESQL_DEFAULT_MAX_OVERFLOW,
},
),
# oracle without schema
(f"{OracleDialect.name}://admin:pw@localhost:5432/rasa", None, {}),
# oracle with schema
(f"{OracleDialect.name}://admin:pw@localhost:5432/rasa", "schema1", {}),
# sqlite
(f"{SQLiteDialect.name}:///", None, {}),
],
)
def test_create_engine_kwargs(
monkeypatch: MonkeyPatch,
url: Union[Text, URL],
schema_env: Optional[Text],
kwargs: Dict[Text, Dict[Text, Union[Text, int]]],
):
set_or_delete_postgresql_schema_env_var(monkeypatch, schema_env)
assert rasa.core.tracker_store.create_engine_kwargs(url) == kwargs
@contextmanager
def does_not_raise():
"""Contextmanager to be used when an expression is not expected to raise an
exception.
This contextmanager can be used in parametrized tests, where some input objects
are expected to raise and others are not.
Example:
@pytest.mark.parametrize(
"a,b,raises_context",
[
            # 5/6 is a legal division
(5, 6, does_not_raise()),
# 5/0 raises a `ZeroDivisionError`
(5, 0, pytest.raises(ZeroDivisionError)),
],
)
def test_divide(
a: int, b: int, raises_context: ContextManager,
):
with raises_context:
_ = a / b
"""
yield
@pytest.mark.parametrize(
"is_postgres,schema_env,schema_exists,raises_context",
[
(True, "schema1", True, does_not_raise()),
(True, "schema1", False, pytest.raises(ValueError)),
(False, "schema1", False, does_not_raise()),
(True, None, False, does_not_raise()),
(False, None, False, does_not_raise()),
],
)
def test_ensure_schema_exists(
monkeypatch: MonkeyPatch,
is_postgres: bool,
schema_env: Optional[Text],
schema_exists: bool,
raises_context: ContextManager,
):
set_or_delete_postgresql_schema_env_var(monkeypatch, schema_env)
monkeypatch.setattr(
rasa.core.tracker_store, "is_postgresql_url", lambda _: is_postgres
)
monkeypatch.setattr(sqlalchemy, "exists", Mock())
# mock the `session.query().scalar()` query which returns whether the schema
# exists in the db
scalar = Mock(return_value=schema_exists)
query = Mock(scalar=scalar)
session = Mock()
session.query = Mock(return_value=query)
with raises_context:
rasa.core.tracker_store.ensure_schema_exists(session)
def test_current_state_without_events(domain: Domain):
tracker_store = MockedMongoTrackerStore(domain)
# insert some events
events = [
UserUttered("Hola", {"name": "greet"}),
BotUttered("Hi"),
UserUttered("Ciao", {"name": "greet"}),
BotUttered("Hi2"),
]
sender_id = "test_mongo_tracker_store_current_state_without_events"
tracker = DialogueStateTracker.from_events(sender_id, events)
# get current state without events
# noinspection PyProtectedMember
state = tracker_store._current_tracker_state_without_events(tracker)
# `events` key should not be in there
assert state and "events" not in state
def test_login_db_with_no_postgresql(tmp_path: Path):
with pytest.warns(UserWarning):
SQLTrackerStore(db=str(tmp_path / "rasa.db"), login_db=str(tmp_path / "other"))
@pytest.mark.parametrize(
"config",
[
{
"type": "mongod",
"url": "mongodb://0.0.0.0:42/?serverSelectionTimeoutMS=5000",
},
{"type": "dynamo"},
],
)
def test_tracker_store_connection_error(config: Dict, domain: Domain):
store = EndpointConfig.from_dict(config)
with pytest.raises(ConnectionException):
TrackerStore.create(store, domain)
async def prepare_token_serialisation(
tracker_store: TrackerStore, response_selector_agent: Agent, sender_id: Text
):
text = "Good morning"
tokenizer = WhitespaceTokenizer(WhitespaceTokenizer.get_default_config())
tokens = tokenizer.tokenize(Message(data={"text": text}), "text")
indices = [[t.start, t.end] for t in tokens]
tracker = await tracker_store.get_or_create_tracker(sender_id=sender_id)
parse_data = await response_selector_agent.parse_message(text)
event = UserUttered(
"Good morning",
parse_data.get("intent"),
parse_data.get("entities", []),
parse_data,
)
tracker.update(event)
await tracker_store.save(tracker)
retrieved_tracker = await tracker_store.retrieve(sender_id=sender_id)
event = retrieved_tracker.get_last_event_for(event_type=UserUttered)
event_tokens = event.as_dict().get("parse_data").get("text_tokens")
assert event_tokens == indices
async def test_inmemory_tracker_store_with_token_serialisation(
    domain: Domain, default_agent: Agent
):
    tracker_store = InMemoryTrackerStore(domain)
    await prepare_token_serialisation(tracker_store, default_agent, "inmemory")
async def test_mongo_tracker_store_with_token_serialisation(
    domain: Domain, response_selector_agent: Agent
):
    tracker_store = MockedMongoTrackerStore(domain)
    await prepare_token_serialisation(tracker_store, response_selector_agent, "mongo")
async def test_sql_tracker_store_with_token_serialisation(
    domain: Domain, response_selector_agent: Agent
):
    tracker_store = SQLTrackerStore(domain, host="sqlite:///")
    await prepare_token_serialisation(tracker_store, response_selector_agent, "sql")
def test_sql_tracker_store_creation_with_invalid_port(domain: Domain):
with pytest.raises(RasaException) as error:
TrackerStore.create(
EndpointConfig(port="$DB_PORT", type="sql"),
domain,
)
assert "port '$DB_PORT' cannot be cast to integer." in str(error.value)
def test_create_non_async_tracker_store(domain: Domain):
endpoint_config = EndpointConfig(
type="tests.core.test_tracker_stores.NonAsyncTrackerStore"
)
with pytest.warns(FutureWarning):
tracker_store = TrackerStore.create(endpoint_config)
assert isinstance(tracker_store, AwaitableTrackerStore)
assert isinstance(tracker_store._tracker_store, NonAsyncTrackerStore)
def test_create_awaitable_tracker_store_with_endpoint_config():
endpoint_config = EndpointConfig(
type="tests.core.test_tracker_stores.NonAsyncTrackerStore"
)
tracker_store = AwaitableTrackerStore.create(endpoint_config)
assert isinstance(tracker_store, AwaitableTrackerStore)
assert isinstance(tracker_store._tracker_store, NonAsyncTrackerStore)
@pytest.mark.parametrize(
"endpoints_file, expected_type",
[
(None, InMemoryTrackerStore),
("data/test_endpoints/endpoints_sql.yml", SQLTrackerStore),
("data/test_endpoints/endpoints_redis.yml", RedisTrackerStore),
],
)
def test_create_tracker_store_from_endpoints_file(
endpoints_file: Optional[Text], expected_type: Any, domain: Domain
) -> None:
endpoint_config = read_endpoint_config(endpoints_file, "tracker_store")
tracker_store = rasa.core.tracker_store.create_tracker_store(
endpoint_config, domain
)
assert rasa.core.tracker_store.check_if_tracker_store_async(tracker_store) is True
assert isinstance(tracker_store, expected_type)
async def test_fail_safe_tracker_store_retrieve_full_tracker(
domain: Domain, tracker_with_restarted_event: DialogueStateTracker
) -> None:
primary_tracker_store = SQLTrackerStore(domain)
sender_id = tracker_with_restarted_event.sender_id
tracker_store = FailSafeTrackerStore(primary_tracker_store)
await tracker_store.save(tracker_with_restarted_event)
tracker = await tracker_store.retrieve_full_tracker(sender_id)
assert tracker == tracker_with_restarted_event
async def test_fail_safe_tracker_store_retrieve_full_tracker_with_exception(
caplog: LogCaptureFixture,
) -> None:
primary_tracker_store = MagicMock()
primary_tracker_store.domain = Domain.empty()
primary_tracker_store.event_broker = None
exception = Exception("Something went wrong")
primary_tracker_store.retrieve_full_tracker = AsyncMock(side_effect=exception)
tracker_store = FailSafeTrackerStore(primary_tracker_store)
with caplog.at_level(logging.ERROR):
await tracker_store.retrieve_full_tracker("some_id")
assert "Error happened when trying to retrieve conversation tracker" in caplog.text
assert f"Please investigate the following error: {exception}." in caplog.text
async def test_sql_get_or_create_full_tracker_without_action_listen() -> None:
tracker_store = SQLTrackerStore(Domain.empty())
sender_id = uuid.uuid4().hex
tracker = await tracker_store.get_or_create_full_tracker(
sender_id=sender_id, append_action_listen=False
)
assert tracker.sender_id == sender_id
assert tracker.events == deque()
async def test_sql_get_or_create_full_tracker_with_action_listen() -> None:
tracker_store = SQLTrackerStore(Domain.empty())
sender_id = uuid.uuid4().hex
tracker = await tracker_store.get_or_create_full_tracker(
sender_id=sender_id, append_action_listen=True
)
assert tracker.sender_id == sender_id
assert tracker.events == deque([ActionExecuted(ACTION_LISTEN_NAME)])
async def test_sql_get_or_create_full_tracker_with_existing_tracker(
tracker_with_restarted_event: DialogueStateTracker,
) -> None:
sender_id = tracker_with_restarted_event.sender_id
tracker_store = SQLTrackerStore(Domain.empty())
await tracker_store.save(tracker_with_restarted_event)
tracker = await tracker_store.get_or_create_full_tracker(
sender_id=sender_id, append_action_listen=False
)
assert tracker.sender_id == sender_id
assert tracker.events == deque(tracker_with_restarted_event.events)
async def test_sql_tracker_store_retrieve_full_tracker(
domain: Domain, tracker_with_restarted_event: DialogueStateTracker
) -> None:
tracker_store = SQLTrackerStore(domain)
sender_id = tracker_with_restarted_event.sender_id
await tracker_store.save(tracker_with_restarted_event)
tracker = await tracker_store.retrieve_full_tracker(sender_id)
assert tracker == tracker_with_restarted_event
async def test_sql_tracker_store_retrieve(
domain: Domain,
tracker_with_restarted_event: DialogueStateTracker,
events_after_restart: List[Event],
) -> None:
tracker_store = SQLTrackerStore(domain)
sender_id = tracker_with_restarted_event.sender_id
await tracker_store.save(tracker_with_restarted_event)
tracker = await tracker_store.retrieve(sender_id)
# the retrieved tracker with the latest session would not contain
# `action_session_start` event because the SQLTrackerStore filters
# only the events after `session_started` event
assert list(tracker.events) == events_after_restart[1:]
async def test_in_memory_tracker_store_retrieve_full_tracker(
domain: Domain,
tracker_with_restarted_event: DialogueStateTracker,
) -> None:
tracker_store = InMemoryTrackerStore(domain)
sender_id = tracker_with_restarted_event.sender_id
await tracker_store.save(tracker_with_restarted_event)
tracker = await tracker_store.retrieve_full_tracker(sender_id)
assert tracker == tracker_with_restarted_event
async def test_in_memory_tracker_store_retrieve(
domain: Domain,
tracker_with_restarted_event: DialogueStateTracker,
events_after_restart: List[Event],
) -> None:
tracker_store = InMemoryTrackerStore(domain)
sender_id = tracker_with_restarted_event.sender_id
await tracker_store.save(tracker_with_restarted_event)
tracker = await tracker_store.retrieve(sender_id)
assert list(tracker.events) == events_after_restart
async def test_mongo_tracker_store_retrieve_full_tracker(
domain: Domain,
tracker_with_restarted_event: DialogueStateTracker,
) -> None:
tracker_store = MockedMongoTrackerStore(domain)
sender_id = tracker_with_restarted_event.sender_id
await tracker_store.save(tracker_with_restarted_event)
tracker = await tracker_store.retrieve_full_tracker(sender_id)
assert tracker == tracker_with_restarted_event
async def test_mongo_tracker_store_retrieve(
domain: Domain,
tracker_with_restarted_event: DialogueStateTracker,
events_after_restart: List[Event],
) -> None:
tracker_store = MockedMongoTrackerStore(domain)
sender_id = tracker_with_restarted_event.sender_id
await tracker_store.save(tracker_with_restarted_event)
tracker = await tracker_store.retrieve(sender_id)
# the retrieved tracker with the latest session would not contain
# `action_session_start` event because the MongoTrackerStore filters
# only the events after `session_started` event
assert list(tracker.events) == events_after_restart[1:]
class MockedRedisTrackerStore(RedisTrackerStore):
def __init__(
self,
domain: Domain,
) -> None:
self.red = fakeredis.FakeStrictRedis()
self.key_prefix = DEFAULT_REDIS_TRACKER_STORE_KEY_PREFIX
self.record_exp = None
super(RedisTrackerStore, self).__init__(domain, None)
async def test_redis_tracker_store_retrieve_full_tracker(
domain: Domain,
tracker_with_restarted_event: DialogueStateTracker,
) -> None:
tracker_store = MockedRedisTrackerStore(domain)
sender_id = tracker_with_restarted_event.sender_id
await tracker_store.save(tracker_with_restarted_event)
tracker = await tracker_store.retrieve_full_tracker(sender_id)
assert tracker == tracker_with_restarted_event
async def test_redis_tracker_store_retrieve(
domain: Domain,
tracker_with_restarted_event: DialogueStateTracker,
events_after_restart: List[Event],
) -> None:
tracker_store = MockedRedisTrackerStore(domain)
sender_id = tracker_with_restarted_event.sender_id
await tracker_store.save(tracker_with_restarted_event)
tracker = await tracker_store.retrieve(sender_id)
assert list(tracker.events) == events_after_restart
async def test_redis_tracker_store_merge_trackers_same_session() -> None:
start_session_sequence = [
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
]
events: List[Event] = start_session_sequence + [UserUttered("hello")]
prior_tracker = DialogueStateTracker.from_events(
"same-session",
evts=events,
)
events += [BotUttered("Hey! How can I help you?")]
new_tracker = DialogueStateTracker.from_events(
"same-session",
evts=events,
)
actual_tracker = RedisTrackerStore._merge_trackers(prior_tracker, new_tracker)
assert actual_tracker == new_tracker
def test_redis_tracker_store_merge_trackers_overlapping_session() -> None:
prior_tracker_events: List[Event] = [
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=1),
SessionStarted(timestamp=2),
ActionExecuted(ACTION_LISTEN_NAME, timestamp=3),
UserUttered("hello", timestamp=4),
BotUttered("Hey! How can I help you?", timestamp=5),
UserUttered("/restart", timestamp=6),
ActionExecuted(ACTION_RESTART_NAME, timestamp=7),
]
new_start_session = [
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=8),
SessionStarted(timestamp=9),
ActionExecuted(ACTION_LISTEN_NAME, timestamp=10),
]
prior_tracker_events += new_start_session
prior_tracker = DialogueStateTracker.from_events(
"overlapping-session",
evts=prior_tracker_events,
)
after_restart_event = [UserUttered("hi again", timestamp=11)]
new_tracker_events = new_start_session + after_restart_event
new_tracker = DialogueStateTracker.from_events(
"overlapping-session",
evts=new_tracker_events,
)
actual_tracker = RedisTrackerStore._merge_trackers(prior_tracker, new_tracker)
expected_events = prior_tracker_events + after_restart_event
assert list(actual_tracker.events) == expected_events
def test_redis_tracker_store_merge_trackers_different_session() -> None:
prior_tracker_events: List[Event] = [
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=1),
SessionStarted(timestamp=2),
ActionExecuted(ACTION_LISTEN_NAME, timestamp=3),
UserUttered("hello", timestamp=4),
BotUttered("Hey! How can I help you?", timestamp=5),
]
prior_tracker = DialogueStateTracker.from_events(
"different-session",
evts=prior_tracker_events,
)
new_session = [
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=8),
SessionStarted(timestamp=9),
ActionExecuted(ACTION_LISTEN_NAME, timestamp=10),
UserUttered("I need help.", timestamp=11),
]
new_tracker = DialogueStateTracker.from_events(
"different-session",
evts=new_session,
)
actual_tracker = RedisTrackerStore._merge_trackers(prior_tracker, new_tracker)
expected_events = prior_tracker_events + new_session
assert list(actual_tracker.events) == expected_events
async def test_tracker_event_diff_engine_event_difference() -> None:
start_session_sequence = [
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
]
events: List[Event] = start_session_sequence + [UserUttered("hello")]
prior_tracker = DialogueStateTracker.from_events(
"same-session",
evts=events,
)
new_events = [BotUttered("Hey! How can I help you?")]
events += new_events
new_tracker = DialogueStateTracker.from_events(
"same-session",
evts=events,
)
event_diff = TrackerEventDiffEngine.event_difference(prior_tracker, new_tracker)
assert new_events == event_diff
|
import _wingpio as gpio
import time
led_pin_one = 5
led_pin_two = 6
sensor_pin = 4
pinOneValue = gpio.HIGH
pinTwoValue = gpio.LOW
gpio.setup(led_pin_one, gpio.OUT, gpio.PUD_OFF, gpio.HIGH)
gpio.setup(led_pin_two, gpio.OUT, gpio.PUD_OFF, gpio.HIGH)
def work():
    # Measure how long the sensor pin takes to read HIGH after being driven
    # LOW: discharge the pin, then count polling iterations while it stays LOW.
    count = 0
    gpio.setup(sensor_pin, gpio.OUT)
    gpio.output(sensor_pin, gpio.LOW)
    time.sleep(0.1)
    gpio.setup(sensor_pin, gpio.IN)
    while gpio.input(sensor_pin) == gpio.LOW:
        count += 1
    # light one LED or the other depending on how slowly the pin charged
    if count > 10000:
        gpio.output(led_pin_one, gpio.HIGH)
        gpio.output(led_pin_two, gpio.LOW)
    else:
        gpio.output(led_pin_one, gpio.LOW)
        gpio.output(led_pin_two, gpio.HIGH)
    return count
try:
while True:
work()
except KeyboardInterrupt:
pass
finally:
gpio.cleanup() |
# -*- coding: utf-8 -*-
"""URLs for default BEL resources.
This script is susceptible to rate limits from the GitHub API, so don't run it over and over!
"""
import logging
import os
from bel_resources.github import get_famplex_url, get_github_url
HERE = os.path.abspath(os.path.dirname(__file__))
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("pybel").setLevel(logging.DEBUG)
def _get_conso_url(name):
return get_github_url(
owner="pharmacome",
repo="conso",
path="external/{}.belns".format(name),
)
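# e.g. _get_conso_url("chebi-names") presumably resolves to a raw GitHub URL
# such as https://raw.githubusercontent.com/pharmacome/conso/master/external/chebi-names.belns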
keyword_to_suffix = dict(
chebi="chebi-names",
ec="ec-code",
fb="fb-names",
go="go-names",
hgnc="hgnc-names",
mesh="mesh-names",
mgi="mgi-names",
ncbigene="ncbigene-names",
rgd="rgd-names",
)
def main():
"""Update the resources links file."""
keyword_to_url = {keyword: _get_conso_url(suffix) for keyword, suffix in keyword_to_suffix.items()}
with open(os.path.join(HERE, "resources.py"), "w") as file:
print("# -*- coding: utf-8 -*-\n", file=file)
print('"""Resources for PyBEL."""\n', file=file)
for keyword, url in sorted(keyword_to_url.items()):
print("{}_URL = '{}'".format(keyword.upper(), url), file=file)
print("\nFPLX_URL = '{}'".format(get_famplex_url()), file=file)
print("\n#: Default URL lookup for some keywords", file=file)
print("keyword_to_url = dict(", file=file)
for k in sorted(keyword_to_suffix):
print(" {}={}_URL,".format(k, k.upper()), file=file)
print(" fplx=FPLX_URL,", file=file)
print(")", file=file)
if __name__ == "__main__":
main()
|
'''
import traditional assets
'''
## create name for financial tickers
tkr_fin = ['Stocks', 'Bonds', 'Gold']
## background information on stocks bonds gold
name_bonds = "Vanguard Total Bond Market ETF"
url_bonds = "https://www.morningstar.com/etfs/ARCX/BND/quote.html"
name_stocks = "SP500"
url_stocks = "https://stooq.pl/q/?s=^spx"
name_gold = "Commodity Futures Price Quotes for Gold (COMEX)"
url_gold = "https://www.nasdaq.com/markets/gold.aspx"
# https://stooq.pl/q/?s=iau.us is another possible choice for gold
## choose start date = first day of vcc datetime index
START0 = pri_vcc_mat.index[0]
END0 = pri_vcc_mat.index[-1]
## --------------------------------------------------------------------------------------
## old method: download data either online or offline
ONLINE_DOWNLOAD = False
if ONLINE_DOWNLOAD:
    # download from stooq. example: https://stooq.pl/q/?s=^spx
stocks = web.DataReader('^SPX', 'stooq', START0)
bonds = web.DataReader('BND.US', 'stooq', START0)
gold = web.DataReader('GC.F', 'stooq', START0)
# concat price and volume matrix
pri_fin_mat = pd.concat([stocks.Close, bonds.Close, gold.Close], axis=1)
vol_fin_mat = pd.concat([stocks.Volume, bonds.Volume, gold.Volume], axis=1)
# trim size
pri_fin_mat = np.round(pri_fin_mat, 4)
pri_fin_mat = pri_fin_mat[START0:]
vol_fin_mat = vol_fin_mat[START0:]
# rename
pri_fin_mat.columns = tkr_fin
vol_fin_mat.columns = tkr_fin
# re index
pri_fin_mat = re_index_date(pri_fin_mat)
vol_fin_mat = re_index_date(vol_fin_mat)
# save to csv
pri_fin_mat.to_csv('object/pri_fin_mat.csv')
vol_fin_mat.to_csv('object/vol_fin_mat.csv')
if not ONLINE_DOWNLOAD:
# read
file_pri_fin = 'object/pri_fin_mat.csv'
file_vol_fin = 'object/vol_fin_mat.csv'
pri_fin_mat = pd.read_csv(file_pri_fin, index_col=0, parse_dates=True)
vol_fin_mat = pd.read_csv(file_vol_fin , index_col=0, parse_dates=True)
# re index
pri_fin_mat = re_index_date(pri_fin_mat)
vol_fin_mat = re_index_date(vol_fin_mat)
# slice date
pri_fin_mat = pri_fin_mat.loc[START0:END0]
vol_fin_mat = vol_fin_mat.loc[START0:END0]
## --------------------------------------------------------------------------------------
## new method of importing; cleaner than the per-asset block above
sp500wei = pd.read_excel('notes2others/sp500weights.xlsx')
sp500wei = sp500wei[['Ticker', 'Weight']]
sp10 = sp500wei.nlargest(10, 'Weight')
tkr_sp10 = sp10.Ticker.tolist()
tkr_sp5 = tkr_sp10[0:5]
del sp10
# not yet tested; written offline
# TODO: run the code to verify it
def ticker2pricevec(ticker, start, end, data_source='stooq'):
price_vector = web.DataReader(name=ticker, data_source=data_source,
start=start, end=end)
return price_vector['Close']
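# e.g. (hypothetical ticker) ticker2pricevec('AAPL.US', START0, END0) would
# return the 'Close' price series fetched from stooq for that window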
def tickerlist2pricemat(ticker_list, start, end, data_source='stooq', filepath=None):
    # put prices in a list (use the ticker_list argument, not a global)
    price_list = []
    for tkr in ticker_list:
        price_vector = ticker2pricevec(tkr, start=start, end=end,
                                       data_source=data_source)
        price_list.append(price_vector)
    # put prices in a matrix
    price_mat = pd.concat(price_list, axis=1)
    price_mat.columns = ticker_list
    # re index
    price_mat = re_index_date(price_mat)
    # save to csv
    if filepath is not None:
        price_mat.to_csv(filepath)
    return price_mat
FILEPATH_SP500 = 'object/pri_sp10_mat.csv'
FILEPATH_FIN = 'object/pri_fin_mat.csv'
if ONLINE_DOWNLOAD:
pri_sp10_mat = tickerlist2pricemat(ticker_list=tkr_sp10,
start=START0, end=END0,
filepath=FILEPATH_SP500)
    pri_fin_mat = tickerlist2pricemat(ticker_list=['^SPX', 'BND.US', 'GC.F'],
                                      start=START0, end=END0,
                                      filepath=FILEPATH_FIN)
pri_fin_mat.columns = tkr_fin
else:
    pri_sp10_mat = pd.read_csv(FILEPATH_SP500, index_col=0, parse_dates=True)
    pri_fin_mat = pd.read_csv(FILEPATH_FIN, index_col=0, parse_dates=True)
# end of new importing method
## --------------------------------------------------------------------------------------
|
# -*- encoding: utf-8 -*-
"""
Created on Jun 14, 2012
@author: Steve Ivy <steveivy@gmail.com>
@co-author: yangming <yangming@appfirst.com>
http://www.appfirst.com
Updated for Python 3 May 14, 2014 by michael@appfirst.com
Python client for AppFirst Statsd+
this file expects local_settings.py to be in the same dir, with statsd host and
port information:
statsd_host = 'localhost'
statsd_port = 8125
Sends statistics to a stats daemon over UDP
Sends statistics to the AppFirst collector over POSIX MQ or Windows Mailslot
"""
import sys
import time
import random
import atexit
import threading
from socket import socket, AF_INET, SOCK_DGRAM
#---------------------------------------------------------------------------
# Default UDP Transport
#---------------------------------------------------------------------------
class UDPTransport(object):
def __init__(self, host='localhost', port=8125):
self.host = host
self.port = port
def emit(self, data):
"""
Send the metrics over UDP
"""
addr=(self.host, self.port)
udp_sock = socket(AF_INET, SOCK_DGRAM)
        try:
            for name, value in data.items():
                send_data = "{0}:{1}".format(name, value.format_string(udp=True))
                # sockets require bytes on Python 3
                udp_sock.sendto(send_data.encode('utf-8'), addr)
        except Exception as e:
            sys.stderr.write("Error emitting stats over UDP: {0.__class__.__name__}: {0}\n".format(e))
        finally:
            udp_sock.close()
def close(self):
pass
#---------------------------------------------------------------------------
# Statsd Aggregator to buffer stats of the same bucket and dump them together
#---------------------------------------------------------------------------
class StatsdAggregator(object):
def __init__(self, interval, transport):
self.running = False
self.interval = interval
self.transport = transport
self.buf = {}
self.lock = threading.Lock()
self._service_thread = None
self.left_buffers = {} # 2 buffer groups, each stored in a dict
self.right_buffers = {} # one of each for each thread
self.rbufs = self.left_buffers # buffer group currently being read from
self.wbufs = self.right_buffers # buffer group currently being written to
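        # double buffering: writers fill `wbufs` while `dump()` drains `rbufs`;
        # `swap_buffers()` flips the roles halfway through each interval, so the
        # hot write path avoids taking the lock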
def service_loop(self):
while self.running:
time.sleep(self.interval/2.0)
self.swap_buffers()
time.sleep(self.interval/2.0)
self.dump()
def start(self):
"""
Start aggregation
"""
if self.running:
return
else:
self.running = True
            if self._service_thread is None:
self._service_thread = threading.Thread(target=self.service_loop)
self._service_thread.daemon = True
self._service_thread.start()
def stop(self):
"""
Stop aggregation
"""
if self.running:
self.running = False
self.dump()
self.swap_buffers()
self.dump()
def is_empty(self):
"""
Check if data in self.buf
"""
if self.buf:
return False
else:
return True
def add(self, bucket):
        # setdefault on a builtin dict is atomic in CPython, and faster than locking
        write_buffer = self.wbufs.setdefault(threading.currentThread(), {})
        if bucket.name in write_buffer:
            # aggregate if a bucket with this name is already buffered
            write_buffer[bucket.name].aggregate(bucket.stat)
        else:
            # otherwise add it
            write_buffer[bucket.name] = bucket
return
def dump(self):
"""
aggregate data across all read buffers and then emit
"""
send_buffer = {}
for th in self.rbufs:
read_buffer = self.rbufs[th]
for name, bucket in read_buffer.items():
if name in send_buffer:
send_buffer[name].aggregate(bucket.stat)
else:
send_buffer[name]=bucket
read_buffer.clear()
self.transport.emit(send_buffer)
def swap_buffers(self):
if self.rbufs == self.left_buffers:
self.rbufs = self.right_buffers
self.wbufs = self.left_buffers
else:
self.rbufs = self.left_buffers
self.wbufs = self.right_buffers
class Bucket(object):
def format_string(self, udp=False):
if udp:
return self._to_udp_string()
else:
return self._to_af_string()
def _to_udp_string(self):
raise NotImplementedError
def _to_af_string(self):
return self._to_udp_string()
class CounterBucket(Bucket):
def __init__(self, name, stat, rate=1):
self.name = name
self.stat = stat
self.rate = rate
def _to_udp_string(self):
return "{0}|c".format(self.stat)
def aggregate(self, stat):
"""
CounterBuckets are aggregated by adding new values to
the current value.
"""
# Note: This is non-standard. We should not divide this out,
        # but instead send the sample rate upstream (with @rate)
self.stat += int(stat/self.rate)
class TimerBucket(Bucket):
def __init__(self, name, stat):
self.name = name
self.stat = [stat]
self.count = 1
def _to_af_string(self):
"""
Sending up the full list of values by default so AppFirst can calculate
the max/min during the interval as well.
"""
return "{0}|ms".format(','.join([str(n) for n in self.stat]))
    def _to_udp_string(self):
        """
        Only send up the average if emitting over UDP so we don't break existing
        StatsD implementations.
        """
        avg = sum(self.stat) / self.count
        return "{0}|ms".format(avg)
    def aggregate(self, stat):
        """
        TimerBuckets are aggregated by extending the list of time values and
        growing the counter used to compute the average time.
        """
        self.stat.extend(stat)
        self.count += len(stat)
class GaugeBucket(Bucket):
def __init__(self, name, stat):
self.name = name
self.stat = stat
self.timestamp=int(time.time())
def _to_udp_string(self):
return "{0}|g|{1}".format(self.stat, self.timestamp)
def aggregate(self, stat):
"""
        GaugeBuckets are updated by setting the current gauge value to the new
        value. No actual aggregation is done.
"""
self.stat = stat
self.timestamp = int(time.time())
#---------------------------------------------------------------------------
# Statsd Client
#---------------------------------------------------------------------------
class Statsd(object):
_transport = UDPTransport()
_aggregator = StatsdAggregator(20, _transport)
@staticmethod
def set_transport(transport):
Statsd._transport.close()
Statsd._transport = transport
Statsd._aggregator.transport = transport
@staticmethod
def set_aggregation(should_aggregate):
if should_aggregate and not Statsd._aggregator.running:
Statsd._aggregator.start()
if not should_aggregate and Statsd._aggregator.running:
Statsd._aggregator.stop()
@staticmethod
def gauge(name, reading):
"""
Log gauge information
>>> from client import Statsd
>>> Statsd.gauge('some.gauge', 500)
"""
        Statsd.send(GaugeBucket(name, reading))
@staticmethod
def timing(name, elapse):
"""
Log timing information
>>> from client import Statsd
>>> Statsd.timing('some.time', 500)
"""
Statsd.send(TimerBucket(name, int(round(elapse))))
@staticmethod
def increment(names, sample_rate=1):
"""
Increments one or more stats counters
>>> Statsd.increment('some.int')
>>> Statsd.increment('some.int', 0.5)
"""
Statsd.update_stats(names, 1, sample_rate)
@staticmethod
def decrement(names, sample_rate=1):
"""
Decrements one or more stats counters
>>> Statsd.decrement('some.int')
"""
Statsd.update_stats(names, -1, sample_rate)
@staticmethod
def update_stats(names, delta=1, sample_rate=1):
"""
Updates one or more stats counters by arbitrary amounts
>>> Statsd.update_stats('some.int', 10)
Sample rate is a decimal value representing the proportion of stats
to keep. For example, if sample_rate is 0.5, then 50% of stats will
be discarded. Default value is 1 and does not discard any stats.
"""
if sample_rate < 1 and random.random() > sample_rate:
return
if not isinstance(names, list):
names = [names]
for name in names:
Statsd.send(CounterBucket(name, int(round(delta)), sample_rate))
@staticmethod
def send(bucket):
if Statsd._aggregator.running:
Statsd._aggregator.add(bucket)
else:
bucket = {bucket.name: bucket}
Statsd._transport.emit(bucket)
@staticmethod
def flush(buf):
Statsd._transport.emit(buf.dump())
@staticmethod
def shutdown():
Statsd._aggregator.stop()
Statsd._transport.close()
@staticmethod
def time(name, enabled=True):
"""
Function Decorator to report function execution time.
>>> @Statsd.time("some.timer.bucket")
>>> def some_func():
>>> pass # do something
"""
def wrap_timer(method):
if not enabled:
return method
def send_statsd(*args, **kwargs):
start = time.time()
result = method(*args, **kwargs)
duration = (time.time() - start) * 1000
Statsd.timing(name, duration)
return result
return send_statsd
return wrap_timer
@staticmethod
def count(name, sample_rate=1, enabled=True):
"""
Function Decorator to count how many times a function is invoked.
>>> @Statsd.count("some.counter.bucket")
>>> def some_func():
>>> pass #do something
"""
def wrap_counter(method):
if not enabled:
return method
def send_statsd(*args, **kwargs):
result = method(*args, **kwargs)
Statsd.increment(name, sample_rate)
return result
return send_statsd
return wrap_counter
# shutdown automatically on application exit...
atexit.register(Statsd.shutdown)
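# A minimal usage sketch (illustrative, not part of the library); it assumes a
# StatsD daemon is listening on the default UDPTransport target localhost:8125,
# and the bucket names are made up for the demo.
if __name__ == '__main__':
    Statsd.set_aggregation(True)  # buffer stats and flush on the 20s interval
    Statsd.increment('example.requests')
    Statsd.timing('example.latency_ms', 12)
    Statsd.gauge('example.queue_depth', 3)

    @Statsd.time('example.func_ms')
    def slow():
        time.sleep(0.01)

    slow()
    Statsd.shutdown()  # flush remaining buffers; also registered via atexit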
|
# -*- coding: utf-8 -*-
# @Time : 2020/7/25 15:27
# @Author : wangmengmeng
# Read one octal number per line until EOF and print its decimal value.
while True:
    try:
        print(int(input(), 8))
    except (EOFError, ValueError):
        break |
# -*- coding: utf-8 -*-
"""
***************************************************************************
MonthlyMean.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Riccardo Lemmi
Email : riccardo at reflab dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Riccardo Lemmi'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Riccardo Lemmi'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import QtGui
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.core.parameters import ParameterRange
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterRaster
import sys
from osgeo import gdal
import numpy
import numpy.ma as ma
import calendar
class MonthUtils:
MONTHS = range(1,13)
def _compute_weeks_for_months(self, current_year):
weeks_in_months = []
weekCounter = 0
for month_index in self.MONTHS:
monthcalendar = calendar.monthcalendar(current_year, month_index)
days_in_week = [len([1 for day in weeks if day]) for weeks in monthcalendar]
            # if a weekly chunk has at least 4 days, keep it and count it as a full week
            weeks_in_current_month = sum(1 for daycount in days_in_week if daycount > 3)
            # since the 4-day cutoff is somewhat arbitrary, double-check that the
            # week count never exceeds 52
weekCounter = weekCounter + weeks_in_current_month
if (weekCounter > 52):
weeks_in_months.append(52 - (weekCounter - weeks_in_current_month))
else:
weeks_in_months.append(weeks_in_current_month)
weekIndex = 1
week_indexes_for_months = []
for index in self.MONTHS:
week_indexes_for_months.append([])
for _ in range(weeks_in_months[index-1]):
# add each week into the monthly raster
week_indexes_for_months[index-1].append(weekIndex)
weekIndex += 1
return week_indexes_for_months
def weeks_for(self, year, month):
return dict(zip(self.MONTHS, self._compute_weeks_for_months(year)))[month]
utils = MonthUtils()
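# Example of the mapping (assuming the 4-day rule above): January 2001 starts on
# a Monday and contributes four full weeks, so utils.weeks_for(2001, 1) should
# return [1, 2, 3, 4], and February continues with [5, 6, 7, 8].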
class Mean:
def __init__(self, input_path, output_path, number_bandsOut=1):
gdal.AllRegister()
self.imageIn = gdal.Open(str(input_path))
self.rows = self.imageIn.RasterYSize
self.cols = self.imageIn.RasterXSize
self.data_type = self.imageIn.GetRasterBand(1).DataType
driver = self.imageIn.GetDriver()
self.imageOut = driver.Create(
str(output_path),
self.cols,
self.rows,
number_bandsOut,
self.data_type)
def _saveMean(self, band_number, mean):
# create the output image
bandOut = self.imageOut.GetRasterBand(band_number)
bandOut.SetNoDataValue(-3.4e+38)
bandOut.SetStatistics(
self.min,
self.max,
numpy.mean([self.max, self.min]),
self.std)
bandOut.WriteArray(mean, 0, 0)
bandOut.FlushCache()
def compute(self, band_numbers, band_out=1):
#
bandsIn = [self.imageIn.GetRasterBand(n) for n in band_numbers]
minimum = min([band.GetMinimum() for band in bandsIn])
datas = [band.ReadAsArray(0, 0, self.cols, self.rows) for band in bandsIn]
t_mean = sum(datas)/len(datas)
# fix no data values
mask = numpy.greater_equal(t_mean, minimum)
self.mean = numpy.choose(mask, (-3.4e+38, t_mean))
# stats
masked_mean = ma.masked_less_equal(self.mean, -3.4e+38)
self.min = float(masked_mean.min())
self.max = float(masked_mean.max())
self.std = numpy.std(masked_mean)
self._saveMean(band_out, self.mean)
def __enter__(self):
return self
def __exit__(self,exc_type, exc_val, exc_tb):
# set the geotransform and projection on the output
self.imageOut.SetGeoTransform(self.imageIn.GetGeoTransform())
self.imageOut.SetProjection(self.imageIn.GetProjection())
## build pyramids for the output
gdal.SetConfigOption('HFA_USE_RRD', 'YES')
self.imageOut.BuildOverviews(overviewlist=[2,4,8,16])
class MonthlyMean(GeoAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
YEAR = "YEAR"
MONTH = "MONTH"
def defineCharacteristics(self):
self.name = "Monthly Mean"
self.group = "[permaclim] Mix"
self.addParameter(ParameterRaster(MonthlyMean.INPUT, "Weekly mean layer"))
self.addParameter(ParameterNumber(MonthlyMean.YEAR, "Year of the layer", default=2001))
self.addParameter(ParameterNumber(MonthlyMean.MONTH, "Month", 1, 12, 1))
        self.addOutput(OutputRaster(MonthlyMean.OUTPUT, "Layer with the mean of the chosen month"))
def processAlgorithm(self, progress):
input_path = self.getParameterValue(MonthlyMean.INPUT)
output_path = self.getOutputValue(MonthlyMean.OUTPUT)
year = self.getParameterValue(MonthlyMean.YEAR)
month = self.getParameterValue(MonthlyMean.MONTH)
with Mean(input_path, output_path) as mm:
mm.compute(utils.weeks_for(year, month))
|
import sys
sys.path.append("./python/")
import os
import json
import random
import string
from flask import Flask, request, render_template, jsonify
from ips_python.script import process_query
from ips_python.constants import (
VECTORIZER_FILENAME,
TERM_DOCUMENT_MATRIX_FILENAME,
PROCESSED_RECORDS_FILENAME,
INPUT_DATA_FILENAME,
)
import pickle
from os.path import join, dirname
from ips_python.utils import get_data_path
import pandas as pd
import requests
from dotenv import load_dotenv
from flask_wtf import FlaskForm
from wtforms.fields import TextAreaField, RadioField, SubmitField
from wtforms.validators import DataRequired
dotenv_path = join(dirname(dirname(__file__)), ".env")
load_dotenv(dotenv_path)
environment = os.getenv("FLASK_ENV", "development").lower()
app = Flask(__name__)
if environment == "production":
app.secret_key = os.getenv("APP_SECRET_KEY")
else:
app.secret_key = "".join(random.choice(string.ascii_lowercase) for i in range(10))
with open(join(get_data_path(), VECTORIZER_FILENAME), "rb") as _file:
vectorizer = pickle.load(_file)
with open(join(get_data_path(), TERM_DOCUMENT_MATRIX_FILENAME), "rb") as _file:
term_document_matrix = pickle.load(_file)
processed_iati_records = pd.read_csv(
join(get_data_path(), PROCESSED_RECORDS_FILENAME), encoding="iso-8859-1"
)
full_iati_records = pd.read_csv(
join(get_data_path(), INPUT_DATA_FILENAME), encoding="iso-8859-1"
)
class SearchForm(FlaskForm):
search_method = RadioField(
"Search method: ",
choices=[("cosine", "Cosine Similarity"), ("elastic", "Elasticsearch")],
default="cosine",
)
search = TextAreaField("Query:", validators=[DataRequired()])
submit = SubmitField("Search")
class Meta:
csrf = False
def get_elasticsearch_results(query):
url = os.getenv("ELASTICSEARCH_URL") + "/_search"
payload = {
"query": {
"more_like_this": {
"fields": ["title", "description"],
"like": query,
"min_term_freq": 1,
"max_query_terms": 30,
}
}
}
headers = {"Content-Type": "application/json"}
response = requests.get(url, data=json.dumps(payload), headers=headers).json()
return response["hits"]["hits"]
def get_cosine_results(query):
return process_query(
query,
vectorizer,
term_document_matrix,
processed_iati_records,
full_iati_records,
).to_dict("records")
@app.route("/", methods=["POST", "GET"])
# @app.route("/search")
def home():
form = SearchForm(request.form)
if request.method == "POST":
if form.validate():
results = None
search_type = form.data["search_method"]
if search_type == "cosine":
results = get_cosine_results(form.data["search"])
else:
results = get_elasticsearch_results(form.data["search"])
return render_template(
"index.html", form=form, results=results, result_type=search_type
)
return render_template("index.html", form=form)
if __name__ == "__main__":
if environment == "development":
app.run(debug=True)
elif environment == "production":
app.run(debug=False)
|
from PyQt4 import QtGui, QtCore
class TextChangingButton(QtGui.QPushButton):
"""Button that changes its text to ON or OFF and colors when it's pressed"""
def __init__(self, parent = None):
super(TextChangingButton, self).__init__(parent)
self.setCheckable(True)
self.setFont(QtGui.QFont('MS Shell Dlg 2',pointSize=10))
self.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
#connect signal for appearance changing
self.toggled.connect(self.setAppearance)
self.setAppearance(self.isDown())
def set_value_no_signal(self, down):
self.blockSignals(True)
self.setChecked(down)
self.setAppearance(down)
self.blockSignals(False)
def setAppearance(self, down):
if down:
self.setText('I')
self.setPalette(QtGui.QPalette(QtCore.Qt.darkGreen))
else:
self.setText('O')
self.setPalette(QtGui.QPalette(QtCore.Qt.black))
def sizeHint(self):
return QtCore.QSize(37, 26) |
from astropy.io import ascii
import matplotlib.pyplot as plt
tabla1=ascii.read('Valores_Mediana_Radio_Halo')
distancia1=tabla1['Distancia']
densidad1=tabla1['Densidad']
tabla2=ascii.read('Valores_Percentil10_Radio_Halo')
distancia2=tabla2['Distancia']
densidad2=tabla2['Densidad']
tabla3=ascii.read('Valores_Percentil90_Radio_Halo')
distancia3=tabla3['Distancia']
densidad3=tabla3['Densidad']
tabla4=ascii.read('Valores_Percentil16_Radio_Halo')
distancia4=tabla4['Distancia']
densidad4=tabla4['Densidad']
tabla5=ascii.read('Valores_Percentil84_Radio_Halo')
distancia5=tabla5['Distancia']
densidad5=tabla5['Densidad']
tabla6=ascii.read('Valores_Mediana_Control_Halo')
distancia6=tabla6['Distancia']
densidad6=tabla6['Densidad']
tabla7=ascii.read('Valores_Percentil10_Control_Halo')
distancia7=tabla7['Distancia']
densidad7=tabla7['Densidad']
tabla8=ascii.read('Valores_Percentil90_Control_Halo')
distancia8=tabla8['Distancia']
densidad8=tabla8['Densidad']
tabla9=ascii.read('Valores_Percentil16_Control_Halo')
distancia9=tabla9['Distancia']
densidad9=tabla9['Densidad']
tabla10=ascii.read('Valores_Percentil84_Control_Halo')
distancia10=tabla10['Distancia']
densidad10=tabla10['Densidad']
tabla11=ascii.read('Valores_Mediana_Radio_Estelar')
distancia11=tabla11['Distancia']
densidad11=tabla11['Densidad']
tabla12=ascii.read('Valores_Percentil10_Radio_Estelar')
distancia12=tabla12['Distancia']
densidad12=tabla12['Densidad']
tabla13=ascii.read('Valores_Percentil90_Radio_Estelar')
distancia13=tabla13['Distancia']
densidad13=tabla13['Densidad']
tabla14=ascii.read('Valores_Percentil16_Radio_Estelar')
distancia14=tabla14['Distancia']
densidad14=tabla14['Densidad']
tabla15=ascii.read('Valores_Percentil84_Radio_Estelar')
distancia15=tabla15['Distancia']
densidad15=tabla15['Densidad']
tabla16=ascii.read('Valores_Mediana_Control_Estelar')
distancia16=tabla16['Distancia']
densidad16=tabla16['Densidad']
tabla17=ascii.read('Valores_Percentil10_Control_Estelar')
distancia17=tabla17['Distancia']
densidad17=tabla17['Densidad']
tabla18=ascii.read('Valores_Percentil90_Control_Estelar')
distancia18=tabla18['Distancia']
densidad18=tabla18['Densidad']
tabla19=ascii.read('Valores_Percentil16_Control_Estelar')
distancia19=tabla19['Distancia']
densidad19=tabla19['Densidad']
tabla20=ascii.read('Valores_Percentil84_Control_Estelar')
distancia20=tabla20['Distancia']
densidad20=tabla20['Densidad']
tabla21=ascii.read('Valores_Percentil25_Control_Estelar')
distancia21=tabla21['Distancia']
densidad21=tabla21['Densidad']
tabla22=ascii.read('Valores_Percentil75_Control_Estelar')
distancia22=tabla22['Distancia']
densidad22=tabla22['Densidad']
tabla23=ascii.read('Valores_Percentil25_Radio_Estelar')
distancia23=tabla23['Distancia']
densidad23=tabla23['Densidad']
tabla24=ascii.read('Valores_Percentil75_Radio_Estelar')
distancia24=tabla24['Distancia']
densidad24=tabla24['Densidad']
tabla25=ascii.read('Valores_Percentil25_Radio_Halo')
distancia25=tabla25['Distancia']
densidad25=tabla25['Densidad']
tabla26=ascii.read('Valores_Percentil75_Radio_Halo')
distancia26=tabla26['Distancia']
densidad26=tabla26['Densidad']
tabla27=ascii.read('Valores_Percentil25_Control_Halo')
distancia27=tabla27['Distancia']
densidad27=tabla27['Densidad']
tabla28=ascii.read('Valores_Percentil75_Control_Halo')
distancia28=tabla28['Distancia']
densidad28=tabla28['Densidad']
#---------------------------------------------- HALO MASS ------------------------------------------------------------------------------------
# 1) Radio galaxies:
#plt.plot(distancia1,densidad1,color='red',label='Radio Galaxias: Mediana')
#plt.fill_between(distancia1, densidad2, densidad3,color='orange',label='Percentiles de Radio Galaxias: [Cota superior $\mathrm{P_{90}}$ : Cota inferior $\mathrm{P_{10}}$]',alpha=0.5)
#plt.fill_between(distancia1, densidad4, densidad5,color='orange',label='Percentiles de Radio Galaxias: [Cota superior $\mathrm{P_{84}}$ : Cota inferior $\mathrm{P_{16}}$]',alpha=0.5)
#plt.fill_between(distancia1, densidad25, densidad26,color='orange',label='Percentiles de Radio Galaxias: [Cota superior $\mathrm{P_{75}}$ : Cota inferior $\mathrm{P_{25}}$]',alpha=0.5)
# 2) Without radio:
#plt.plot(distancia6,densidad6,color='blue',label='Galaxias Control $\mathrm{M_{Halo}}$: Mediana')
#plt.fill_between(distancia6, densidad7, densidad8,color='blue',label='Percentiles de Muestra Control: [Cota superior $\mathrm{P_{90}}$ : Cota inferior $\mathrm{P_{10}}$]',alpha=0.35)
#plt.fill_between(distancia6, densidad9, densidad10,color='blue',label='Percentiles de Muestra Control: [Cota superior $\mathrm{P_{84}}$ : Cota inferior $\mathrm{P_{16}}$]',alpha=0.35)
#plt.fill_between(distancia6, densidad27, densidad28,color='blue',label='Percentiles de Muestra Control: [Cota superior $\mathrm{P_{75}}$ : Cota inferior $\mathrm{P_{25}}$]',alpha=0.35)
#----------------------------------------------- STELLAR MASS ---------------------------------------------------------------------------------
# 1) Radio galaxies:
plt.plot(distancia11,densidad11,color='red',label='Radio Galaxias: Mediana')
#plt.fill_between(distancia11, densidad12, densidad13,color='orange',label='Percentiles de Radio Galaxias: [Cota superior $\mathrm{P_{90}}$ : Cota inferior $\mathrm{P_{10}}$]',alpha=0.5)
#plt.fill_between(distancia11, densidad14, densidad15,color='orange',label='Percentiles de Radio Galaxias: [Cota superior $\mathrm{P_{84}}$ : Cota inferior $\mathrm{P_{16}}$]',alpha=0.5)
plt.fill_between(distancia11, densidad23, densidad24,color='orange',label='Percentiles de Radio Galaxias: [Cota superior $\mathrm{P_{75}}$ : Cota inferior $\mathrm{P_{25}}$]',alpha=0.5)
# 2) Without radio:
plt.plot(distancia16,densidad16,color='blue',label='Galaxias Control $\mathrm{M_{Estelar}}$: Mediana')
#plt.fill_between(distancia16, densidad17, densidad18,color='blue',label='Percentiles de Muestra Control: [Cota superior $\mathrm{P_{90}}$ : Cota inferior $\mathrm{P_{10}}$]',alpha=0.35)
#plt.fill_between(distancia16, densidad19, densidad20,color='blue',label='Percentiles de Muestra Control: [Cota superior $\mathrm{P_{84}}$ : Cota inferior $\mathrm{P_{16}}$]',alpha=0.35)
plt.fill_between(distancia16, densidad21, densidad22,color='blue',label='Percentiles de Muestra Control: [Cota superior $\mathrm{P_{75}}$ : Cota inferior $\mathrm{P_{25}}$]',alpha=0.35)
#--------------------------------------------------- LEGEND -------------------------------------------------------------------------------
plt.legend(loc="upper right",prop={'size':12})
plt.suptitle('Densidad alrededor de galaxias centrales\n z=0 \n', fontsize=18)
plt.title('Radio Galaxias vs No Radio Galaxias', fontsize=12)
#plt.title('Densidad', fontsize=12)
plt.ylabel('$\mathrm{log_{10}(\delta){[(Mpc/h)^{-3}]}}$',fontsize=17)
plt.xlabel('$\mathrm{r{[(Mpc/h)]}}$',fontsize=17)
plt.show()
|
#!/usr/bin/env python
import unittest
from flysight_manager import config
import flysight_manager.log
flysight_manager.log.suppress_logs()
class TestableConfiguration(config.Configuration):
CONFIG_FILE = 'flysight-manager.ini.example'
class TestConfigParser(unittest.TestCase):
def test_config(self):
        # instantiating is side-effectful: it reads and parses the example config
TestableConfiguration()
|
from django.contrib import admin
from .models import Revendedor
class RevendedorAdmin(admin.ModelAdmin):
list_display = ['nome', 'cpf', 'email']
list_filter = ('nome', 'cpf', 'email')
search_fields = ('nome', 'cpf', 'email')
ordering = ['nome', 'cpf', 'email']
save_as = True
admin.site.register(Revendedor, RevendedorAdmin)
|
import csv
import copy
import numpy as np
from classes_needed import *
def main():
# set initial parameters
robotSpeed = 3
# Create an initial matrix for shape and scale values for different zones and times
# col = zone, row = hours
los_matrix = np.zeros((1, 2), dtype='f,f').tolist()
# _____parameters of the gamma distribution; order -> shape outer zone, shape inner zone
los_matrix[0] = [(1, 1), (4, 1)]
print("Outer zone shape is {} and inner zone shape is {}".format(los_matrix[0][0][0], los_matrix[0][1][0]))
idex = 1
    # build a list of csv files and run the algorithm over all of them
# csv_list = ['1.csv' , '2.csv', '3.csv', '4.csv', '5.csv', '6.csv', '7.csv', '8.csv', '9.csv', '10.csv']
csv_list = ['new_data_c_20_p_35_1.csv', 'new_data_c_20_p_35_2.csv', 'new_data_c_20_p_35_3.csv',
'new_data_c_20_p_35_4.csv', 'new_data_c_20_p_35_5.csv', 'new_data_c_20_p_35_6.csv',
'new_data_c_20_p_35_7.csv',
'new_data_c_20_p_35_8.csv', 'new_data_c_20_p_35_9.csv', 'new_data_c_20_p_35_10.csv']
for item in csv_list:
# read in customers
        print(item)
idex += 1
custList = []
with open(item, 'r') as file:
reader = csv.reader(file)
for row in reader:
custList.append(Customer(int(row[0]), float(row[1]), float(row[2]), float(row[3]), float(row[4])))
        # create depot with the specified number of robots (a plain dict would also work)
depot = Depot(0, 0, 0)
depot.setNumberOfRobots(3)
# create a corresponding RoutePlan based on the number of robots in the depot
routePlan = []
# create a copy of customers for the tabu search
custList_tabu = copy.deepcopy(custList)
# create a list of routes with the number of empty routes corresponding to the number of robots in the depot
for elem in range(depot.getNumberRobots()):
routePlan.append(Route(elem))
depotCoords = [1.2, 1.7]
zoneCoords = [.3, .6, 1.7, 1.3]
# create a matrix of distances from depot(s) to customers
distances_raw = all_distances(depotCoords, custList, zoneCoords)
# as of now, distances contain (distance, shape coeff)
distances, shapes, path_indices, best_paths = dist_matr_trim(distances_raw, los_matrix, custList)
# print(distances_raw)
# print(distances)
# print(path_indices)
# print(best_paths)
# get total distances to customers in km as np array
best_paths_distance_combined = []
for index, elem in enumerate(best_paths):
if elem == 0:
best_paths_distance_combined.append(elem)
else:
best_paths_distance_combined.append(elem[0] + elem[1])
        # at this point all distances are one-way; they need to be multiplied
        # by 2 to get the full round trip
best_paths_distance_combined = np.asarray(best_paths_distance_combined)
# potentially there is a need to further diversify the customer and paths to see what customer is in what zone
# print the output data:
# total distance
# total expected travel time
# paths selected
# total_exp_travel_time = 0
total_exp_distance = 0
# stub = distances[0] - 1 / 30
# stub[0] = 0
# total_exp_travel_time = sum(stub) * 2
total_exp_distance = sum(best_paths_distance_combined) * 2
# initialization alg
        # After having a list of customers and routes I need to create an initial set of routes filled with customers.
# Step 1: take empty routes. For all customers check all insertion positions based on the measures and insert
# those customers 1 by 1.
for i in range(0, len(custList)):
# print(len(custList))
obj_fun_change = float("inf")
# for every customer and route
for customer in custList:
for route in routePlan:
# for every position
for position, elem in enumerate(route.currentRoute, 1):
# compute measures before and after the customer is inserted
prev_lateness = route.total_lateness()
prev_earliness = route.total_earliness()
route.insert_customer_v_2(position, customer, distances, shapes, los_matrix[0][0][1])
curr_lateness = route.total_lateness()
curr_earliness = route.total_earliness()
                        # change in the earliness/lateness
change_in_measures = curr_earliness - prev_earliness + curr_lateness - prev_lateness
# update the insertion location if needed
if change_in_measures < obj_fun_change:
cust_to_ins = customer
route_to_ins = route.id
pos_to_ins = position
# need to update upper bound on the measure to make sure it updates when needed, not always
obj_fun_change = change_in_measures
# once everything is computed and compared, restore the route
route.remove_customer_v_2(position, los_matrix[0][0][1])
# after all customers and positions are checked finally insert a customer into a route
routePlan[route_to_ins].insert_customer_v_2(pos_to_ins, cust_to_ins, distances, shapes, los_matrix[0][0][1])
# and remove a customer from a list of initial customers
custList.remove(cust_to_ins)
# need to run this tabu search
final_ans = tabu_search(custList_tabu, distances, routePlan, shapes, los_matrix[0][0][1])
# results outputs
best_paths = best_paths[1:]
# consider total travelled inner and outer distances
total_exp_distance_inner = 0
total_exp_distance_outer = 0
for elem in best_paths:
total_exp_distance_inner += elem[1]
total_exp_distance_outer += elem[0]
print("Traveled distance INside the zone is {} ".format(2 * total_exp_distance_inner))
print("Traveled distance OUTside the zone is {} ".format(2 * total_exp_distance_outer))
print("Total distance traveled is {} ".format(total_exp_distance))
# need to compute expected travel times in inner/outer zones
total_exp_travel_time_inner = 0
total_exp_travel_time_outer = 0
for elem in best_paths:
total_exp_travel_time_inner += elem[1] * los_matrix[0][1][0]
total_exp_travel_time_outer += elem[0] * los_matrix[0][0][0]
total_exp_travel_time_inner = 2 * total_exp_travel_time_inner / robotSpeed
total_exp_travel_time_outer = 2 * total_exp_travel_time_outer / robotSpeed
print("Expected inner travel time is {:.6} ".format(total_exp_travel_time_inner))
print("Expected outer travel time is {:.6} ".format(total_exp_travel_time_outer))
total_exp_travel_time = 0
# for each elem in the best path compute corresponding total expected travel times
for elem in best_paths:
total_exp_travel_time += elem[0] * los_matrix[0][0][0] + elem[1] * los_matrix[0][1][0]
# to get a total expected travel time I need to multiply it by 2
total_exp_travel_time = 2 * total_exp_travel_time / robotSpeed
print("Total expected travel time is {:.6} ".format(total_exp_travel_time))
print("Paths selected are {} ".format(path_indices))
print("Final set of routes: ", final_ans[0][0])
t_earl = 0
t_lateness = 0
# Total earliness by routes
for elem in final_ans[0][0]:
t_earl += elem.total_earliness()
print('Percentage of earliness is {:.4}'.format(t_earl / final_ans[0][1]))
# Total lateness by routes
for elem in final_ans[0][0]:
t_lateness += elem.total_lateness()
print('Percentage of lateness is {:.4}'.format(t_lateness / final_ans[0][1]))
print("Total obj function is {:.6} ".format(final_ans[0][1]))
        # the last parameter of all shifting functions is the waiting time, currently in minutes
# evaluate shiftings
after_shift_obj, after_shift_ids, total_after_shift_obj = whole_route_shift(final_ans[0][0],
los_matrix[0][0][1],
5)
print("Objective function after a whole route shift {:.6} ".format(total_after_shift_obj))
by_cust_shift_per_route, obj_value_after_fwd_shift, percent_early, percent_late = forward_shifting(
final_ans[0][0], los_matrix[0][0][1], 5)
print("Objective function after a forward shift shift {:.6} ".format(obj_value_after_fwd_shift))
print('Percentage of earliness is {:.4}'.format(percent_early))
print('Percentage of lateness is {:.4}'.format(percent_late))
print("Shifts used: ", by_cust_shift_per_route)
if __name__ == "__main__":
main()
|
#!/usr/bin/python
import sys
import re
SYNSETS_MAP_PATH = "/home/robin/Documents/NUS/Lectures/sem_3/Advanced_AI/Project/Data/Images/synsets.txt"
NAMES = sys.argv[1:]
with open(SYNSETS_MAP_PATH, "r") as f:
    text = f.read()
with open("synsets.txt", "w") as f:
    for name in NAMES:
        p = re.compile("n[0-9]+ %s" % name)
        match = p.search(text)
        if match is None:
            # guard against names with no matching synset line
            sys.stderr.write("no synset found for %s\n" % name)
            continue
        synset = match.group().split(" ")[0]
        f.write(synset + '\n') |
places = {
'an':'andaman-nicobar-islands-an-',
'ap':'andhra-pradesh-ap-',
'ar':'arunachal-pradesh-ar-',
'cg':'chhattisgarh-cg-',
'ch':'chandigarh-ch-',
'dd':'daman-and-diu-dd-',
'dl':'delhi-dl-',
'dn':'dadra-nagar-haveli-dn-',
'ga':'goa-ga-',
'gj':'gujarat-gj-',
'hp':'himachal-pradesh-hp-',
'hr':'haryana-hr-',
'jh':'jharkhand-jh-',
'jk':'jammu-kashmir-jk-',
'ka':'karnataka-ka-',
'kl':'kerala-kl-',
'ld':'lakshadweep-ld-',
'mh':'maharashtra-mh-',
'ml':'meghalaya-ml-',
'mn':'manipur-mn-',
'mp':'madhya-pradesh-mp-',
'mz':'mizoram-mz-',
'nl':'nagaland-nl-',
'od':'odisha-od-',
'or':'odisha-od-',
'pb':'punjab-pb-',
'py':'puducherry-py-',
'rj':'rajasthan-rj-',
'sk':'sikkim-sk-',
'tn':'tamil-nadu-tn-',
'tr':'tripura-tr-',
'ts':'telangana-ts-',
'uk':'uttarakhand-uk-',
'up':'uttar-pradesh-up-',
'wb':'west-bengal-wb-'
} |
from django.urls import path
from . import views
urlpatterns = [
path('v1/productos/', views.ProductoList.as_view(), name='producto_list_api'),
path('v1/productos/<str:codigo>', views.ProductoDetalle.as_view(), name='producto_detalle'),
] |
# Generated by Django 2.2.2 on 2019-06-20 19:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0003_auto_20190617_1811'),
]
operations = [
migrations.AlterModelOptions(
name='comment',
options={'ordering': ['created_date']},
),
migrations.AlterModelOptions(
name='post',
options={'ordering': ['-created_date']},
),
migrations.AlterField(
model_name='comment',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='comment',
name='text',
field=models.TextField(help_text='Text'),
),
migrations.AlterField(
model_name='post',
name='text',
field=models.TextField(help_text='Text', max_length=300),
),
migrations.AlterField(
model_name='post',
name='title',
field=models.CharField(help_text='Title', max_length=50),
),
]
|
import os
import fnmatch
FOLDER = '/haproxy'
root = os.getcwd() + FOLDER
print(root)
for path, subdirs, files in os.walk(root):
    for name in files:
        if fnmatch.fnmatch(name, '*.c'):
            # avoid shadowing the `path` loop variable from os.walk
            filepath = os.path.join(path, name)
            try:
                matching = False
                found = []
                with open(filepath, 'r') as file:
                    it = iter(file)
                    for line in it:
                        if matching:
                            if line.strip() == '':
                                break
                            else:
                                found.append(line)
                        elif line.rstrip().endswith('PATTERN:'):
                            # skip the six lines after the marker, then
                            # collect until the next blank line
                            for _ in range(6):
                                next(it)
                            matching = True
                for line in found:
                    print(line)
            except (OSError, StopIteration):
                pass
|
import plotly.figure_factory as ff
import statistics
import random
import pandas as pd
df = pd.read_csv("data.csv")
data = df["temp"].tolist()
#fig = ff.create_distplot([data], ["name"], show_hist=False)
#fig.show()
pm = statistics.mean(data)
ptsd = statistics.stdev(data)
#print(pm)
#print(ptsd)
def randomsetofmean():
    # draw a random sample of 100 values (with replacement) and return its mean
    dataset = []
    for i in range(0, 100):
        randomindex = random.randint(0, len(data) - 1)
        value = data[randomindex]
        dataset.append(float(value))
    mean = statistics.mean(dataset)
    return mean
def showfig(meanlist):
    # plot the sampling distribution of the means, then print its mean and stdev
    fig = ff.create_distplot([meanlist], ["temp"], show_hist=False)
    fig.show()
    fmean = statistics.mean(meanlist)
    print(fmean)
    fs = statistics.stdev(meanlist)
    print(fs)
def setup():
meanlist = []
for i in range(0, 1000):
setmean = randomsetofmean()
meanlist.append(setmean)
showfig(meanlist)
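# Note: by the Central Limit Theorem, the distribution of these 1000 sample
# means should look roughly normal, with a standard deviation near
# ptsd / sqrt(100), i.e. about a tenth of the population stdev.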
setup() |
# checkio.py
def non_unique(data):
    # drop elements that occur exactly once; return the filtered list
    freqDist = {}
    for val in set(data):
        freqDist[val] = 0
    for val in data:
        freqDist[val] += 1
    for val, freq in freqDist.items():
        if freq == 1:
            del data[data.index(val)]
    return data
def roman_numeral(data):
values = [1000, 900, 500, 400, 100,
90, 50, 40, 10, 9, 5, 4, 1]
symbols = ['M', 'CM', 'D', 'CD', 'C',
'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I']
roman = ''
    for i in range(len(values)):
        result = data // values[i]
        if result >= 1:
            roman += symbols[i] * result
            data = data % values[i]
    return roman
def grille_cipher(grille, cipher):
password = ''
for i in range(4):
for j in range(4):
for k in range(4):
if grille[j][k] == 'X':
password += cipher[j][k]
grille = list(zip(*grille[::-1]))
return password
def safe_pawns(pawns):
    # a pawn is safe if another pawn stands one row below, one column to the left/right
safePawns = 0
for pawn in pawns:
# calculate required positions for the safety of that space
col = pawn[0]
row = int(pawn[1])
safeSpaces = [chr(ord(col)+1)+str(row-1), chr(ord(col)-1)+str(row-1)]
# determine if the safe spaces are included in list of pawns
if safeSpaces[0] in pawns or safeSpaces[1] in pawns:
# if so, the pawn we are testing is safe
safePawns += 1
return safePawns
def main():
# data = [1,2,3,2,1]
# print(non_unique(data))
# data = 3999
# print(roman_numeral(data))
# grille = ('X...', '..X.', 'X..X', '....')
# cipher = ('itdf', 'gdce', 'aton', 'qrdi')
# print(grille_cipher(grille, cipher))
pawns = {"b4", "d4", "f4", "c3", "e3", "g5", "d2"}
print(safe_pawns(pawns))
if __name__ == '__main__':
main()
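# Spot checks for the solvers above (a sketch; expected values follow the
# usual CheckiO examples):
#   non_unique([1, 2, 3, 1, 3])  -> [1, 3, 1, 3]
#   roman_numeral(76)            -> 'LXXVI'
#   safe_pawns({"b4", "d4", "f4", "c3", "e3", "g5", "d2"}) -> 6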
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    DEBUG = False
    TESTING = False
    SECRET_KEY = "12345"
    #SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
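
# A common pattern is to extend the base Config per environment; a minimal
# sketch (these subclass names are illustrative, not part of the original app):
class DevelopmentConfig(Config):
    DEBUG = True

class TestingConfig(Config):
    TESTING = True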
|
#!/usr/bin/env python3
from librip.gens import gen_random
from librip.iterators import Unique
data0 = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2]
data1 = gen_random(1, 3, 10)
# Implementation of task 2
data2= ['a', 'A', 'A', 'a', 'C', 'a', 'a', 'b', 'b', 'b', 'b', 'b']
d0 = Unique(data0)
for x in d0:
print(x, end=', ')
print('\n')
d1 = Unique(list(data1))
for x in d1:
print(x, end=', ')
print('\n')
d2 = Unique(data2, ignore_case=True)
for x in d2:
print(x, end=', ')
print('\n')
d3 = Unique(data2)
for x in d3:
print(x, end=', ')
print('\n') |
import json
with open('./problemset.json', 'r', encoding='utf8') as fp:
    json_data = json.load(fp)
    result = json_data["result"]
    problems = result["problems"]
    problemStatistics = result["problemStatistics"]
    cnt = 0
    for i in problems:
        cnt += 1
        print(cnt, i) |
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from MenuOfChoices import MenuOfChoices
MENU_OF_CHOICES = MenuOfChoices()
class FinancesUI():
    def __init__(self):
        pass
def set_sale_to_emp(Finances):
emp_id = int(input("ID do empregado: "))
date = MENU_OF_CHOICES.fill_in_date_format()
value = float(input("Valor da venda: "))
comission = float(input("Comissao em %: "))
comission = value*comission/100
Finances.setSaleToEmployee(date, value, emp_id, comission)
return Finances
def set_tax_to_emp(Finances):
emp_id = int(input("ID do empregado: "))
tax_value = float(input("Valor da taxa: "))
Finances.setTaxToEmployee(emp_id, tax_value)
return Finances |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from .models import Station
class StationAdmin(admin.ModelAdmin):
fieldsets = [
(None,{'fields':['content']}),
('location', {'fields':['lat'],'classes': ['collapse']}),
]
admin.site.register(Station,StationAdmin) |
import socket
import sys
import struct
# The following libraries should be installed before executing
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
# Construct a TCP socket
HOST, PORT = "140.113.194.88", 45000
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
# Connect to the server
sock.connect((HOST, PORT))
# Send hello to server
# 1. Send the size in byte of "hello" to Server
msg_size = len("hello")
byte_msg_size = struct.pack("i", msg_size)
sock.sendall( byte_msg_size )
# 2. Send the "hello" string to Server
sock.sendall(bytes("hello", 'utf-8'))
print('I send : hello')
# Receive hello from server
msg_size = struct.unpack('i', sock.recv(4))
received = str(sock.recv(int(msg_size[0])), "utf-8")
print('TA send : ', received)
# Send public pem file to server
    with open('public.pem', 'rb') as f:
        myPubKey = serialization.load_pem_public_key(
            f.read(),
            backend=default_backend()
        )
myPubPem = myPubKey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
    msg_size = len(myPubPem)
byte_msg_size = struct.pack('i', msg_size)
sock.sendall(byte_msg_size)
sock.sendall(myPubPem)
print('I send my RSA public key :\n', str(myPubPem, 'utf-8'))
# Receive AES Session Key from server
msg_size = struct.unpack('i', sock.recv(4))
encryptedAESKey = sock.recv(int(msg_size[0]))
print('Received C1 :\n', encryptedAESKey)
    with open('private.pem', 'rb') as f:
        myPriKey = serialization.load_pem_private_key(
            f.read(),
            password=None,
            backend=default_backend()
        )
AESKey = myPriKey.decrypt(
encryptedAESKey,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
)
    print('AES Session Key :\n', AESKey)
# Receive Initial Vector from Server
msg_size = struct.unpack('i', sock.recv(4))
encryptedIV = sock.recv(int(msg_size[0]))
print('Received C2 :\n', encryptedIV)
IV = myPriKey.decrypt(
encryptedIV,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
)
print('Initial Vector :\n', IV)
# Send my encrypted ID to server
cipher = Cipher(algorithms.AES(AESKey), modes.CBC(IV), backend=default_backend())
encryptor = cipher.encryptor()
encryptedID = encryptor.update(b'0216023\0\0\0\0\0\0\0\0\0') + encryptor.finalize()
    # length must be the raw byte count, not the length of the bytes repr
    msg_size = len(encryptedID)
byte_msg_size = struct.pack('i', msg_size)
sock.sendall(byte_msg_size)
sock.sendall(encryptedID)
print('Send my encrypted ID :\n', encryptedID)
# Receive Magic Number from server
msg_size = struct.unpack('i', sock.recv(4))
encryptedMagicNum = sock.recv(int(msg_size[0]))
print('Received C4 :\n', encryptedMagicNum)
decryptor = cipher.decryptor()
print('My Magic Number:\n', decryptor.update(encryptedMagicNum) + decryptor.finalize())
# bye
msg_size = struct.unpack("i", sock.recv(4))
received = str(sock.recv(int(msg_size[0])), "utf-8")
print(received)
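
# The exchange above repeats one framing pattern: a 4-byte length header
# followed by the payload. A helper pair like this (a sketch, not part of the
# original handshake) would factor it out; it ignores short reads, just as the
# inline code above does:
def send_msg(sock, payload):
    sock.sendall(struct.pack('i', len(payload)))
    sock.sendall(payload)

def recv_msg(sock):
    size = struct.unpack('i', sock.recv(4))[0]
    return sock.recv(size)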
|
# Filename: MD_ImmuNet_Scraper_Window.py
# Author: Zheng Guo
# Date: 10-16-2020
# Purpose: Scraping member's immunization registration information from the MD Immunet site based on a given list of members.
# Class list: - Person (measYr, memberId, memberIdSkey, fname, lname, lnameSuffix, dob, gender, stateRes, meas)
# Functions:
# - is_date(string)
# - immunte(Fname, Lname, DOB, Gender, user, pw)
# User Input:
# - First input : MLQM_Immun_Regs_Lkup_SAMPLE.xlsx
# xlsx file contains the following information in order below:
# meas_yr,memb_life_id,memb_life_id_skey,memb_frst_nm,memb_last_nm,memb_nm_suffix,memb_dob,gender,state,meas
# - Second input : User name
# - Third input: Password
# Output:
# - First output: HEDIS_MD_Immun_Records_Found_YYYY_MM_DD.csv
# - Second output: HEDIS_MD_Immun_Records_Not_Found_YYYY_MM_DD.csv
##############################################################################
# Imports
import csv
import datetime
import os
import os.path
import time
import pandas as pd
from dateutil.parser import parse
from pandas import DataFrame
from selenium import webdriver
from selenium.common.exceptions import (NoSuchElementException,
WebDriverException)
from selenium.webdriver.support.select import Select
##############################################################################
# Classes
class Person(object):
def __init__(self, measYr, memberId, memberIdSkey, fname, lname, lnameSuffix, dob, gender, stateRes, meas):
self.measYr = measYr
self.memberId = memberId
self.memberIdSkey = memberIdSkey
self.fname = fname
self.lname = lname
self.lnameSuffix = lnameSuffix
self.dob = dob
self.gender = gender
self.stateRes = stateRes
self.meas = meas
def getMeasYr(self):
return self.measYr
def getMemberIdSkey(self):
return self.memberIdSkey
def getMemberId(self):
return self.memberId
def getFirstName(self):
return self.fname
def getLastName(self):
return self.lname
def getLastNameSuffix(self):
return self.lnameSuffix
def getDateOfBirth(self):
return self.dob
def getGender(self):
return self.gender
def getStateRes(self):
return self.stateRes
def getMeas(self):
return self.meas
###############################################################################
# Function
def is_date(string, fuzzy=False):
    # return True if the string parses as a date, False otherwise
    try:
        parse(string, fuzzy=fuzzy)
        return True
    except ValueError:
        return False
def immunte(Fname, Lname, DOB, Gender, driver):
# work on patient search button
driver.find_element_by_xpath("//*[@id='editVFCProfileButton']").click()
# work on last name
lastname = driver.find_element_by_id("txtLastName")
lastname.clear()
lastname.send_keys(Lname)
# work on first name
firstname = driver.find_element_by_id("txtFirstName")
firstname.clear()
firstname.send_keys(Fname)
# work on birth date
birthdate = driver.find_element_by_id("txtBirthDate")
birthdate.clear()
birthdate.send_keys(DOB)
# work on advanced search button to input gender
try:
driver.find_element_by_xpath(
"//*[@id='queryResultsForm']/table/tbody/tr/td[2]/table/tbody/tr[3]/td/table/tbody/tr[2]/td[5]/input").click()
# work on gender selection button
obj = Select(driver.find_element_by_name("optSexCode"))
if Gender == 'M':
obj.select_by_index(2)
elif Gender == 'F':
obj.select_by_index(1)
else:
obj.select_by_index(3)
# work on search button
driver.find_element_by_name("cmdFindClient").click()
# two scenarios could emerge as a search result: 1, no patient found 2, the patient found
if "No patients were found for the requested search criteria" in driver.find_element_by_id("queryResultsForm").text:
al = []
elif "Patient Demographics Patient Immunization History" in driver.find_element_by_id("queryResultsForm").text:
# work on patient immunization button
driver.find_element_by_xpath(
"//*[@id='queryResultsForm']/table[2]/tbody/tr[2]/td[2]/span/label").click()
# work on patient last name button
driver.find_element_by_id("redirect1").click()
# work on getting rid of people who opt out of the site - header
header = driver.find_elements_by_class_name("large")[1].text
if "Access Restricted" in header:
print(Fname+' '+Lname+' '+" Opt out")
al = []
elif "Patient Information" in header:
# find the first line
first = driver.find_element_by_xpath(
"//*[@id='container']/table[3]/tbody/tr/td[2]/table[2]/tbody/tr/td/table/tbody/tr[1]/td/table/tbody/tr[5]/td[1]").text
                if first is None:
                    al = []
else:
even = driver.find_elements_by_class_name("evenRow")
odd = driver.find_elements_by_class_name("oddRow")
o = []
e = []
for value in odd:
o.append(value.text)
for value in even:
e.append(value.text)
length = len(o)
i = 0
al = []
# merge odd and even row together and remove the row marked with complete
while i < length:
al.append(e[i])
al.append(o[i])
i = i+1
# parse each row of information with a comma, add group name for row that are without one
for x in range(len(al)):
if is_date(al[x][1:10]):
al[x] = al[x].replace(' ', ',')
al[x] = al[x].replace(',of,', ' of ')
al[x] = group + ',' + al[x][2:]
else:
al[x] = al[x].replace(' ', ',')
al[x] = al[x].replace(',of,', ' of ')
g = al[x].split(',', 1)
group = g[0]
# work on returning to home page
driver.find_element_by_xpath(
"//*[@id='headerMenu']/table/tbody/tr/td[2]/div/a").click()
except NoSuchElementException:
al = []
except WebDriverException:
al = []
return al
def main():
# Welcome message and input info
    print('\nThis is the web scraper for the Maryland Immunization Record Website.')
print('You will be prompted to type in a file name and username/password.')
print('If you need to exit the script and stop its process press \'CTRL\' + \'C\'.')
file = input("\nEnter file name: ")
    user = input("\nEnter MDImmunet username: ")
    pw = input("\nEnter MDImmunet password: ")
date = str(datetime.date.today())
# output file
fileOutputName = 'HEDIS_MD_Immun_Records_Found_' + \
date.replace('-', '_') + '.csv'
fileOutputNameNotFound = 'HEDIS_MD_Immun_Records_Not_Found_' + \
date.replace('-', '_') + '.csv'
fileOutput = open(fileOutputName, 'w')
fileOutputNotFound = open(fileOutputNameNotFound, 'w')
fileOutput.write('MEAS_YR,MEMB_LIFE_ID_SKEY,MEMB_LIFE_ID,MEMB_FRST_NM,MEMB_LAST_NM,' +
'DOB,GNDR,RSDNC_STATE,IMUN_RGSTRY_STATE,VCCN_GRP,VCCN_ADMN_DT,DOSE_SERIES,' +
'BRND_NM,DOSE_SIZE,RCTN\n')
fileOutputNotFound.write('MEAS_YR,MEMB_LIFE_ID_SKEY,MEMB_LIFE_ID,MEMB_FRST_NM,MEMB_LAST_NM,MEMB_SUFFIX,' +
'DOB,GNDR,RSDNC_STATE,IMUN_RGSTRY_STATE,VCCN_GRP,VCCN_ADMN_DT,DOSE_SERIES,' +
'BRND_NM,DOSE_SIZE,RCTN\n')
    # If the file does not exist, stop before doing any work
    if not os.path.isfile(file):
        print('File Not Found\n')
        return
df = pd.read_excel(file)
# create array of People objects and member ID
peopleArray = []
memberIdArray = []
    # dropna() is not in-place; keep the returned frame, dropping only rows
    # that are entirely empty so optional fields (e.g. name suffix) survive
    df = df.dropna(how='all').reset_index(drop=True)
total = len(df)
not_found = 0
found = 0
# assign each record in the data frame into Person class
for i in range(total):
measYr = str(df.loc[i, "#MEAS_YR"])
memberId = str(df.loc[i, "MEMB_LIFE_ID"])
memberIdSkey = str(df.loc[i, "MEMB_LIFE_ID_SKEY"])
fname = str(df.loc[i, "MEMB_FRST_NM"])
lname = str(df.loc[i, "MEMB_LAST_NM"])
lnameSuffix = str(df.loc[i, "MEMB_NM_SUFFIX"])
inputDate = str(df.loc[i, "MEMB_DOB"])
        # If the date is null/NaN, assign an impossible placeholder date
        if not inputDate or inputDate == 'nan':
            dob = '01/01/1900'
        elif '-' in inputDate:
            dob = datetime.datetime.strptime(
                inputDate, "%Y-%m-%d %H:%M:%S").strftime('%m/%d/%Y')
        else:
            dob = datetime.datetime.strptime(
                inputDate, '%m/%d/%Y').strftime('%m/%d/%Y')
gender = str(df.loc[i, "GENDER"])
stateRes = str(df.loc[i, "STATE_RES"])
meas = str(df.loc[i, "MEAS"])
p = Person(measYr, memberId, memberIdSkey, fname, lname,
lnameSuffix, dob, gender, stateRes, meas)
# append array
m = df.loc[i, "MEMB_LIFE_ID"]
if (m not in memberIdArray):
peopleArray.append(p)
memberIdArray.append(m)
# work on setting up driver for md immunet - mac forward slash/windows double backward slash
PATH = os.getcwd()+'\\'+'chromedriver'
driver = webdriver.Chrome(PATH)
driver.get("https://www.mdimmunet.org/prd-IR/portalInfoManager.do")
# work on login ID
username = driver.find_element_by_id("userField")
username.clear()
username.send_keys(user)
# work on password
password = driver.find_element_by_name("password")
password.clear()
password.send_keys(pw)
# work on getting to home page - where loop will start
driver.find_element_by_xpath(
"//*[@id='loginButtonForm']/div/div/table/tbody/tr[3]/td[1]/input").click()
for n in range(total):
p = peopleArray[n]
recordToWrite = ''
print('Looking up: ' + str(n)+' ' +
p.getLastName() + ', ' + p.getFirstName())
MeasYr = p.getMeasYr()
MemberIdSkey = p.getMemberIdSkey()
MemberId = p.getMemberId()
Fname = p.getFirstName()
Lname = p.getLastName()
DOB = str(p.getDateOfBirth())
Gender = p.getGender()
StateRes = p.getStateRes()
children = immunte(Fname, Lname, DOB, Gender, driver)
if children == []:
not_found += 1
recordToWrite = MeasYr+','+MemberIdSkey+','+MemberId+',' + Fname + \
','+Lname + ',' + ' ' + ','+DOB+','+Gender+','+StateRes+','+'MD'
fileOutputNotFound.write(recordToWrite + '\n')
elif children != []:
found += 1
for x in range(len(children)):
data_element = children[x].split(",")
                # skip rows whose admin date or brand is invalid; clean up the dosage and reaction fields
if is_date(data_element[1]) and is_date(data_element[3]):
children[x] = ''
elif is_date(data_element[1]) and data_element[2] == 'NOT' and data_element[3] == 'VALID':
children[x] = ''
elif is_date(data_element[1]) and is_date(data_element[3]) == False:
if data_element[5] != 'No':
data_element[4] = data_element[5]
data_element[5] = ''
children[x] = ','.join(data_element[0:6])
else:
data_element[5] = ''
children[x] = ','.join(data_element[0:6])
else:
children[x] = ''
for x in range(len(children)):
if children[x] != '':
recordToWrite = MeasYr+','+MemberIdSkey+','+MemberId+',' + \
Fname+','+Lname + ','+DOB+','+Gender+','+StateRes+','+'MD'
recordToWrite = recordToWrite+','+children[x]
fileOutput.write(recordToWrite + '\n')
fileOutput.close()
fileOutputNotFound.close()
print('\n--------------------------------OUTPUT--------------------------------')
print("Script completed.")
print("There are "+str(total)+" members in the original lookup list provided.")
print("There are "+str(found) +
" members were found with records on the MD immunization website.")
print("There are "+str(not_found) +
" members were not found on the MD immunization website.\n")
print('Files saved: \n' + fileOutputName + '\n' + fileOutputNameNotFound)
print('\n----------------------------------------------------------------------\n')
##############################################################################
main()
|
import json
import random
import binascii
import base64
from Crypto.Cipher import AES
import requests
url = 'https://music.163.com/weapi/cloudsearch/get/web?csrf_token='
headers = {
"Host":"music.163.com",
"Connection":"keep-alive",
"Origin":"https://music.163.com",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
"Content-Type":"application/x-www-form-urlencoded",
"Accept":"*/*",
"Referer":"https://music.163.com/search/",
}
def random_b():
seed = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
sa = []
for i in range(16):
sa.append(random.choice(seed))
salt = ''.join(sa)
return bytes(salt, 'utf-8')
# second parameter: the RSA public exponent
pub_key = "010001"
# third parameter: the RSA public modulus
modulus = "00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7"
# fourth parameter: the fixed AES key
secret_key = b'0CoJUm6Qyw8W8jud'
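
# Encryption scheme implemented below (mirroring the site's weapi JavaScript):
#   params    = AES_CBC(AES_CBC(json_text, secret_key), random_16_char_key)
#   encSecKey = textbook RSA of the reversed random key, using pub_key/modulus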
def aes_encrypt(text, key):
    # initialization vector
    iv = b'0102030405060708'
    # pad the text to a multiple of 16 bytes, then convert it to bytes
    pad = 16 - len(text) % 16
    try:
        # if we received bytes (e.g. the ciphertext of the first AES pass),
        # decode them before padding
        text = text.decode()
    except AttributeError:
        pass
    text = text + pad * chr(pad)
    try:
        text = text.encode()
    except AttributeError:
        pass
    encryptor = AES.new(key, AES.MODE_CBC, iv)
    ciphertext = encryptor.encrypt(text)
    ciphertext = base64.b64encode(ciphertext).decode('utf-8')  # the ciphertext is additionally base64-encoded
    return ciphertext
def rsa_encrypt(random_char):
    text = random_char[::-1]  # preprocess the plaintext: reverse it, then hex-encode
    rsa = int(binascii.hexlify(text), 16) ** int(pub_key, 16) % int(modulus, 16)
    return format(rsa, 'x').zfill(256)
def aes_param(data):
text = json.dumps(data)
print(text)
random_char = random_b()
    params = aes_encrypt(text, secret_key)  # first of two AES passes
params = aes_encrypt(params, random_char)
enc_sec_key = rsa_encrypt(random_char)
data = {
'params': params,
'encSecKey': enc_sec_key
}
return data
if __name__ == "__main__":
    song_name = input("请输入歌曲名字:")  # prompt: "Please enter a song name:"
    data = {
        "hlpretag": "<span class=\"s-fc7\">",
        "hlposttag": "</span>",
        "s": song_name,
"type": "1",
"offset": "0",
"total": "true",
"limit": "30",
"csrf_token": ""
}
formdata = aes_param(data)
print(formdata) |
from threading import Thread
from webserver.zenwebserver import ZenWebServer
from kivy.logger import Logger
from components.config import Config
class FlaskThread(Thread):
"""
Start the Flask Application on a background thread to blocking the GUI.
"""
def __init__(self, ctrl, config):
super().__init__()
self.ctrl = ctrl
self.config = config
def run(self):
""" Run the Flask server with the given configuration options """
try:
ZenWebServer(self.ctrl).run(**self.config)
except OSError as e:
print(f"Unable to start webserver: error {e}")
class WebServer:
"""
This classes acts as a controller for the flask webserver, starting and
stopping it on a background thread.
"""
_thread = None
@staticmethod
def start(ctrl):
""" Start the ZenPlayer web API backend. """
config = Config.load("webserver.json")
Logger.info("Webserver: Starting web server ")
thread = FlaskThread(ctrl, config)
thread.daemon = True
thread.start()
WebServer._thread = thread
@staticmethod
def stop():
if WebServer._thread is not None:
# TODO
pass
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from config import config
class Net(nn.Module):
def __init__(self, input_length, output_length, tune_config):
super(Net, self).__init__()
self.tune_config = tune_config
config_layers = self.get_config("hidden_layers")
config_layers.append(output_length)
activations = [get_activation(name) for name in self.get_config("activations")]
first_layer = nn.Linear(input_length, config_layers[0])
linear_layers = [
nn.Linear(config_layers[index], config_layers[index + 1])
for index in range(len(config_layers) - 1)
]
linear_layers.insert(0, first_layer)
        # interleave linear layers with activations; note zip() truncates to the
        # shorter list, so one activation per linear layer is assumed
        self.layers = nn.Sequential(
            *[layer for pair in zip(linear_layers, activations) for layer in pair]
        )
def forward(self, data_input):
return self.layers(data_input)
    def get_config(self, name):
        return (
            self.tune_config[name].sample()
            if self.tune_config is not None
            else config[name]
        )
def get_activation(name):
if name == "relu":
return nn.ReLU()
elif name == "sigmoid":
return nn.Sigmoid()
elif name == "leakyrelu":
return nn.LeakyReLU()
else:
return nn.ReLU()
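
# Minimal construction sketch. `_Fixed` is a hypothetical stand-in for a Ray
# Tune search-space entry, mimicking only the .sample() call used above; the
# sizes and activation names are illustrative, and the `config` import at the
# top must still resolve.
class _Fixed:
    def __init__(self, value):
        self.value = value

    def sample(self):
        return self.value

if __name__ == "__main__":
    demo_cfg = {
        "hidden_layers": _Fixed([16, 16]),
        "activations": _Fixed(["relu", "relu", "relu"]),  # one per linear layer
    }
    net = Net(input_length=8, output_length=2, tune_config=demo_cfg)
    print(net(torch.zeros(1, 8)).shape)  # expected: torch.Size([1, 2])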
|
from app import db,app
from flask_marshmallow import Marshmallow
ma = Marshmallow(app)
class Estado_Grupo(db.Model):
id=db.Column(db.Integer, primary_key=True,nullable=False)
nombre=db.Column(db.String(50),nullable=False)
def __init__(self, nombre):
self.nombre = nombre
db.create_all()
class StateGroupSchema(ma.Schema):
class Meta:
fields = ('id','nombre')
estado_grupo_schema = StateGroupSchema()  # single object (POST, GET)
estados_grupos_schema = StateGroupSchema(many=True)  # multiple objects (GET) |
import random
def main():
hidden = [str(x) for x in random.sample(range(1, 7), 4)]
#print(hidden)
while(True):
print("Please enter 4 digits")
guess = list(input())
if len(guess)!=4:
continue
res = ""
        for h, g in zip(hidden, guess):
            if h == g:
                res += "b"  # right digit, right place
            elif g in hidden:
                res += "w"  # right digit, wrong place
#print(res)
if res=='bbbb':
print("Congrats!")
break
print(''.join(sorted(res)))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
from bs4 import BeautifulSoup
from argparse import ArgumentParser
from platform import python_version_tuple
import json
import pandas as pd
import re
import requests
import time
if python_version_tuple()[0] == u'2':
def input(prompt): return raw_input(prompt.encode('utf8')).decode('utf8')
__author__ = u'"noragami" <yuumeikai@gmail.com>'
__version__ = '1.0'
class Scraptimus():
def __init__(self):
print(u"""
______ ______ ______ ______ ______ ______ __ __ __ __ __ ______
/\ ___\/\ ___\/\ == \/\ __ \/\ == \/\__ _\/\ \/\ "-./ \/\ \/\ \/\ ___\
\ \___ \ \ \___\ \ __<\ \ __ \ \ _-/\/_/\ \/\ \ \ \ \-./\ \ \ \_\ \ \___ \
\/\_____\ \_____\ \_\ \_\ \_\ \_\ \_\ \ \_\ \ \_\ \_\ \ \_\ \_____\/\_____\
\/_____/\/_____/\/_/ /_/\/_/\/_/\/_/ \/_/ \/_/\/_/ \/_/\/_____/\/_____/
created by {__author__}
Version: {__version__}
""".format(__author__=__author__, __version__=__version__))
URL = 'http://www.co-optimus.com/ajax/ajax_games.php?game-title-filter=&system=&countDirection=at+least&playerCount=2&page=%d&sort=&sortDirection='
def set_args(self):
""" Create parser for command line arguments """
parser = ArgumentParser(
prog=u'python -m scraptimus',
description='Scrape and export to a file the list of games found at Co-optimus website.\n\t\tDefault format is json.')
parser.add_argument('-f', '--filename',
help=u'Override the default filename')
group = parser.add_mutually_exclusive_group()
group.add_argument(
'-j', '--json', help=u'Export to a json file', action='store_true')
group.add_argument(
'-c', '--csv', help=u'Export to a csv file', action='store_true')
parser.add_argument(
'-s', '--startpage', help=u'Define where to start. Default is 1')
parser.add_argument(
'-e', '--endpage', help=u'Define where to end. Default is all the pages')
return parser
    def scraper(self, start_page=None, end_page=None, records=None):
        # avoid a mutable default argument; a shared list would leak between calls
        if records is None:
            records = []
        print('Started... please wait.')
r = requests.get(self.URL % start_page)
soup = BeautifulSoup(r.text, 'lxml') # html.parser is slower
rows = iter(soup.find('table').find_all('tr'))
# skip first row
next(rows)
for row in rows:
idx = row['id']
cells = row.find_all('td')
title = cells[0].strong.string
genre = cells[0].label.string
system = cells[1].a.string
online = int(cells[2].string)
couch = int(cells[3].string)
combo = int(cells[4].string)
features = []
for link in cells[5].find_all('a'):
features.extend(link['class'])
features = [x for x in features if features.count(
x) == 1] # remove duplicated
features.remove('features-icon') # remove unwanted
review_score = float(
cells[6].div.div.string) if cells[6].div else None
user_rating = float(
cells[7].i.string) if cells[7].i else None
release_date = cells[8].span.string if cells[8].span else None
records.append({'id': idx, 'title': title, 'genre': genre, 'system': system,
'online': online, 'couch': couch, 'combo': combo,
'features': ','.join(features), 'review_score': review_score,
'user_rating': user_rating, 'release_date': release_date})
for tag in soup.find_all(string=re.compile("^Next$")):
next_page = int(re.search(r'\d+', tag.parent['onclick']).group())
if end_page is None or end_page > next_page:
self.scraper(start_page=next_page,
end_page=end_page, records=records)
else:
break
return records
def export_to_csv(self, filename=None, records=None, separator='|'):
filename = '%s.csv' % filename
df = pd.DataFrame(records, columns=['id', 'title', 'genre', 'system', 'online',
'couch', 'combo', 'features', 'review_score',
'user_rating', 'release_date'])
df.to_csv(filename, index=False, encoding='utf-8', sep=separator)
def export_to_json(self, filename=None, records=None):
filename = '%s.json' % filename
with open(filename, 'w') as outfile:
json.dump(records, outfile, indent=4)
def scrap(self):
parser = self.set_args()
args = parser.parse_args()
start_page = int(args.startpage) if args.startpage else 1
end_page = int(args.endpage) if args.endpage else None
records = self.scraper(
start_page=start_page, end_page=end_page, records=[])
filename = '%s-%d-%d' % (
args.filename if args.filename else '%s' % time.strftime("%Y%m%d"),
start_page,
end_page if end_page else 0)
if args.csv:
self.export_to_csv(filename=filename, records=records)
else:
self.export_to_json(filename=filename, records=records)
print('Finished!')
if __name__ == '__main__':
scraptimus = Scraptimus()
scraptimus.scrap()
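# Example invocations (a sketch, matching the argparse setup above):
#   python -m scraptimus --csv --startpage 1 --endpage 3
#   python -m scraptimus -j -f coop_games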
|
from Crypto import Random
from Crypto.Hash import SHA
from Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from Crypto.Signature import PKCS1_v1_5 as Signature_pkcs1_v1_5
from Crypto.PublicKey import RSA
from block import Block
import hashlib
import json
import base64
class Transaction(object):
def __init__(self,a_addr,a_public_key,b_addr,b_public_key,a_value,b_value,unspent_list):
self.a_addr = a_addr
self.a_public_key = a_public_key
self.b_addr = b_addr
self.b_public_key = b_public_key
self.a_value = a_value
self.b_value = b_value
self.unspent = unspent_list
def get_signature(self,secret_key):
# add digital signature
message = self.a_addr + self.a_public_key + self.b_addr + \
self.b_public_key + str(self.a_value) + str(self.b_value)
for i, val in enumerate(self.unspent):
message = message + str(val[0]) + str(val[1])
rsakey = RSA.importKey(secret_key.encode())
signer = Signature_pkcs1_v1_5.new(rsakey)
digest = SHA.new()
digest.update(message.encode())
sign = signer.sign(digest)
signature = base64.b64encode(sign)
self.signature = signature.decode()
def display(self):
return {
'a_addr': self.a_addr,
'a_public_key': self.a_public_key,
'b_addr': self.b_addr,
'b_public_key': self.b_public_key,
'a_value': self.a_value,
'b_value': self.b_value,
'unspent': self.unspent,
'signature': self.signature
}
@staticmethod
def rebuild(transaction_json):
a_addr = transaction_json['a_addr']
a_public_key = transaction_json['a_public_key']
b_addr = transaction_json['b_addr']
b_public_key = transaction_json['b_public_key']
a_value = transaction_json['a_value']
b_value = transaction_json['b_value']
unspent = transaction_json['unspent']
new_transaction = Transaction(a_addr,a_public_key,b_addr,b_public_key,a_value,b_value,unspent)
new_transaction.signature = transaction_json['signature']
return new_transaction
def search_transaction(public_key,blockchain):
# get all UTXO from blockchain with public_key
# return a list of all UTXO's index and total value
    # i.e. find every UTXO owned by this public key, returning each UTXO's index
    # (block number + transaction number within the block) and the total value
# return (index,value)
# need a new algorithm ################################
unspent_list_of_public_key = []
value = 0
end = 0
length = len(blockchain)
for i in range(length):
for j in range(len(blockchain[-i - 1].transactions)):
if blockchain[-i - 1].transactions[-j - 1]['b_public_key'] == public_key:
unspent_list_of_public_key.append((length - i - 1,len(blockchain[-i - 1].transactions) - j - 1))
value += blockchain[-i - 1].transactions[-j - 1]['b_value']
elif blockchain[-i - 1].transactions[-j - 1]['a_public_key'] == public_key:
unspent_list_of_public_key.append((length - i - 1,len(blockchain[-i - 1].transactions) - j - 1))
if len(blockchain[-i - 1].transactions[-j - 1]['unspent']) > 0:
end = max(end , blockchain[-i - 1].transactions[-j - 1]['unspent'][-1][0] + 1)
value += blockchain[-i - 1].transactions[-j - 1]['a_value']
if length - i - 1 == end :
break
return (unspent_list_of_public_key, value)
def create_transaction(a_addr,a_public_key,b_addr,b_public_key,a_value,b_value,unspent_list,a_secretkey):
new_transaction = Transaction(a_addr,a_public_key,b_addr,b_public_key,a_value,b_value,unspent_list)
new_transaction.get_signature(a_secretkey)
return new_transaction
def verify_transaction(blocks,transaction,public_key):
# check the signature with A's public_key
# check the A's history UTXO
# verify existed transaction and its unspent list ######################
    '''
    On receiving a transaction, verify that its digital signature matches the
    sender's public key; if it does, further check that the sender's inputs are valid.
    '''
message = transaction['a_addr'] + transaction['a_public_key'] + transaction['b_addr'] + \
transaction['b_public_key'] + str(transaction['a_value']) + str(transaction['b_value'])
total_utxo = 0
unspent,value = search_transaction(public_key,blocks)
for i, val in enumerate(transaction['unspent']) :
val = tuple(val)
if unspent.count(val) == 0:
return False
message = message + str(val[0]) + str(val[1])
apk = blocks[val[0]].transactions[val[1]]['a_public_key']
avalue = blocks[val[0]].transactions[val[1]]['a_value']
bpk = blocks[val[0]].transactions[val[1]]['b_public_key']
bvalue = blocks[val[0]].transactions[val[1]]['b_value']
if apk == transaction['a_public_key'] :
total_utxo = total_utxo + avalue
if bpk == transaction['a_public_key'] :
total_utxo = total_utxo + bvalue
rsakey = RSA.importKey(public_key.encode())
verifier = Signature_pkcs1_v1_5.new(rsakey)
digest = SHA.new()
# Assumes the data is base64 encoded to begin with
digest.update(message.encode())
is_verify = verifier.verify(digest, base64.b64decode(transaction['signature']))
return is_verify and total_utxo == transaction['a_value'] + transaction['b_value']
def check_block(blocks, block):
    # verify the block's hash and the validity of every transaction; return True/False
# verify hash code
context = json.dumps(block.display())
hex_dig = hashlib.sha256(context.encode()).hexdigest()
if hex_dig[0:5] != '0'*5:
return False
# verify repeated transactions in one block
for i in range(len(block.transactions)):
for j in range(i):
if (block.transactions[i]['a_public_key'] == block.transactions[j]['a_public_key']):
return False
# verify transaction
for i, val in enumerate(block.transactions):
if (not (i == len(block.transactions) -1)) and (not verify_transaction(blocks, val, val['a_public_key'])) :
return False
return True
def generate_account(name):
random_generator = Random.new().read
rsa = RSA.generate(1024,random_generator)
private_pem = rsa.exportKey()
with open('../userKey/' + name + '-private.pem','w') as f:
f.write(private_pem.decode())
public_pem = rsa.publickey().exportKey()
with open('../userKey/' + name + '-public.pem','w') as f:
f.write(public_pem.decode())
def get_addr_key(name):
    '''
    Get the local address and public key.
    return (addr, public_key) is modified into
    return (addr, private_key, public_key)
    assuming addr is the same as public_key
    '''
try:
with open('../userKey/' + name + '-private.pem') as fpr:
prkey = fpr.read()
with open('../userKey/' + name + '-public.pem') as fpu:
pukey = fpu.read()
    except FileNotFoundError:
generate_account(name)
with open('../userKey/' + name + '-private.pem') as fpr:
prkey = fpr.read()
with open('../userKey/' + name + '-public.pem') as fpu:
pukey = fpu.read()
return (pukey, prkey, pukey)
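
# Minimal usage sketch (assumptions: a ../userKey directory exists for the PEM
# files, and 'alice'/'bob' are hypothetical account names, not part of the
# original project):
if __name__ == '__main__':
    a_addr, a_sk, a_pk = get_addr_key('alice')
    b_addr, _unused, b_pk = get_addr_key('bob')
    tx = create_transaction(a_addr, a_pk, b_addr, b_pk, 5, 10, [], a_sk)
    print(json.dumps(tx.display(), indent=2))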
|
import cv2
import time
import numpy as np
# for the lcm communication for camera
import sys
CAMERA_LENGTH = 640
CAMERA_WIDTH = 480
CAMERA_CHANNEL_1 = "CAMERA_COORD1"
CAMERA_CHANNEL_2 = "CAMERA_COORD2"
# trans matrix from base of cam1 to cam2
# [[ 1. 0. 0. 500.]
# [ 0. 1. 0. 0.]
# [ 0. 0. 1. 0.]
# [ 0. 0. 0. 1.]]
MAT_CAM1toCAM2 = np.eye(4, dtype=float)
MAT_CAM1toCAM2[0][3] = -500.0
FOCAL_LENGTH = 423.5
class Coord_calculator(object):
"""docstring for Coord_calculator"""
# def __init__(self, arg):
# self.arg = arg
def calc_RT_matrix(self, M1, M2):
"""
@param:
M1 is the trans and rot matrix from base1 to cam1
M2 is the trans and rot matrix from base2 to cam2
@return:
Rb Tb, rotation matrix and translation matrix from cam1 to cam2
"""
# test code for M1 and M2
# M1 = [[ 0, -1, 0, 1],
# [ 1, 0, 0, 10],
# [ 0, 0, 1, 100],
# [ 0, 0, 0, 1]]
# M2 = [[ 1, 0, 0, 0],
# [ 0, 1, 0, 0],
# [ 0, 0, 1, 0],
# [ 0, 0, 0, 1]]
# MAT = M1.inv * M2 * MAT_CAM1toCAM2
MAT = np.matmul(np.linalg.inv(M2),np.matmul(MAT_CAM1toCAM2, M1))
Rb = MAT[0:3, 0:3]
Tb = MAT[0:3, 3]
# print "np.linalg.inv(M1): ", np.linalg.inv(M1)
# print "np.matmul(MAT_CAM1toCAM2, np.linalg.inv(M1)): ", np.matmul(MAT_CAM1toCAM2, np.linalg.inv(M1))
# print "np.linalg.inv(MAT_CAM1toCAM2): ", np.linalg.inv(MAT_CAM1toCAM2)
# print "MAT: ", MAT
# print "MAT_CAM1toCAM2: ", MAT_CAM1toCAM2
# print "M1: ", M1
# print "M2: ", M2
return Rb, Tb
def trans_camera_to_base(self, T, M):
"""
@param:
T is the xyz pos in cam coordinat
M is the trans and rot matrix from base to cam2
"""
cam_vec = np.array([[T[0]],[T[1]],[T[2]],[1]])
base_pos = np.matmul(M, cam_vec)
return np.transpose(base_pos[0:3, :])
def calc_pos_in_camera_coord(self, Rb, Tb, ue1, ve1, ue2, ve2, f=FOCAL_LENGTH):
"""
@param:
Rb and Tb is the rotation matrix and translation matrix from cam1 to cam2
ue1, ve1 is the x and y in the cam1 coordinates
ue2, ve2 is the x and y in the cam2 coodiantes
f is the focal length and scalar factor of the camera.
@return:
the [x,y,z] pos in the cam1 coordinate and cam2 coodinates
"""
# print ue1-CAMERA_LENGTH/2, ve1-CAMERA_WIDTH/2, ue2-CAMERA_LENGTH/2, ve2-CAMERA_WIDTH/2
A1 = np.array([[(ue1-CAMERA_LENGTH/2)/f], [(ve1-CAMERA_WIDTH/2)/f], [1]])
# print A1
A2 = -np.matmul(Rb, A1)
A3 = np.array([[(ue2-CAMERA_LENGTH/2)/f], [(ve2-CAMERA_WIDTH/2)/f], [1]])
A = np.concatenate((A2, A3), axis=1)
# T = [Tz1, Tz2] z-vector with first and second cameras
#print np.matmul(A.T, A)
T = np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), Tb)
# print T
# the translation vector from the ball to the first camera
T1 = np.array([(ue1-CAMERA_LENGTH/2)*T[0]/f,(ve1-CAMERA_WIDTH/2)*T[0]/f,T[0]])
# the translation vector from the ball to the second camera
T2 = np.array([(ue2-CAMERA_LENGTH/2)*T[1]/f,(ve2-CAMERA_WIDTH/2)*T[1]/f,T[1]])
return T1, T2 |
import copy
import random
import os
import ui
import util
import main
PLANET_ICON = u"\U0001FA90"
BORDER_ICON = '█'
WALL_ICON = u"\u2593"
QUIZ_ICON = u'\u001b[33;1m★ \u001b[0m'
MEME_ICON = '⚝'
DOOR_ICON = "@"
ENEMY_SHOTS_NUMBER = 3 #quantity of position occupied by one shot
ENEMY_SHOT_ICON = "\u001b[36;1m%\u001b[0m"
EARTH_ICON = u"\U0001F30D"
PLAYER_SHOT1_ICON = u"\u27B3"  # arrow
PLAYER_SHOT2_ICON = u"\u27BC"  # arrow 2
ALKO_ICON = [u"\U0001F37A", u"\U0001F377", u"\U0001F37E"] #beer,wine,vodka
ALIEN_SHIP_ICON = u"\U0001F6F8" #ufo
LVL3_ALIEN_LIFE_ICON = u"\U0001F47D " #alien
LVL3_PLAYER_LIFE_ICON = (f'\x1b[6;31;40m \u2764 \x1b[0m')
def create_board(width, height):
'''
Creates a new game board based on input parameters.
Args:
int: The width of the board
int: The height of the board
Returns:
list: Game board
'''
board = []
for h in range(height):
board.append([""]*width)
return create_border(board, width, height)
def create_border(board, width, height):
'''
Creates a borders = puts BORDER_ICON on the edges of the board
'''
for h in range(height):
for w in range(width):
if (h == 0 or w == 0 or h == height-1 or w == width-1):
board[h][w] = BORDER_ICON
return board
def put_player_on_board(board, player):
'''
Modifies the game board by placing the player icon at its coordinates.
Args:
list: The game board
dictionary: The player information containing the icon and coordinates
Returns:
Nothing
'''
x = player['player_x_position']
y = player['player_y_position']
board[x][y] = player['icon']
def player_moves(key, board, player):
'''
Changing player position.
Obstacles - all symbols you don't wanna player to cross.
Returns:
Nothing
'''
obstacles = ['#', BORDER_ICON, WALL_ICON]
x = player['player_x_position']
y = player['player_y_position']
if key == 'a':
if (y-1) != 0 and (board[x][y-1] not in obstacles):
player['player_y_position'] = y - 1
if key == 'w':
if (x-1) != 0 and (board[x-1][y] not in obstacles):
player['player_x_position'] = x - 1
if key == 's':
if (x+1) != (len(board) - 1) and (board[x+1][y] not in obstacles):
player['player_x_position'] = x + 1
if key == 'd':
if (y+1) != (len(board[0]) - 1) and (board[x][y+1] not in obstacles):
player['player_y_position'] = y + 1
else:
pass
def generate_game_board_1(board):
mum_house_board = copy.deepcopy(board)
mum_house_board[13][5] = "E"
mum_house_board[13][6] = "X"
mum_house_board[13][7] = "I"
mum_house_board[13][8] = "T"
    # door icons could be added here
list_of_walls = {(13, 4), (12, 4), (12, 9), (13, 9), (6, 1), (6, 2), (6, 3), (6, 4), (6, 5), (1, 5), (2, 5), (3, 5), (4, 5),
(12, 5), (12, 8), (9, 13), (9, 12), (9, 11), (9, 10), (9, 9), (9, 8), (9, 7), (9, 6)}
for i in list_of_walls:
mum_house_board[i[0]][i[1]] = BORDER_ICON
    # do not place items inside walls: only empty fields are eligible
    items = set()
    while len(items) < 12:
        position_x = random.randint(2, len(board) - 2)
        position_y = random.randint(2, len(board[0]) - 2)
        if mum_house_board[position_x][position_y] == "":
            items.add((position_x, position_y))
    flag = 0
    for i in items:
        if flag < 6:
            mum_house_board[i[0]][i[1]] = "B"
            flag += 1
        else:
            mum_house_board[i[0]][i[1]] = "P"
            flag += 1
    return mum_house_board
def generate_game_board_2(board):
city_board = copy.deepcopy(board)
list_of_walls = {(11, 13), (11, 12), (11, 11), (10, 11), (9, 11), (9, 12), (4, 1), (4, 2), (4, 3), (4, 4),(3,4),
(4, 5), (3, 5), (2, 5), (2, 4), (2, 3), (2, 2), (12, 4), (12, 3), (12, 2), (12, 1),(3,1),(3,2),(3,3),(9,12),(9,13),(13,1),(13,2),(13,3)}
for i in list_of_walls:
city_board[i[0]][i[1]] = BORDER_ICON
list_of_excluded_fields = {(11, 13), (11, 12), (11, 11), (10, 11), (9, 11), (9, 12), (4, 1), (4, 2), (4, 3), (4, 4), (4, 5), (3, 5), (2, 5), (
2, 4), (2, 3), (2, 2), (12, 1), (12, 4), (12, 3), (12, 2), (13, 1), (13, 2), (13, 3), (10, 12), (10, 13), (3, 1), (3, 2), (3, 3), (3, 4),
(13,5), (13,6), (13,7), (13,8), (2, 1), (1, 4), (9, 13), (13, 4)}
city_board[2][1] = DOOR_ICON
city_board[1][4] = "F"
city_board[9][13] = DOOR_ICON
city_board[13][4] = DOOR_ICON
    items = set()
    while len(items) < 12:
        position_x = random.randint(2, len(board) - 2)
        position_y = random.randint(2, len(board[0]) - 2)
        candidate = (position_x, position_y)
        # only free fields outside the excluded set may hold items
        if candidate not in list_of_excluded_fields and city_board[position_x][position_y] == "":
            items.add(candidate)
    flag = 0
    for i in items:
        if flag < 6:
            city_board[i[0]][i[1]] = "S"
            flag += 1
        else:
            city_board[i[0]][i[1]] = "Z"
            flag += 1
return city_board
def add_to_inventory(player, board, inventory):
x = player['player_x_position']
y = player['player_y_position']
if board[x][y] == "B":
inventory["Bigos"] += 1
board[x][y] = " "
if board[x][y] == "P":
inventory["Pierogi"] += 1
board[x][y] = " "
if board[x][y] == "Z":
inventory["Złom"] += 1
board[x][y] = " "
if board[x][y] == "S":
inventory["Sliptape"] += 1
board[x][y] = " "
def exit_board(level, player):
if level == 1:
x = player['player_x_position']
y = player['player_y_position']
exit_list = (5, 6, 7, 8)
if x == 13 and y in exit_list:
return True
else:
return False
if level == 2:
x = player['player_x_position']
y = player['player_y_position']
if x == 13 and y == 4:
return True
else:
return False
def going_to_the_moon():
txt=("Spotykasz typowego Sebę. Który pyta czy masz jakiś problem?\nPo krótkiej wymianie zdań, postanawia ci pomóc...")
ui.print_text(txt,font_color=36)
im = ui.get_ascii("./ascii_files/ziomek.txt") #import text from ASCII file
ui.display_ascii(im,font_colors=[37],spacing=5) #displays ASCII file with color defined by number
input()
os.system('cls')
txt="Ruszamy w kosmos!"
ui.print_text(txt,font_color=36)
im = ui.get_ascii("./ascii_files/gulf.txt") #import text from ASCII file
ui.display_ascii(im,font_colors=[37],spacing=5) #displays ASCII file with color defined by number
input()
os.system('cls')
txt=("Trzeciego dnia podróży postanawiacie wypić wódkę zabraną przez Twojego kompana i zasypiacie. Budzicie się a Waszym oczom ukazuje się wileki labirynt!")
ui.print_text(txt,font_color=36)
im = ui.get_ascii("./ascii_files/flaszka.txt") #import text from ASCII file
ui.display_ascii(im,font_colors=[37],spacing=5) #displays ASCII file with color defined by number
input()
def chceck_inventory(inventory, level):
if level == 1:
if inventory["Pierogi"] > 4 and inventory["Bigos"] > 4:
return True
else:
return False
if level == 2:
control_sum = 0
for i in inventory:
if inventory[i] > 2:
control_sum += 1
if control_sum == 6:
return True
elif control_sum < 6:
return False
def exchange_of_goods(player, inventory):
x = player['player_x_position']
y = player['player_y_position']
if x == 9 and y == 13:
im = ui.get_ascii("./ascii_files/meta.txt") #import text from ASCII file
ui.display_ascii(im,font_colors=[37],spacing=5) #displays ASCII file with color defined by number
txt=("Cześć, chcesz wymienić bigos i pierogi na ODLOTOWY PREPARAT?")
ui.print_text(txt,font_color=36)
choice = input("T / N: ")
os.system('cls')
if choice == "t":
if inventory["Pierogi"] > 0 and inventory["Bigos"] > 0:
inventory["Pierogi"] -= 1
inventory["Bigos"] -= 1
inventory["Ropa"] += 1
return inventory
if choice == "n":
return inventory
else:
return inventory
def ferdek(player):
x = player['player_x_position']
y = player['player_y_position']
if x == 1 and y == 4:
txt=("Tu nie ma pracy dla ludzi z moim wykształceniem")
ui.print_text(txt,font_color=36)
im = ui.get_ascii("./ascii_files/ferdek.txt") #import text from ASCII file
ui.display_ascii(im,font_colors=[37],spacing=5) #displays ASCII file with color defined by number
input(" ")
os.system('cls')
def get_blasters(player, inventory):
x = player['player_x_position']
y = player['player_y_position']
if x == 2 and y == 1:
txt=("Czekasz w kolejce do pokoju, po chwili słyszysz swoje nazwisko i wchodzisz do środka")
ui.print_text(txt,font_color=36)
input(" ")
os.system('cls')
im = ui.get_ascii("./ascii_files/urzednik.txt") #import text from ASCII file
ui.display_ascii(im,font_colors=[37],spacing=5) #displays ASCII file with color defined by number
input(" ")
txt=("Po obszernym wywiadzie dotyczącym Twojej sytuacji zawodowej otrzymujesz zasiłek dla bezrobotnych")
ui.print_text(txt,font_color=36)
input(" ")
os.system('cls')
im = ui.get_ascii("./ascii_files/banknoty.txt") #import text from ASCII file
ui.display_ascii(im,font_colors=[32],spacing=5) #displays ASCII file with color defined by number
input(" ")
os.system('cls')
txt=("Odpalasz swojego smatrfona i kupujesz blastery do startku")
ui.print_text(txt,font_color=36)
input(" ")
im = ui.get_ascii("./ascii_files/allezlo.txt") #import text from ASCII file
ui.display_ascii(im,font_colors=[32],spacing=5) #displays ASCII file with color defined by number
input(" ")
os.system('cls')
inventory["Działko"] += 1
return inventory
else:
return inventory
def display_players_card(inventory):
print("============================")
print('{:<10}{:<10}{:<10}'.format("SYMBOL", "ITEM", "QUANTITY"))
for key, value in inventory.items():
item = key
quantity = value
symbol = item[0].upper()
print("----------------------------")
print('{:<10}{:<13}{:<10}'.format(" " + symbol, item, quantity))
###################################### LEVEL 2
def grid_generator(board):
'''
    Create a grid on the board
'''
for i in range(len(board) - 1):
if i % 2 == 0:
board[i] = [WALL_ICON] * len(board[0])
for j in board:
for n in range(len(j) - 1):
if n % 2 == 0:
j[n] = WALL_ICON
def surrounding_cells(cell, board):
sur_cells = []
x = cell[0]
y = cell[1]
# up
if x - 1 != 0:
sur_cells.append((x-1, y))
# down
if x + 1 != len(board) -1:
sur_cells.append((x+1, y))
# left
if y - 1 != 0:
sur_cells.append((x, y-1))
# right
if y + 1 != len(board[0]) -1:
sur_cells.append((x, y+1))
return sur_cells
def edges(board):
edges = []
for x in range(len(board)):
for y in range(len(board[0])):
if x != 0 and x != len(board) - 1 \
and y != 0 and y != len(board[0]) - 1:
if x % 2 == 0 and not y % 2 == 0:
edges.append([x, y])
if y % 2 == 0 and not x % 2 == 0:
edges.append([x, y])
return edges
def remove_not_used_cells(cells, board):
approved_cells = []
for cell in cells:
x = cell[0]
y = cell[1]
if board[x][y] == '':
approved_cells.append(cell)
else:
pass
return approved_cells
def maze_algorithm(cells, tree, edge, board):
    '''
    Union-find style merge. If neither cell has an origin yet, the edge becomes
    the origin of both, and the wall on the edge is knocked out.
    '''
if cells[0] not in tree and cells[1] not in tree:
tree[cells[0]] = edge
tree[cells[1]] = edge
board[edge[0]][edge[1]] = ''
    '''
    One cell has an origin and the other does not: the cell without one takes
    the first cell's origin, and the wall on the edge is knocked out.
    '''
if cells[0] in tree and cells[1] not in tree:
tree[cells[1]] = tree[cells[0]]
board[edge[0]][edge[1]] = ''
if cells[1] in tree and cells[0] not in tree:
tree[cells[0]] = tree[cells[1]]
board[edge[0]][edge[1]] = ''
    '''
    Both cells already have origins. If it is the same origin, do nothing
    (a passage would create a loop). Otherwise the wall is knocked out and the
    edge becomes the new origin for both trees.
    '''
if cells[0] in tree and cells[1] in tree:
if tree[cells[0]] != tree[cells[1]]:
board[edge[0]][edge[1]] = ''
root_1 = tree[cells[0]]
root_2 = tree[cells[1]]
for key, value in tree.items():
if value == root_1 or value == root_2:
tree[key] = edge
else:
pass
def maze_generator(board_to_maze):
board = copy.deepcopy(board_to_maze)
grid_generator(board) # Creates grid on board
maze_edges = edges(board) # Returns edges between 2 blank spaces on board
    '''
    tree[node] = root
    Tracks the origin of each carved path; the root is the edge that created it.
    '''
tree = {}
random.shuffle(maze_edges) # To randomize order of edges
for edge in maze_edges:
cells = surrounding_cells(edge, board) # returns surrounding cells of edge (up, down, right, left)
cells = remove_not_used_cells(cells, board) # removes not blank cells (up, down) (left, right)
maze_algorithm(cells, tree,edge,board)
return board
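
# Quick sanity-check sketch for the maze above (the helper name and the 15x15
# size are illustrative; odd dimensions line up with the even-indexed grid):
def _maze_demo(width=15, height=15):
    board = maze_generator(create_board(width, height))
    for row in board:
        print(''.join(cell if cell else ' ' for cell in row))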
def put_quizzes(board):
quizzes = util.quiz_file_reader('quizzes.csv')
quizzes_xy = []
while len(quizzes_xy) != len(quizzes):
x = random.randint(2,len(board) - 1)
y = random.randint(2,len(board[0]) - 1)
if ([x,y] not in quizzes_xy) and \
board[x][y] == '':
quizzes_xy.append([x,y])
for i in quizzes_xy:
board[i[0]][i[1]] = QUIZ_ICON
def quiz_function(board, player):
x = player['player_x_position']
y = player['player_y_position']
quizzes = util.quiz_file_reader('quizzes.csv')
if board[x][y] == QUIZ_ICON:
quiz = random.choice(quizzes)
quiz_read = quiz[0].split('|')
for i in quiz_read:
print(i)
        answer = input('Podaj odp.: ').lower()
        if answer == quiz[1].lower():
txt=('Dobra robota! Zdobywasz 5 gwiazdek!')
ui.print_text(txt,font_color=36)
player['player_*'] += 5
else:
txt=('Nie dobrze. Tracisz 30 HP')
ui.print_text(txt,font_color=36)
player['player_hp'] -= 30
board[x][y] = ''
quizzes.remove(quiz)
def put_memes(board):
memes_xy = []
while len(memes_xy) != 1:
x = random.randint(2,len(board) - 1)
y = random.randint(2,len(board[0]) - 1)
if ([x,y] not in memes_xy) and \
board[x][y] == '':
memes_xy.append([x,y])
for i in memes_xy:
board[i[0]][i[1]] = MEME_ICON
def meme_function(board, player):
x = player['player_x_position']
y = player['player_y_position']
if board[x][y] == MEME_ICON:
os.system('memes\poland-cannot-into-space.jpg')
txt=("Odnalazłeś starożytnego mema! Zdobywasz 3 gwiazdki!")
ui.print_text(txt,font_color=36)
player['player_*'] += 3
board[x][y] = ''
def display_maze_status(player):
hp = player['player_hp']
stars = player['player_*'] * "*"
print(f'You have {hp} HP and {stars} stars!')
def level_2_start():
maze = ui.get_ascii("./ascii_files/maze.txt") #import text from ASCII file
ui.display_ascii(maze,font_colors=[31],spacing=1) #displays ASCII file with color defined by number
input()
util.clear_screen()
input('''
"(...) wyższa cywilizacja przyjeżdża tymi UF-ami
i innymi i przygląda się: co oni tu wyprawiają."
\u001b[36;1mLech Wałęsa\u001b[0m
''')
util.clear_screen()
input("""
Znajdź w kosmicznym labiryncie 8 gwiazdek (★ ★ ★ ★ ★ ★ ★ ★)
i sprostaj wyzwaniom jakie stawiają obce cywilizacje!""")
util.clear_screen()
##### LEVEL3
def display_ascii_with_move(text,counter,bg_color=6, font_colors=[30,31,32,33,34,35,36,37]):
font_color = random.choice(font_colors)
print(f'\x1b[{bg_color};{font_color};40m') #COLOR START
#x = random.randint(1,15)
counter=counter%10
spacing = "\t"*counter #*x
if counter%2==0:
print("\n\n\n\n")
for line in text:
line = spacing + line
print(line.rstrip())
print('\x1b[0m') #COLOR END
def kometa(board,iter,HEIGHT):
iter=(iter%(HEIGHT-1)) if iter>=HEIGHT-1 else iter
iter=iter+1 if iter==0 else iter
board[iter][7]=board[iter][8]=board[iter][9]="*"
def level3_shot():
    '''
    Random shot probability
    '''
    # randint(1, 1) can only return 1, so every shot currently hits;
    # widen the range (e.g. randint(1, 3)) to make hits probabilistic
    result = random.randint(1, 1)
    if result == 1:
        return True
    else:
        return False
def first_shot():
txt=("Potrzymaj mi piwo! I patrz!!!\n")
ui.print_text(txt,font_color=36)
enemy= ui.get_ascii("./ascii_files/alien_sitting_to_ufo.txt")
ui.display_ascii(enemy,font_colors=[32], spacing=7)
def get_random_position(board,number_of_positions):
board_width = len(board[0])-1
board_height = len(board)-1
shots_list = set()
#shot in random position
while len(shots_list) < number_of_positions:
h,w = random.randint(1,board_height-1),random.randint(1,board_width-1)
shots_list.add((h,w))
return shots_list
def intro_lvl3():
txt=("W obliczu konfrontacji z obcymi, topowe ziemskie technologie zdają się być zawodne. Załoga postnawia zdobyć cel podstępem... \nProponujecie grę w alchemika: zamiana procentów na promile")
ui.print_text(txt,font_color=33)
input()
enemy_state=0
enemy_shots_quantity=1
player_live= [LVL3_PLAYER_LIFE_ICON,LVL3_PLAYER_LIFE_ICON,LVL3_PLAYER_LIFE_ICON,LVL3_PLAYER_LIFE_ICON,LVL3_PLAYER_LIFE_ICON]
alien_live= [LVL3_ALIEN_LIFE_ICON,LVL3_ALIEN_LIFE_ICON,LVL3_ALIEN_LIFE_ICON]
def play_level3(temporary_board,player,counter):
global enemy_state,enemy_shots_quantity
x = player['player_x_position']
y = player['player_y_position']
print("PLAYER LIFE: ","".join(player_live),"\t\t\t\t\t","ALIEN LIFE: ","".join(alien_live))
PLAYER_SHOT_ICON= ALKO_ICON#[PLAYER_SHOT1_ICON, PLAYER_SHOT2_ICON]
if enemy_state<1:
temporary_board[2][10] = PLAYER_SHOT_ICON[0]
elif enemy_state==1:
temporary_board[1][20] = PLAYER_SHOT_ICON[1]
elif enemy_state>1:
temporary_board[2][3] = PLAYER_SHOT_ICON[2]
if enemy_state<1:
enemy= ui.get_ascii("./ascii_files/alien_standing.txt")
ui.display_ascii(enemy,font_colors=[32], spacing=5)
elif enemy_state==1:
enemy= ui.get_ascii("./ascii_files/alien_in_ufo.txt")
ui.display_ascii(enemy, spacing=(counter%10))
elif enemy_state>1:
enemy = ui.get_ascii("./ascii_files/enemy2.txt")
display_ascii_with_move(enemy,counter,7,[30,31,33])
key = util.key_pressed()
if key == 'q':
input("You left the game")
return False
else:
player_moves(key, temporary_board, player)
util.clear_screen()
#generate PLAYER shot
if temporary_board[x][y] in PLAYER_SHOT_ICON:
shot_result = level3_shot()
player['player_x_position'] = main.PLAYER_START_X
player['player_y_position'] = main.PLAYER_START_Y
if shot_result:
if enemy_state==0:
enemy_shots_quantity = ENEMY_SHOTS_NUMBER
first_shot()
elif enemy_state==1:
txt = "Po przejściu na wino, przeciwnik ma probnlemy z kontrolą lotu. Zwycięstwo jest blisko!"
ui.print_text(txt,font_color=33)
enemy_state+=1
del alien_live[-1]
#generate and puts on board enemy shots
enemy_shots_position = list(get_random_position(temporary_board,enemy_shots_quantity))
for position in enemy_shots_position:
temporary_board[position[0]][position[1]] = ENEMY_SHOT_ICON
if (position[0],position[1])==(x,y):
txt=("Mały zielony kosmita wygrywa starcie. Po rozmowie z Posejdonem wracasz do walki.")
ui.print_text(txt,font_color=33)
input()
player['player_x_position'] = main.PLAYER_START_X
player['player_y_position'] = main.PLAYER_START_Y
del player_live[-1]
break
if len(alien_live)==0:
img= ui.get_ascii("./ascii_files/koniec.txt")
ui.display_ascii(img,font_colors=[31], spacing=0)
input()
return False
elif len(player_live)==0 or counter >200:
input("Poległeś w walce !")
return False
else:
return True
############################END GAME
def end_game():
txt="Obca cywilizacja jest pod wrażeniem twoich dokonań, zapraszają Cię do wspólnej podrózy w kierunku anomalii"
ui.print_text(txt,font_color=33)
img= ui.get_ascii("./ascii_files/alien_head.txt")
ui.display_ascii(img,font_colors=[32], spacing=5)
input("=>")
os.system('memes\Twardowsky2.png')
|
# Atomic Data 2003/3/26-3/28
AtomData = {
"Vc":{"Z":0 ,"LMX":2 ,"RWS": 1.500,"PeriodicTable":( 1, 0),"Mass":0.000 ,"Name":"Vacancy"},
"H" :{"Z":1 ,"LMX":2 ,"RWS": 1.390,"PeriodicTable":( 1, 1),"Mass":1.008 ,"Name":"Hydrogen"},
"He":{"Z":2 ,"LMX":2 ,"RWS": 2.550,"PeriodicTable":( 1, 18) ,"Mass":4.003 ,"Name":"Helium"},
"Li":{"Z":3 ,"LMX":2 ,"RWS": 3.040,"PeriodicTable":( 2, 1) ,"Mass":6.941 ,"Name":"Lithium"},
"Be":{"Z":4 ,"LMX":2 ,"RWS": 2.270,"PeriodicTable":( 2, 2) ,"Mass":9.012 ,"Name":"Beryllium"},
"B" :{"Z":5 ,"LMX":2 ,"RWS": 1.960,"PeriodicTable":( 2, 13),},
"C" :{"Z":6 ,"LMX":2 ,"RWS": 1.660,"PeriodicTable":( 2, 14),},
"N" :{"Z":7 ,"LMX":2 ,"RWS": 1.900,"PeriodicTable":( 2, 15),},
"O" :{"Z":8 ,"LMX":2 ,"RWS": 1.900,"PeriodicTable":( 2, 16),},
"F" :{"Z":9 ,"LMX":2 ,"RWS": 2.170,"PeriodicTable":( 2, 17),},
"Ne":{"Z":10 ,"LMX":2 ,"RWS": 2.890,"PeriodicTable":( 2, 18),},
"Na":{"Z":11 ,"LMX":2 ,"RWS": 3.760,"PeriodicTable":( 3, 1),},
"Mg":{"Z":12 ,"LMX":2 ,"RWS": 3.250,"PeriodicTable":( 3, 2),},
"Al":{"Z":13 ,"LMX":2 ,"RWS": 2.950,"PeriodicTable":( 3, 13),},
"Si":{"Z":14 ,"LMX":2 ,"RWS": 2.630,"PeriodicTable":( 3, 14),},
"P" :{"Z":15 ,"LMX":2 ,"RWS": 2.560,"PeriodicTable":( 3, 15),},
"S" :{"Z":16 ,"LMX":2 ,"RWS": 2.700,"PeriodicTable":( 3, 16),},
"Cl":{"Z":17 ,"LMX":2 ,"RWS": 2.850,"PeriodicTable":( 3, 17),},
"Ar":{"Z":18 ,"LMX":2 ,"RWS": 3.710,"PeriodicTable":( 3, 18),},
"K" :{"Z":19 ,"LMX":3 ,"RWS": 4.660,"PeriodicTable":( 4, 1),},
"Ca":{"Z":20 ,"LMX":3 ,"RWS": 3.880,"PeriodicTable":( 4, 2),},
"Sc":{"Z":21 ,"LMX":3 ,"RWS": 3.310,"PeriodicTable":( 4, 3),},
"Ti":{"Z":22 ,"LMX":3 ,"RWS": 2.990,"PeriodicTable":( 4, 4),},
"V" :{"Z":23 ,"LMX":3 ,"RWS": 2.760,"PeriodicTable":( 4, 5),},
"Cr":{"Z":24 ,"LMX":3 ,"RWS": 2.640,"PeriodicTable":( 4, 6),},
"Mn":{"Z":25 ,"LMX":3 ,"RWS": 2.570,"PeriodicTable":( 4, 7),},
"Fe":{"Z":26 ,"LMX":3 ,"RWS": 2.520,"PeriodicTable":( 4, 8),},
"Co":{"Z":27 ,"LMX":3 ,"RWS": 2.520,"PeriodicTable":( 4, 9),},
"Ni":{"Z":28 ,"LMX":3 ,"RWS": 2.550,"PeriodicTable":( 4, 10),},
"Cu":{"Z":29 ,"LMX":3 ,"RWS": 2.620,"PeriodicTable":( 4, 11),},
"Zn":{"Z":30 ,"LMX":3 ,"RWS": 2.780,"PeriodicTable":( 4, 12),},
"Ga":{"Z":31 ,"LMX":3 ,"RWS": 2.750,"PeriodicTable":( 4, 13),},
"Ge":{"Z":32 ,"LMX":3 ,"RWS": 2.790,"PeriodicTable":( 4, 14),},
"As":{"Z":33 ,"LMX":3 ,"RWS": 2.830,"PeriodicTable":( 4, 15),},
"Se":{"Z":34 ,"LMX":3 ,"RWS": 2.940,"PeriodicTable":( 4, 16),},
"Br":{"Z":35 ,"LMX":3 ,"RWS": 3.130,"PeriodicTable":( 4, 17),},
"Kr":{"Z":36 ,"LMX":3 ,"RWS": 4.320,"PeriodicTable":( 4, 18),},
"Rb":{"Z":37 ,"LMX":3 ,"RWS": 4.950,"PeriodicTable":( 5, 1),},
"Sr":{"Z":38 ,"LMX":3 ,"RWS": 4.220,"PeriodicTable":( 5, 2),},
"Y" :{"Z":39 ,"LMX":3 ,"RWS": 3.610,"PeriodicTable":( 5, 3),},
"Zr":{"Z":40 ,"LMX":3 ,"RWS": 3.280,"PeriodicTable":( 5, 4),},
"Nb":{"Z":41 ,"LMX":3 ,"RWS": 3.030,"PeriodicTable":( 5, 5),},
"Mo":{"Z":42 ,"LMX":3 ,"RWS": 2.910,"PeriodicTable":( 5, 6),},
"Tc":{"Z":43 ,"LMX":3 ,"RWS": 2.820,"PeriodicTable":( 5, 7),},
"Ru":{"Z":44 ,"LMX":3 ,"RWS": 2.770,"PeriodicTable":( 5, 8),},
"Rh":{"Z":45 ,"LMX":3 ,"RWS": 2.780,"PeriodicTable":( 5, 9),},
"Pd":{"Z":46 ,"LMX":3 ,"RWS": 2.840,"PeriodicTable":( 5, 10),},
"Ag":{"Z":47 ,"LMX":3 ,"RWS": 2.950,"PeriodicTable":( 5, 11),},
"Cd":{"Z":48 ,"LMX":3 ,"RWS": 3.140,"PeriodicTable":( 5, 12),},
"In":{"Z":49 ,"LMX":3 ,"RWS": 3.300,"PeriodicTable":( 5, 13),},
"Sn":{"Z":50 ,"LMX":3 ,"RWS": 3.450,"PeriodicTable":( 5, 14),},
"Sb":{"Z":51 ,"LMX":3 ,"RWS": 3.300,"PeriodicTable":( 5, 15),},
"Te":{"Z":52 ,"LMX":3 ,"RWS": 3.310,"PeriodicTable":( 5, 16),},
"I" :{"Z":53 ,"LMX":3 ,"RWS": 3.500,"PeriodicTable":( 5, 17),},
"Xe":{"Z":54 ,"LMX":3 ,"RWS": 4.310,"PeriodicTable":( 5, 18),},
"Cs":{"Z":55 ,"LMX":3 ,"RWS": 5.300,"PeriodicTable":( 6, 1),},
"Ba":{"Z":56 ,"LMX":3 ,"RWS": 4.200,"PeriodicTable":( 6, 2),},
"La":{"Z":57 ,"LMX":3 ,"RWS": 3.910,"PeriodicTable":( 9, 1),},
"Ce":{"Z":58 ,"LMX":3 ,"RWS": 3.800,"PeriodicTable":( 9, 2),},
"Pr":{"Z":59 ,"LMX":3 ,"RWS": 3.750,"PeriodicTable":( 9, 3),},
"Nd":{"Z":60 ,"LMX":3 ,"RWS": 3.700,"PeriodicTable":( 9, 4),},
"Pm":{"Z":61 ,"LMX":3 ,"RWS": 3.650,"PeriodicTable":( 9, 5),},
"Sm":{"Z":62 ,"LMX":3 ,"RWS": 3.600,"PeriodicTable":( 9, 6),},
"Eu":{"Z":63 ,"LMX":3 ,"RWS": 3.550,"PeriodicTable":( 9, 7),},
"Gd":{"Z":64 ,"LMX":3 ,"RWS": 3.520,"PeriodicTable":( 9, 8),},
"Tb":{"Z":65 ,"LMX":3 ,"RWS": 3.610,"PeriodicTable":( 9, 9),},
"Dy":{"Z":66 ,"LMX":3 ,"RWS": 3.670,"PeriodicTable":( 9, 10),},
"Ho":{"Z":67 ,"LMX":3 ,"RWS": 3.700,"PeriodicTable":( 9, 11),},
"Er":{"Z":68 ,"LMX":3 ,"RWS": 3.730,"PeriodicTable":( 9, 12),},
"Tm":{"Z":69 ,"LMX":3 ,"RWS": 3.750,"PeriodicTable":( 9, 13),},
"Yb":{"Z":70 ,"LMX":3 ,"RWS": 3.560,"PeriodicTable":( 9, 14),},
"Lu":{"Z":71 ,"LMX":3 ,"RWS": 3.440,"PeriodicTable":( 9, 15),},
"Hf":{"Z":72 ,"LMX":3 ,"RWS": 3.230,"PeriodicTable":( 6, 4),},
"Ta":{"Z":73 ,"LMX":3 ,"RWS": 3.040,"PeriodicTable":( 6, 5),},
"W" :{"Z":74 ,"LMX":3 ,"RWS": 2.930,"PeriodicTable":( 6, 6),},
"Re":{"Z":75 ,"LMX":3 ,"RWS": 2.860,"PeriodicTable":( 6, 7),},
"Os":{"Z":76 ,"LMX":3 ,"RWS": 2.820,"PeriodicTable":( 6, 8),},
"Ir":{"Z":77 ,"LMX":3 ,"RWS": 2.830,"PeriodicTable":( 6, 9),},
"Pt":{"Z":78 ,"LMX":3 ,"RWS": 2.880,"PeriodicTable":( 6, 10),},
"Au":{"Z":79 ,"LMX":3 ,"RWS": 2.980,"PeriodicTable":( 6, 11),},
"Hg":{"Z":80 ,"LMX":3 ,"RWS": 3.270,"PeriodicTable":( 6, 12),},
"Tl":{"Z":81 ,"LMX":3 ,"RWS": 3.570,"PeriodicTable":( 6, 13),},
"Pb":{"Z":82 ,"LMX":3 ,"RWS": 3.620,"PeriodicTable":( 6, 14),},
"Bi":{"Z":83 ,"LMX":3 ,"RWS": 3.370,"PeriodicTable":( 6, 15),},
"Po":{"Z":84 ,"LMX":3 ,"RWS": 3.460,"PeriodicTable":( 6, 16),},
"At":{"Z":85 ,"LMX":3 ,"RWS": 3.630,"PeriodicTable":( 6, 17),},
"Rn":{"Z":86 ,"LMX":3 ,"RWS": 4.440,"PeriodicTable":( 6, 18),},
"Fr":{"Z":87 ,"LMX":3 ,"RWS": 5.810,"PeriodicTable":( 7, 1),},
"Ra":{"Z":88 ,"LMX":3 ,"RWS": 4.300,"PeriodicTable":( 7, 2),},
"Ac":{"Z":89 ,"LMX":3 ,"RWS": 3.840,"PeriodicTable":( 10, 1),},
"Th":{"Z":90 ,"LMX":3 ,"RWS": 3.520,"PeriodicTable":( 10, 2),},
"Pa":{"Z":91 ,"LMX":3 ,"RWS": 3.320,"PeriodicTable":( 10, 3),},
"U" :{"Z":92 ,"LMX":3 ,"RWS": 3.130,"PeriodicTable":( 10, 4),},
"Np":{"Z":93 ,"LMX":3 ,"RWS": 3.020,"PeriodicTable":( 10, 5),},
"Pu":{"Z":94 ,"LMX":3 ,"RWS": 2.960,"PeriodicTable":( 10, 6),},
"Am":{"Z":95 ,"LMX":3 ,"RWS": 2.930,"PeriodicTable":( 10, 7),},
"Cm":{"Z":96 ,"LMX":3 ,"RWS": 2.930,"PeriodicTable":( 10, 8),},
"Bk":{"Z":97 ,"LMX":3 ,"RWS": 2.950,"PeriodicTable":( 10, 9),},
"Cf":{"Z":98 ,"LMX":3 ,"RWS": 2.990,"PeriodicTable":( 10, 10),},
"Es":{"Z":99 ,"LMX":3 ,"RWS": 3.050,"PeriodicTable":( 10, 11),},
"Fm":{"Z":100 ,"LMX":3 ,"RWS": 3.170,"PeriodicTable":( 10, 12),},
"Md":{"Z":101 ,"LMX":3 ,"RWS": 0.000,"PeriodicTable":( 10, 13),},
"No":{"Z":102 ,"LMX":3 ,"RWS": 0.000,"PeriodicTable":( 10, 14),},
"Lr":{"Z":103 ,"LMX":3 ,"RWS": 3.500,"PeriodicTable":( 10, 15),},
0: "Vc",
1: "H",
2: "He",
3: "Li",
4: "Be",
5: "B",
6: "C",
7: "N",
8: "O",
9: "F",
10: "Ne",
11: "Na",
12: "Mg",
13: "Al",
14: "Si",
15: "P",
16: "S",
17: "Cl",
18: "Ar",
19: "K",
20: "Ca",
21: "Sc",
22: "Ti",
23: "V",
24: "Cr",
25: "Mn",
26: "Fe",
27: "Co",
28: "Ni",
29: "Cu",
30: "Zn",
31: "Ga",
32: "Ge",
33: "As",
34: "Se",
35: "Br",
36: "Kr",
37: "Rb",
38: "Sr",
39: "Y",
40: "Zr",
41: "Nb",
42: "Mo",
43: "Tc",
44: "Ru",
45: "Rh",
46: "Pd",
47: "Ag",
48: "Cd",
49: "In",
50: "Sn",
51: "Sb",
52: "Te",
53: "I",
54: "Xe",
55: "Cs",
56: "Ba",
57: "La",
58: "Ce",
59: "Pr",
60: "Nd",
61: "Pm",
62: "Sm",
63: "Eu",
64: "Gd",
65: "Tb",
66: "Dy",
67: "Ho",
68: "Er",
69: "Tm",
70: "Yb",
71: "Lu",
72: "Hf",
73: "Ta",
74: "W",
75: "Re",
76: "Os",
77: "Ir",
78: "Pt",
79: "Au",
80: "Hg",
81: "Tl",
82: "Pb",
83: "Bi",
84: "Po",
85: "At",
86: "Rn",
87: "Fr",
88: "Ra",
89: "Ac",
90: "Th",
91: "Pa",
92: "U",
93: "Np",
94: "Pu",
95: "Am",
96: "Cm",
97: "Bk",
98: "Cf",
99: "Es",
100:"Fm",
101:"Md",
102:"No",
103:"Lr",
}
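# A minimal lookup sketch using the dual keying described above:
if __name__ == "__main__":
    iron = AtomData[AtomData[26]]  # Z=26 -> "Fe" -> property dict
    print(iron["Z"], iron["RWS"], iron["PeriodicTable"])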
|
# Negative number check
def check(num):
    return num < 0
print(check(-2))
### The function below filters a list and returns only its negative numbers
lst = [4,-5,4, -3, 23, -254]
def neg(lst):
return [num for num in lst if num <0]
# To count (rather than collect) the negatives: return sum(num < 0 for num in lst)
print(neg(lst)) |
from db import db # importing SQLAlchemy Object
class Device(db.Model):
""" Referring the tablename along with column as required and create if not exists."""
__tablename__ = 'devicedesc'
id = db.Column(db.Integer, primary_key=True)
device_id = db.Column(db.Integer)
password = db.Column(db.String(40))
cpu_usage = db.Column(db.Float(precision=2))
def __init__(self,device_id,password,cpu_usage):
self.device_id = device_id
self.password = password
self.cpu_usage= cpu_usage
def json(self):
return {'device_id':self.device_id, 'cpu_usage':self.cpu_usage}
@classmethod
def get_by_id(cls,id_):
return cls.query.filter_by(id= id_).first()
@classmethod
def get_by_device_id(cls, dev_id):
return cls.query.filter_by(device_id=dev_id).first()
def save_to_db(self):
db.session.add(self)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
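# A minimal, self-contained usage sketch (hypothetical wiring: it builds an
# in-memory SQLite app just to exercise the model; in the real project the
# SQLAlchemy object comes from db.py and is bound to the application there):
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
    db.init_app(app)
    with app.app_context():
        db.create_all()
        Device(device_id=42, password='secret', cpu_usage=13.37).save_to_db()
        print(Device.get_by_device_id(42).json())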
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, get_object_or_404, render_to_response
from .models import Organization, Location, Item
from django.views.generic import ListView, CreateView, DetailView, UpdateView
from .forms import OrganizationForm, LocationForm, ItemForm
from django.http import HttpResponseRedirect, JsonResponse
from django.template.loader import render_to_string
import datetime
def home(request):
    # order_by sorts newest first; the [:4] slice limits each list to four results
recently_added_org = Organization.objects.order_by('-timestamp')[:4]
recently_added_loc = Location.objects.order_by('-timestamp')[:4]
recently_added_item = Item.objects.order_by('-timestamp')[:4]
total_orgs = Organization.objects.all().count()
total_locations = Location.objects.all().count()
total_items = Item.objects.all().count()
context = {
'recently_added_org': recently_added_org,
'recently_added_loc': recently_added_loc,
'recently_added_item': recently_added_item,
'total_orgs': total_orgs,
'total_locations': total_locations,
'total_items': total_items,
}
return render(request,'home.html', context)
def org_list(request):
queryset = Organization.objects.all()
context = {
'object_list': queryset,
'title': "Organization"
}
return render(request, 'catalog/organization_list.html',context)
def save_all(request,form,template_name):
data = dict()
if request.method == 'POST':
if form.is_valid():
instance = form.save(commit=False) #acts like a pre_save
instance.user= request.user #instance.save()
instance.last_accessed=datetime.datetime.now()
form.save()
data['form_is_valid'] = True
queryset = Organization.objects.all()
data['org_list'] = render_to_string('catalog/organization_list_update.html',{'object_list':queryset} )
else:
data['form_is_valid'] = False
context = { 'form':form,'type':"organization"}
data['html_form'] = render_to_string(template_name,context,request=request)
return JsonResponse(data)
def org_create(request):
if request.method == 'POST':
form = OrganizationForm(request.POST)
else:
form = OrganizationForm()
return save_all(request,form,'forms/create.html')
def org_update(request,id):
org = get_object_or_404(Organization,id=id)
if request.method == 'POST':
form = OrganizationForm(request.POST,instance=org)
else:
form = OrganizationForm(instance=org)
return save_all(request,form,'forms/update.html')
def org_delete(request,id):
data = dict()
org = get_object_or_404(Organization,id=id)
if request.method == "POST":
org.delete()
data['form_is_valid'] = True
orgs = Organization.objects.all()
data['org_list'] = render_to_string('catalog/organization_list_update.html',{'object_list':orgs,"message":'Deleted Successfully'})
else:
context = {'org':org}
data['html_form'] = render_to_string('catalog/organization_delete.html',context,request=request)
return JsonResponse(data)
# Location CRUD
def location_list(request, org_id):
organization = Organization.objects.filter(id=org_id)[0]
location_list = organization.location_set.all()
item_count = Item.objects.filter(id=org_id).count()
return render(request, 'catalog/location_list.html', context={'location_list':location_list, 'organization':organization, 'item_count':item_count} )
def item_list(request, item_id):
location = Location.objects.filter(id=item_id)[0]
item_list = location.item_set.all()
return render(request, 'catalog/item_list.html', context={'item_list':item_list, 'location':location} )
def search(request, param):
if request.method == 'POST':
search = request.POST['search']
else:
search = ""
organizations = Organization.objects.filter(org_name__contains=search)
return render_to_response('forms/search_org.html',{'organizations': organizations})
def org_by_id(request, id):
    return render_to_response('organization_list', {'organization': Organization.objects.get(id=id)})
def loc_list(request):
queryset = Location.objects.all()
context = {
'location_list': queryset,
'title': "Location",
'org_th': "Name of Organization"
}
return render(request, 'catalog/location_list.html',context )
def save_all_loc(request,form,template_name):
data = dict()
if request.method == 'POST':
if form.is_valid():
instance = form.save(commit=False) #acts like a pre_save
instance.user= request.user #instance.save()
instance.last_accessed=datetime.datetime.now()
form.save()
data['form_is_valid'] = True
queryset = Location.objects.all()
data['loc_list'] = render_to_string('catalog/location_by_organization.html',{'location_list':queryset} )
else:
data['form_is_valid'] = False
context = { 'form':form,'type':"Location"}
data['html_form'] = render_to_string(template_name,context,request=request)
return JsonResponse(data)
def loc_create(request):
if request.method == 'POST':
form = LocationForm(request.POST)
else:
form = LocationForm()
    return save_all_loc(request,form,'forms/loc_create.html')
def loc_update(request,id):
    loc = get_object_or_404(Location,id=id)
    if request.method == 'POST':
        form = LocationForm(request.POST,instance=loc)
    else:
        form = LocationForm(instance=loc)
    return save_all_loc(request,form,'forms/loc_update.html')
def loc_delete(request,id):
data = dict()
loc = get_object_or_404(Location,id=id)
if request.method == "POST":
loc.delete()
data['form_is_valid'] = True
        locations = Location.objects.all()
        data['loc_list'] = render_to_string('catalog/location_by_organization.html',{'location_list':locations,"message":'Deleted Successfully'})
    else:
        context = {'loc':loc}
        data['html_form'] = render_to_string('catalog/organization_delete.html',context,request=request)
return JsonResponse(data)
|
# Project Euler 4-style search: largest palindrome that is a product of two
# 3-digit numbers (the 902..999 window is a heuristic that contains it).
def is_palindrome(n):
    s = str(n)
    return s == s[::-1]

# The first palindrome found while scanning downward is not necessarily the
# largest (x*y is not monotone over the scan order), so track the maximum.
ans = 0
for x in range(999, 901, -1):
    for y in range(x, 901, -1):
        if is_palindrome(x * y):
            ans = max(ans, x * y)
print(ans)
|
# Program to convert temperature from Fahrenheit to Celsius
f = float(input('Enter Temperature In Fahrenheit : '))
c = (f - 32) * 5 / 9
print('Temperature In Celsius Is', c) |
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
# from phonenumber_field.modelfields import PhoneNumberField
USER_TYPE_CHOICES = [
    ('prime', 'PRIME'),
    ('non prime', 'NON PRIME'),
]
class UserRegisterForm(UserCreationForm):
    email = forms.EmailField()
    phone = forms.CharField()
    user_type = forms.CharField(label='user_type',
                                widget=forms.Select(choices=USER_TYPE_CHOICES),
                                initial="non prime", disabled=True)
    class Meta:
        model = User
        # user_type is declared explicitly above, so a Meta.widgets entry for
        # it would be ignored; the disabled Select already fixes its value.
        fields = ['username', 'email', 'phone', 'password1', 'password2', 'user_type'] |
import config
from modle import dzzhkl_molde
import json, logging
import requests
# Open an electronic account
def dzzhkl(req):
    data = dzzhkl_molde.dzzhkl_modle(req)
    header = config.header
    url = config.url['jinjian_url']
    resp = requests.post(url, headers=header, data=json.dumps(data))  # do not shadow the req argument
    if resp.status_code == 200:
        resp_dict = json.loads(resp.text)
        if resp_dict['header']['status_code'] == 200:
            logging.warning('Electronic account opened successfully')
            return resp.text
        else:
            logging.warning('Opening electronic account failed, request: {}, response: {}'.format(url, resp.text))
            exit()
    else:
        logging.warning('Opening electronic account failed, request: {}, response: {}'.format(url, resp.text))
        exit()
|
ciphertext = input("enter ciphertext: ")
for i in range(0, len(ciphertext), 2):
print(ciphertext[i:i+2], end=" ") |
# (c) Copyright 2018 SUSE LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from mock import MagicMock
from mock import mock
from mock import patch
import networking_vsphere
from networking_vsphere.common import constants as const
from networking_vsphere.utils import vim_objects
from oslotest import base
class TestBase(base.BaseTestCase):
def setUp(self):
super(TestBase, self).setUp()
self.addCleanup(patch.stopall)
self.connection_patcher = patch(
'networking_vsphere.utils.vim_objects.api.VMwareAPISession')
self.mocked_session = self.connection_patcher.start()
session_instance = MagicMock()
session_instance.invoke_api.return_value = [MagicMock(), MagicMock()]
self.mocked_session.return_value = session_instance
class TestVcenterProxy(TestBase):
def setUp(self):
super(TestVcenterProxy, self).setUp()
self.sut = vim_objects.VcenterProxy(name='test_dvs',
vcenter_user="username",
vcenter_ip='127.0.0.1',
vcenter_port=443,
vcenter_password='test'
)
self.sut.connect_to_vcenter()
def test_connect_to_vcenter(self):
self.assertIsNotNone(self.sut.cf)
def test_get_type(self):
self.sut.connect_to_vcenter()
self.sut.get_type('fake_type')
        self.sut.cf.create.assert_called_with('ns0:fake_type')
def test_get_all_objects_of_type(self):
self.assertIsNotNone(self.sut.get_all_objects_of_type('some_type'))
self.sut.session.invoke_api.assert_called_with(
vim_objects.vim_util,
'get_objects',
self.sut.session.vim,
'some_type',
const.VIM_MAX_OBJETS
)
def test_get_vcenter_hosts(self):
self.assertIsNotNone(self.sut.get_hosts())
self.sut.session.invoke_api.assert_called_with(
vim_objects.vim_util,
'get_objects',
self.sut.session.vim,
'HostSystem',
const.VIM_MAX_OBJETS
)
class TestDistributedVirtualSwitch(TestBase):
def setUp(self):
super(TestDistributedVirtualSwitch, self).setUp()
self.sut = vim_objects.DistributedVirtualSwitch(
'test_dvs',
vcenter_ip='127.0.0.1',
vcenter_port=443,
vcenter_password='test',
host_names=[],
pnic_devices=[
'vmnic1',
'vmnic2']
)
self.sut.host_names = ['HostSystem1', 'HostSystem2']
self.sut.connect_to_vcenter()
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_used_pnics_keys_in_host')
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_all_pnic_keys_in_host')
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_mob_by_name')
def test_create_spec(self, mocked_get_by_name, mocked_all_keys,
mocked_used_keys,
mocked_get_type):
key1 = 'key-vim.host.PhysicalNic-vmnic1'
key2 = 'key-vim.host.PhysicalNic-vmnic2'
key3 = 'key-vim.host.PhysicalNic-vmnic3'
mocked_all_keys.return_value = {key1, key2, key3}
mocked_used_keys.return_value = {key2}
mocked_result = MagicMock()
mocked_get_type.return_value = mocked_result
mocked_host = MagicMock()
mocked_host.obj = MagicMock()
mocked_get_by_name.return_value = MagicMock()
self.sut.hosts = [mocked_host]
self.assertEqual(self.sut.create_spec, mocked_result)
for _type in [
'DVSCreateSpec',
'DistributedVirtualSwitchProductSpec',
'VMwareDVSConfigSpec',
'ConfigSpecOperation',
'DistributedVirtualSwitchHostMemberPnicBacking',
'DistributedVirtualSwitchHostMemberPnicSpec',
'DVSNameArrayUplinkPortPolicy'
]:
mocked_get_type.assert_any_call(_type)
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_used_pnics_keys_in_host')
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_all_pnic_keys_in_host')
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_mob_by_name')
def test_config_spec(self, mocked_get_mob_by_name, mocked_all_keys,
mocked_used_keys,
mocked_get_type):
key1 = 'key-vim.host.PhysicalNic-vmnic1'
key2 = 'key-vim.host.PhysicalNic-vmnic2'
key3 = 'key-vim.host.PhysicalNic-vmnic3'
mocked_all_keys.return_value = {key1, key2, key3}
mocked_used_keys.return_value = {key2}
mocked_result = MagicMock()
mocked_get_type.return_value = mocked_result
mocked_host = MagicMock()
mocked_host.obj = MagicMock()
mocked_get_mob_by_name.return_value = MagicMock()
self.sut.hosts = [mocked_host]
spec = self.sut.config_spec
self.assertEqual(spec, mocked_result)
for _type in [
'VMwareDVSConfigSpec',
'ConfigSpecOperation',
'DistributedVirtualSwitchHostMemberPnicBacking',
'DistributedVirtualSwitchHostMemberPnicSpec',
'DVSNameArrayUplinkPortPolicy'
]:
mocked_get_type.assert_any_call(_type)
self.assertEqual(spec.name, self.sut.name)
self.assertEqual(spec.description, self.sut.description)
self.assertEqual(spec.maxPorts, self.sut.max_ports)
self.assertEqual(spec.maxMtu, self.sut.max_mtu)
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_used_pnics_keys_in_host')
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_all_pnic_keys_in_host')
def test_list_of_member_hosts_specs(self, mocked_all_keys,
mocked_used_keys,
mocked_get_type):
key1 = 'key-vim.host.PhysicalNic-vmnic1'
key2 = 'key-vim.host.PhysicalNic-vmnic2'
key3 = 'key-vim.host.PhysicalNic-vmnic3'
mocked_all_keys.return_value = {key1, key2, key3}
mocked_used_keys.return_value = {key2}
mocked_result = MagicMock()
mocked_host = MagicMock()
mocked_host.obj = MagicMock()
self.sut.hosts = [mocked_host]
mocked_get_type.return_value = mocked_result
results = self.sut.list_of_host_member_config_specs
self.assertEqual(len(results),
len(self.sut.hosts)
)
for _type in [
'ConfigSpecOperation',
'DistributedVirtualSwitchHostMemberPnicBacking',
'DistributedVirtualSwitchHostMemberPnicSpec',
]:
mocked_get_type.assert_any_call(_type)
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
def test_host_member_pnic_backing(self, mocked_get_type):
mocked_result = MagicMock()
mocked_result.__len__.return_value = 1
mocked_get_type.return_value = mocked_result
results = self.sut.host_member_pnic_backing(['vmnic1'])
for _type in [
'DistributedVirtualSwitchHostMemberPnicBacking',
'DistributedVirtualSwitchHostMemberPnicSpec',
]:
mocked_get_type.assert_any_call(_type)
self.assertEqual(len(results), 1)
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
def test_host_member_pnic_spec(self, mocked_get_type):
mocked_result = MagicMock()
mocked_result.__len__.return_value = 1
mocked_get_type.return_value = mocked_result
results = self.sut.host_member_pnic_spec(['vmnic1'])
self.assertEqual(len(results), 1)
mocked_get_type.assert_any_call(
'DistributedVirtualSwitchHostMemberPnicSpec')
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
def test_uplink_port_policy(self, mocked_get_type):
mocked_result = MagicMock()
mocked_result.__len__.return_value = 1
mocked_get_type.return_value = mocked_result
results = self.sut.uplink_port_policy
self.assertEqual(len(results), 1)
mocked_get_type.assert_any_call('DVSNameArrayUplinkPortPolicy')
self.assertEqual(len(results.uplinkPortName),
len(self.sut.pnic_keys))
def test_uplink_port_names(self):
self.assertEqual(self.sut.uplink_port_names,
['dvUplink0', 'dvUplink1'])
self.sut.pnic_keys = []
self.assertEqual(self.sut.uplink_port_names, ['dvUplink'])
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_mob_by_name')
def test_datacenter(self, mocked_get_mob_by_name):
self.assertIsNotNone(self.sut.datacenter)
mocked_get_mob_by_name.assert_called_with('Datacenter',
self.sut.datacenter_name)
class TestDVSPortGroup(TestBase):
def setUp(self):
super(TestDVSPortGroup, self).setUp()
self.sut = vim_objects.DVSPortGroup('test_dvs_pg',
vlan_type=None,
vlan_id=None,
vlan_range_start=0,
vlan_range_end=4094,
dvs_name=None,
nic_teaming=None,
description=None,
allow_promiscuous=False,
forged_transmits=False,
auto_expand=True
)
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
def test_config_spec(self, mocked_get_type):
mocked_result = MagicMock()
mocked_get_type.return_value = mocked_result
self.assertEqual(self.sut.config_spec, mocked_result)
for _type in [
'DVPortgroupConfigSpec',
'DistributedVirtualPortgroupPortgroupType',
'VMwareDVSPortSetting',
'DVSSecurityPolicy',
'BoolPolicy',
'VmwareUplinkPortTeamingPolicy',
'DVSFailureCriteria',
'VMwareUplinkPortOrderPolicy'
]:
mocked_get_type.assert_any_call(_type)
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
def test_dvs_port_settings(self, mocked_get_type):
mocked_result = MagicMock()
mocked_get_type.return_value = mocked_result
self.assertEqual(self.sut.dvs_port_settings, mocked_result)
for _type in [
'VMwareDVSPortSetting',
'DVSSecurityPolicy',
'BoolPolicy',
'VmwareUplinkPortTeamingPolicy',
'DVSFailureCriteria',
'VMwareUplinkPortOrderPolicy'
]:
mocked_get_type.assert_any_call(_type)
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
def test_vlan_spec(self, mocked_get_type):
mocked_result = MagicMock()
mocked_result.__len__.return_value = 1
mocked_get_type.return_value = mocked_result
self.sut.vlan_type = None
self.assertEqual(len(self.sut.vlan_spec), 1)
mocked_get_type.assert_called_with(
'VmwareDistributedVirtualSwitchVlanIdSpec')
self.sut.vlan_type = 'vlan'
self.assertEqual(len(self.sut.vlan_spec), 1)
mocked_get_type.assert_called_with(
'VmwareDistributedVirtualSwitchVlanIdSpec')
self.sut.vlan_type = 'trunk'
self.assertEqual(len(self.sut.vlan_spec), 1)
mocked_get_type.assert_called_with(
'VmwareDistributedVirtualSwitchTrunkVlanSpec')
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
def test_vlan_spec_id(self, mocked_get_type):
mocked_result = MagicMock()
mocked_get_type.return_value = mocked_result
self.sut.vlan_type = None
        self.sut.vlan_id = None
self.assertEqual(self.sut.vlan_spec_id, 0)
mocked_get_type.assert_not_called()
mocked_get_type.reset_mock()
self.sut.vlan_type = 'vlan'
self.sut.vlan_id = None
self.assertEqual(self.sut.vlan_spec_id, 0)
mocked_get_type.assert_not_called()
mocked_get_type.reset_mock()
self.sut.vlan_type = 'trunk'
self.sut.vlan_id = None
results = self.sut.vlan_spec_id
mocked_get_type.assert_called_with('NumericRange')
self.assertEqual(results.start,
self.sut.vlan_range_start)
self.assertEqual(results.end,
self.sut.vlan_range_end)
mocked_get_type.reset_mock()
self.sut.vlan_type = None
self.sut.vlan_id = 1
self.assertEqual(self.sut.vlan_spec_id, 1)
mocked_get_type.assert_not_called()
mocked_get_type.reset_mock()
self.sut.vlan_type = 'vlan'
self.sut.vlan_id = 1
self.assertEqual(self.sut.vlan_spec_id, 1)
mocked_get_type.assert_not_called()
mocked_get_type.reset_mock()
self.sut.vlan_type = 'trunk'
self.sut.vlan_id = 1
results = self.sut.vlan_spec_id
mocked_get_type.assert_called_with('NumericRange')
self.assertEqual(results.start,
self.sut.vlan_range_start)
self.assertEqual(results.end,
self.sut.vlan_range_end)
mocked_get_type.reset_mock()
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
def test_security_policy(self, mocked_get_type):
mocked_result = MagicMock()
mocked_get_type.return_value = mocked_result
results = self.sut.security_policy
self.assertEqual(results, mocked_result)
for _type in [
'DVSSecurityPolicy',
'BoolPolicy'
]:
mocked_get_type.assert_any_call(_type)
self.assertEqual(results.allowPromiscuous.value,
self.sut.allow_promiscuous)
self.assertEqual(results.forgedTransmits.value,
self.sut.forged_transmits)
@mock.patch.object(networking_vsphere.utils.vim_objects.VcenterProxy,
'get_type')
def test_uplink_teaming_policy(self, mocked_get_type):
mocked_result = MagicMock()
mocked_get_type.return_value = mocked_result
self.assertEqual(len(self.sut.uplink_teaming_policy.
uplinkPortOrder.activeUplinkPort), 0)
self.sut.nic_teaming['active_nics'] = ['vmnic1', 'vmnic2']
results = self.sut.uplink_teaming_policy
self.assertEqual(results, mocked_result)
for _type in [
'VmwareUplinkPortTeamingPolicy',
'BoolPolicy',
'DVSFailureCriteria',
'VMwareUplinkPortOrderPolicy'
]:
mocked_get_type.assert_any_call(_type)
self.assertEqual(len(results.uplinkPortOrder.activeUplinkPort), 2)
self.assertEqual(results.policy.value,
self.sut.nic_teaming['load_balancing'])
self.assertEqual(results.failureCriteria.checkBeacon.value,
self.sut.nic_teaming['network_failover_detection'])
self.assertEqual(results.notifySwitches.value,
self.sut.nic_teaming['notify_switches'])
|
import networkx as nx
from typing import Optional
from bokeh.io import output_file, show
from bokeh.models import (
    BoxSelectTool,
    Circle,
    HoverTool,
    MultiLine,
    NodesAndLinkedEdges,
    Plot,
    Range1d,
    TapTool,
    BoxZoomTool,
    ResetTool,
    WheelZoomTool,
)
from bokeh.palettes import Spectral4
from bokeh.plotting import from_networkx
def build_plot(term: str, g: nx.Graph, fpath: Optional[str] = None):
plot = Plot(
plot_width=1200,
plot_height=800,
x_range=Range1d(-1.1, 1.1),
y_range=Range1d(-1.1, 1.1),
)
plot.title.text = f""""{term}" google vs. graph"""
plot.add_tools(TapTool(), BoxSelectTool())
graph_renderer = from_networkx(g, nx.spring_layout, scale=1, center=(0, 0))
graph_renderer.node_renderer.glyph = Circle(
size="total_weight",
fill_color=Spectral4[0]
)
graph_renderer.node_renderer.selection_glyph = Circle(
size=15, fill_color=Spectral4[2]
)
graph_renderer.node_renderer.hover_glyph = Circle(size=15, fill_color=Spectral4[1])
graph_renderer.edge_renderer.glyph = MultiLine(
line_color="#CCCCCC", line_alpha=0.8, line_width="weight"
)
graph_renderer.edge_renderer.selection_glyph = MultiLine(
line_color=Spectral4[2], line_width=5
)
graph_renderer.edge_renderer.hover_glyph = MultiLine(
line_color=Spectral4[1], line_width=5
)
graph_renderer.selection_policy = NodesAndLinkedEdges()
graph_renderer.inspection_policy = NodesAndLinkedEdges()
plot.renderers.append(graph_renderer)
node_hover_tool = HoverTool(tooltips=[("name", "@name")])
    plot.add_tools(
        node_hover_tool, BoxZoomTool(), ResetTool(), WheelZoomTool()
    )  # TapTool was already added above, so it is not added a second time
if not fpath:
fpath = f"vs_graph_{term.replace(' ', '_').replace('/', '-')}.html"
output_file(fpath)
show(plot)
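# A minimal usage sketch: from_networkx copies node/edge attributes into the
# renderers' data sources, so the demo graph must carry the fields mapped
# above ('total_weight' and 'name' on nodes, 'weight' on edges).
if __name__ == "__main__":
    demo = nx.Graph()
    demo.add_node("python", name="python", total_weight=20)
    demo.add_node("graphs", name="graphs", total_weight=12)
    demo.add_edge("python", "graphs", weight=3)
    build_plot("python graphs", demo)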
|
import torch
from torch import Tensor, nn as nn
def multiclass_hinge_loss(outputs: Tensor, targets: Tensor, margin=1., reduction='mean', device='cpu') -> Tensor:
assert outputs.shape[0] == targets.shape[0]
batch_size = outputs.shape[0]
num_classes = outputs.shape[1]
    # TODO to be revisited when PyTorch implements https://www.tensorflow.org/api_docs/python/tf/map_fn
    loss = torch.tensor(0., device=device)
    for x, y in zip(outputs, targets):
        loss += (torch.relu(margin + x - x[y]).sum() - margin)
    # Normalize once, after accumulating over the batch; dividing inside the
    # loop would keep re-scaling the contributions of earlier samples.
    loss /= num_classes
if reduction == 'mean':
loss /= batch_size
return loss
def simplex_projection(v: Tensor) -> Tensor:
    # Projection of vector v (in R^n) onto the n-dimensional simplex, via the
    # sort-based algorithm: find the shift delta so that the positive part of
    # (v + delta) sums to 1, then clip the negatives to zero.
    x = v.clone().flatten()
n = x.size(0)
if n != v.nelement():
raise ValueError(f"v must be have a shape of (n, 1) or (n, ) and not {v.shape}")
sorted_x = torch.sort(x, dim=0, descending=True)[0]
elem_sum = 0
delta = 0
for i in range(n):
elem_sum += sorted_x[i]
delta = (1 - elem_sum) / (i + 1)
if i + 1 == n or -delta >= sorted_x[i + 1]:
break
x += delta
x[torch.lt(x, 0)] = 0
x = x.view_as(v)
return x
def calc_lip_const(A: Tensor, beta: float):
# inner minimization utility function
# TODO This is a non-elegant bypass
try:
_, s, _ = A.svd()
L = ((max(s).item()) ** 2) / beta
except RuntimeError: # torch SVD may not find a solution
L = 1e-5
if L == 0:
L = 1e-5
return L
def flatten_params(params, flatten_grad=False):
"""
some description
:param params: model params
:type params:
:param flatten_grad: Whether or not to flatten the gradients of the parameters
:type flatten_grad: bool
:return:
:rtype: Tensor
"""
if flatten_grad:
return torch.cat([param.grad.flatten() for param in params])
else:
return torch.cat([param.flatten() for param in params])
def reshape_params(param_groups: dict, params_flattened):
reshaped_params = []
total_elements = 0
for param in param_groups['params']:
reshaped_params.append(params_flattened[total_elements:total_elements + param.nelement()])
reshaped_params[-1] = reshaped_params[-1].view_as(param)
total_elements += param.nelement()
return reshaped_params
def g_i_y_hat(output: Tensor, y_true: Tensor, y_hat: int): # batch compatible version
"""
:param output:
:type output:
:param y_true: y_true "target"
:type y_true:
:param y_hat:
:type y_hat:
:return:
:rtype:
"""
zero_one_loss = (~torch.eq(output.argmax(-1), y_true)).float().squeeze(0)
loss = torch.tensor(0., device='cuda')
for idx, y in enumerate(y_true):
loss += zero_one_loss[idx] + output[idx][y_hat] - output[idx][y]
return loss / len(output)
class MulticlassHingeLoss(nn.Module):
def __init__(self, margin=1., reduction='mean', device='cuda'):
super(MulticlassHingeLoss, self).__init__()
self.margin = margin
self.device = device
if reduction == 'mean' or reduction == 'sum':
self.reduction = reduction
else:
raise RuntimeError(f'Unsupported reduction: "{reduction}"')
def forward(self, inputs: Tensor, targets: Tensor) -> Tensor:
return multiclass_hinge_loss(inputs, targets, self.margin, self.reduction, self.device)
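# A quick smoke test for the loss (CPU; shapes and values are illustrative):
if __name__ == "__main__":
    torch.manual_seed(0)
    outputs = torch.randn(4, 3)            # batch of 4 samples, 3 classes
    targets = torch.tensor([0, 2, 1, 2])
    criterion = MulticlassHingeLoss(device='cpu')
    print(criterion(outputs, targets))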
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0' # '3,2' #'3,2,1,0'
from data_util import *
# from unet_5_scale_more_aug.model import SaltNet as Net
# from resnet34.model_resnet34_bn import SaltNet as Net
from seresnet50.model_se_resnext50_bn import SeResNeXt50Unet as Net
# from resnet_aug0.model import SaltNet as Net
SIZE = 101
PAD_1 = 13
PAD_2 = 14
Y0, Y1, X0, X1 = PAD_1, PAD_1 + SIZE, PAD_1, PAD_1 + SIZE,
## global setting ############################################################
out_dir = \
'E:\\DHWorkStation\\Project\\tgs_pytorch\\output\\seresnext50_bn\\fold1\\sallow\\'
initial_checkpoint = \
'E:\\DHWorkStation\\Project\\tgs_pytorch\\output\\seresnext50_bn\\swa_test\\checkpoint\\swa_00065600_loss_0.224_model.pth'
# out_dir + '/resnet34/checkpoint/00018000_model.pth'
# '/root/share/project/kaggle/tgs/results/simple-004-d/checkpoint/00032000_model.pth'
# '/root/share/project/kaggle/tgs/results/simple-004-b/checkpoint/00016000_model.pth'
# '/root/share/project/kaggle/tgs/results/simple-002-02-xx/checkpoint/00014000_model.pth'
# split, mode = 'valid_400_1_origin', 'valid'
split, mode = 'test_18000', 'test'
# #augment = 'flip'
# augment = 'null'
# #augment = 'intensity'
# #augment = 'intensity-flip'
#
def augment_flip(image, mask, index):
cache = Struct(image=image.copy(), mask=mask.copy())
if mask == []:
image = do_horizontal_flip(image)
# image = do_center_pad_to_factor(image, factor=32)
# image = cv2.resize(image, dsize=(SIZE, SIZE))
image = do_center_pad(image, PAD_1, PAD_2)
else:
image, mask = do_horizontal_flip2(image, mask)
# image, mask = do_resize2(image, mask, SIZE, SIZE)
image, mask = do_center_pad2(image, mask, PAD_1, PAD_2)
# image, mask = do_center_pad_to_factor2(image, mask, factor=32)
return image, mask, index, cache
def unaugment_flip(prob):
# dy0, dy1, dx0, dx1 = compute_center_pad(IMAGE_HEIGHT, IMAGE_WIDTH, factor=32)
# prob = prob[:, dy0:dy0 + IMAGE_HEIGHT, dx0:dx0 + IMAGE_WIDTH]
res = []
for p in prob:
p = p[Y0:Y1, X0:X1]
p = p[:, ::-1]
# p = cv2.resize(p, (101, 101))
res.append(p)
res = np.array(res)
# prob = prob[:, Y0:Y1, X0:X1]
# prob = prob[:,:, ::-1]
return res
# ---------------------
# augment == 'null' :
def augment_null(image, mask, index):
cache = Struct(image=image.copy(), mask=mask.copy())
if mask == []:
# image = cv2.resize(image, dsize=(SIZE, SIZE))
image = do_center_pad(image, PAD_1, PAD_2)
# image = do_center_pad_to_factor(image, factor=32)
else:
# image, mask = do_resize2(image, mask, SIZE, SIZE)
image, mask = do_center_pad2(image, mask, PAD_1, PAD_2)
# image, mask = do_center_pad_to_factor2(image, mask, factor=32)
return image, mask, index, cache
def unaugment_null(prob):
res = []
for p in prob:
p = p[Y0:Y1, X0:X1]
p = cv2.resize(p, (101, 101))
res.append(p)
res = np.array(res)
# dy0, dy1, dx0, dx1 = compute_center_pad(IMAGE_HEIGHT, IMAGE_WIDTH, factor=32)
# prob = prob[:, dy0:dy0 + IMAGE_HEIGHT, dx0:dx0 + IMAGE_WIDTH]
return res
def run_predict(augment):
if augment == 'null':
test_augment = augment_null
test_unaugment = unaugment_null
if augment == 'flip':
test_augment = augment_flip
test_unaugment = unaugment_flip
# ....................................................
## setup -----------------
os.makedirs(out_dir + '/test/' + split, exist_ok=True)
os.makedirs(out_dir + '/backup', exist_ok=True)
# backup_project_as_zip(PROJECT_PATH, out_dir +'/backup/code.test.%s.zip'%IDENTIFIER)
log = Logger()
log.open(out_dir + '/log.submit.txt', mode='a')
log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
log.write('\tSEED = %u\n' % SEED)
log.write('\tPROJECT_PATH = %s\n' % PROJECT_PATH)
log.write('\tout_dir = %s\n' % out_dir)
log.write('\n')
## dataset ----------------------------------------
log.write('** dataset setting **\n')
batch_size = 32
test_dataset = TsgDataset(split, test_augment, mode)
test_loader = DataLoader(
test_dataset,
sampler=SequentialSampler(test_dataset),
batch_size=batch_size,
drop_last=False,
num_workers=0,
pin_memory=True,
collate_fn=null_collate)
assert (len(test_dataset) >= batch_size)
log.write('batch_size = %d\n' % (batch_size))
log.write('\n')
## net ----------------------------------------
log.write('** net setting **\n')
net = Net().cuda()
if initial_checkpoint is not None:
log.write('\tinitial_checkpoint = %s\n' % initial_checkpoint)
net.load_state_dict(torch.load(initial_checkpoint, map_location=lambda storage, loc: storage))
log.write('%s\n\n' % (type(net)))
log.write('\n')
####### start here ##########################
all_prob = []
all_num = 0
all_loss = np.zeros(2, np.float32)
net.set_mode('test')
for input, truth, index, cache in test_loader:
# print(input.shape)
#
print('\r', all_num, end='', flush=True)
batch_size = len(index)
all_num += batch_size
input = input.cuda()
with torch.no_grad():
logit = net(input)
prob = F.sigmoid(logit)
if 0: ##for debug
truth = truth.cuda()
loss = net.criterion(logit, truth)
dice = net.metric(logit, truth)
all_loss += batch_size * np.array((loss.item(), dice.item(),))
##-----------------------------
prob = prob.squeeze().data.cpu().numpy()
prob = test_unaugment(prob)
all_prob.append(prob)
if 0: ##for debug
os.makedirs(out_dir + '/test/%s/%s' % (split, augment), exist_ok=True)
for b in range(batch_size):
name = test_dataset.ids[index[b]]
predict = prob[b]
image = cache[b].image * 255
truth = cache[b].mask
image = np.dstack([image, image, image])
overlay0 = draw_mask_overlay(predict, image, color=[0, 0, 255])
overlay0 = draw_mask_to_contour_overlay(predict, overlay0, 2, color=[0, 0, 255])
if truth == []:
overlay1 = np.zeros((101, 101, 3), np.float32)
else:
overlay1 = draw_mask_overlay(truth, image, color=[255, 0, 0])
overlay = np.hstack([image, overlay0, overlay1])
cv2.imwrite(out_dir + '/test/%s/%s/%s.png' % (split, augment, name), overlay * 255)
# image_show_norm('overlay',overlay,1,2)
image_show('overlay', overlay, 2)
cv2.waitKey(0)
print('\r', all_num, end='\n', flush=True)
all_prob = np.concatenate(all_prob)
# for thres in xrange(0.15,0.85,0.05):
all_prob = (all_prob * 255).astype(np.uint8)
np.save(out_dir + '/test/%s-%s.prob.uint8.npy' % (split, augment), all_prob)
print(all_prob.shape)
print('')
assert (all_num == len(test_loader.sampler))
all_loss = all_loss / all_num
print(all_loss)
log.write('\n')
def run_submit(augment,thres):
print('running submit')
if augment in ['null', 'flip']:
augmentation = [
1, out_dir + '/test/%s-%s.prob.uint8.npy' % (split, augment),
]
csv_file = out_dir + '/test/%s-%s.csv' % (split, augment)
if augment == 'aug2':
augmentation = [
1, out_dir + '/test/%s-%s.prob.uint8.npy' % (split, 'null'),
1, out_dir + '/test/%s-%s.prob.uint8.npy' % (split, 'flip'),
]
csv_file = out_dir + '/test/%s-%s.csv' % (split, augment)
##---------------------------------------
# augments, csv_file = ['null','flip'], '/submit1_simple-valid0-300-aug.csv.gz'
# augments, csv_file = ['flip'], '/submit1_simple-xxx-flip.csv.gz'
# augments, csv_file = ['null'], '/submit1_simple-xxx-null.csv.gz'
##---------------------------------------
# save
log_file = csv_file + '.log'
write_list_to_file(augmentation, log_file)
augmentation = np.array(augmentation, dtype=object).reshape(-1, 2)
num_augments = len(augmentation)
w, augment_file = augmentation[0]
all_prob = w * np.load(augment_file).astype(np.float32) / 255
all_w = w
for i in range(1, num_augments):
w, augment_file = augmentation[i]
prob = w * np.load(augment_file).astype(np.float32) / 255
all_prob += prob
all_w += w
all_prob /= all_w
all_prob = all_prob > thres
print(all_prob.shape)
# ----------------------------
split_file = 'E:\\DHWorkStation\\Project\\tgs_pytorch\\data/split/' + split
lines = read_list_from_file(split_file)
id = []
rle_mask = []
for n, line in enumerate(lines):
folder, name = line.split('/')
id.append(name)
if (all_prob[n].sum() <= 0):
encoding = ''
else:
encoding = run_length_encode(all_prob[n])
assert (encoding != [])
rle_mask.append(encoding)
df = pd.DataFrame({'id': id, 'rle_mask': rle_mask}).astype(str)
df.to_csv(csv_file, index=False, columns=['id', 'rle_mask'], encoding='utf-8')
print('submit done')
# csv_file = out_dir + '/submit1_iter20k-1.csv'
# df.to_csv(csv_file, index=False, columns=['id', 'rle_mask'])
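# For reference, a minimal sketch of the Kaggle-style run-length encoding the
# submission format expects (column-major order, 1-indexed run starts); the
# project's own run_length_encode in data_util is what is actually used above:
def rle_sketch(mask):
    pixels = np.concatenate([[0], mask.flatten(order='F').astype(np.uint8), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1  # positions where the value changes
    runs[1::2] -= runs[0::2]                           # turn end positions into run lengths
    return ' '.join(str(x) for x in runs)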
############################################################################################
def run_local_leaderboard(augment):
# -----------------------------------------------------------------------
submit_file = out_dir + '/test/%s-%s.csv' % (split, augment)
dump_dir = out_dir + '/test/%s-%s-dump' % (split, augment)
os.makedirs(dump_dir, exist_ok=True)
log = Logger()
log.open(out_dir + '/test/log.submit.txt', mode='a')
split_file = 'E:\\DHWorkStation\\Project\\tgs_pytorch\\data/split/' + split
lines = read_list_from_file(split_file)
ids = [line.split('/')[-1] for line in lines]
    ids = sorted(ids)
df_submit = pd.read_csv(submit_file).set_index('id')
df_submit = df_submit.fillna('')
df_truth = pd.read_csv('E:\\DHWorkStation\\Project\\tgs_pytorch\\data/train.csv').set_index('id')
df_truth = df_truth.loc[ids]
df_truth = df_truth.fillna('')
N = len(df_truth)
    predict = np.zeros((N, 101, 101), bool)
    truth = np.zeros((N, 101, 101), bool)
for n in range(N):
id = ids[n]
p = df_submit.loc[id].rle_mask
t = df_truth.loc[id].rle_mask
        p = run_length_decode(p, H=101, W=101, fill_value=1).astype(bool)
        t = run_length_decode(t, H=101, W=101, fill_value=1).astype(bool)
predict[n] = p
truth[n] = t
# if 0:
# image_p = predict[n].astype(np.uint8)*255
# image_t = truth[n] .astype(np.uint8)*255
# image_show('image_p', image_p,2)
# image_show('image_t', image_t,2)
# cv2.waitKey(0)
##--------------
### Threshold Optimizer
precision, result, threshold = do_kaggle_metric(predict, truth, threshold=0.5)
precision_mean = precision.mean()
tp, fp, fn, tn_empty, fp_empty = result.transpose(1, 2, 0).sum(2)
all = tp + fp + fn + tn_empty + fp_empty
p = (tp + tn_empty) / (tp + tn_empty + fp + fp_empty + fn)
log.write('\n')
log.write(' | | | empty | \n')
log.write('th | prec | tp fp fn | tn fp | \n')
log.write('-------------------------------------------------------------------------------------------\n')
for i, t in enumerate(threshold):
log.write(
'%0.2f | %0.2f | %3d / %0.2f %3d / %0.2f %3d / %0.2f | %3d / %0.2f %3d / %0.2f | %5d\n' % (
t, p[i],
tp[i], tp[i] / all[i],
fp[i], fp[i] / all[i],
fn[i], fn[i] / all[i],
tn_empty[i], tn_empty[i] / all[i],
fp_empty[i], fp_empty[i] / all[i],
all[i])
)
log.write('\n')
log.write('num images : %d\n' % N)
log.write('LB score : %0.5f\n' % (precision_mean))
# --------------------------------------
predict = predict.reshape(N, -1)
truth = truth.reshape(N, -1)
p = predict > 0.5
t = truth > 0.5
intersection = t & p
union = t | p
# iou = intersection.sum(1)/(union.sum(1)+EPS)
log.write('iou : %0.5f\n' % (intersection.sum() / (union.sum() + EPS)))
return
# exit(0)
## show --------------------------
predicts = predict.reshape(-1, 101, 101).astype(np.float32)
truths = truth.reshape(-1, 101, 101).astype(np.float32)
for m, name in enumerate(ids):
print('%s' % name)
print(' | | | empty | ')
print('th | prec | tp fp fn | tn fp | ')
print('------------------------------------------------')
for i, t in enumerate(threshold):
tp, fp, fn, fp_empty, tn_empty = result[m, :, i]
p = (tp + tn_empty) / (tp + tn_empty + fp + fp_empty + fn)
print('%0.2f | %0.2f | %d %d %d | %d %d ' % (
t, p, tp, fp, fn, fp_empty, tn_empty))
print(precision[m])
print('')
# ----
image_file = '/root/share/project/kaggle/tgs/data/train/images/' + name + '.png'
image = cv2.imread(image_file, cv2.IMREAD_COLOR)
# mask = mask>0
predict = predicts[m]
truth = truths[m]
# print(predict.sum())
overlay0 = draw_mask_overlay(predict, image, color=[0, 0, 255])
overlay0 = draw_mask_to_contour_overlay(predict, overlay0, 1, color=[0, 0, 255])
overlay1 = draw_mask_overlay(truth, image, color=[0, 255, 0])
overlay1 = draw_mask_to_contour_overlay(truth, overlay1, 1, color=[0, 255, 0])
overlay2 = draw_mask_overlay(predict, None, color=[0, 0, 255])
overlay2 = draw_mask_overlay(truth, overlay2, color=[0, 255, 0])
draw_shadow_text(image, '%0.2f' % precision[m], (3, 15), 0.5, [255, 255, 255], 1)
overlay = np.hstack([image, overlay0, overlay1, overlay2])
cv2.imwrite(dump_dir + '/%s.png' % name, overlay)
image_show('overlay', overlay, 2)
cv2.waitKey(1)
if mode == 'valid':
for a in ['null', 'flip']:
print('a=', a)
run_predict(a)
#run_submit('aug2')
for t in np.arange(0.3,0.6,0.01):
print(t)
for a in [ 'aug2']:
print('a=', a)
run_submit(a,t)
run_local_leaderboard(a)
if mode == 'test':
run_predict('null')
run_predict('flip')
run_submit('aug2',0.51)
# run_local_leaderboard()
print('\nsuccess!')
|
# region imports
import win32com.client
import openpyxl
from openpyxl.utils import coordinate_from_string, column_index_from_string
from openpyxl.worksheet import cell_range
import re
import tkinter as tk
# endregion
# region class imports
import Scraper
import Interface
from Interface import interface
# endregion
def main_program():
root = tk.Tk()
app = interface(root)
# TODO: Set up file dialog to handle selecting filepath
msg = Scraper.open_email(
r'C:\Users\Couch\Desktop\TimesheetReader\test.msg')
# Load Excel workbook
path = app.browse_file_dialog()
wb = openpyxl.load_workbook(path)
sheet = wb.active
    # Each day has 3 cells (Start - Break - Finish) across 7 rows; 7 * 3 = 21
    MAX_CELL_COUNT = len(sheet['D5':'F11']) * 3
# Get list of times from email
# TODO: Fix disgusting regex
regex = r'\d?\d?\:?\d?\d?\s\w\.\w\.|-'
times = Scraper.scrape_msg(msg, regex)
    # Prefill all slots with 0 so every cell gets a value in Excel,
    # then overwrite the leading slots with the scraped times
    days = [0] * MAX_CELL_COUNT
    for i in range(min(len(times), MAX_CELL_COUNT)):
        days[i] = str(times[i])
# Format times
days = Scraper.format_times(days)
Interface.print_status(
'Copying times to spreadsheet: {0} at path: {1}'.format(str(sheet), path))
# write days data to cells
i = 0
for rowOfCells in sheet['D5':'F11']:
for cell in rowOfCells:
cell.value = days[i]
i += 1
print('\tRow: {0} copied!'.format(str(rowOfCells)))
wb.save(path)
Interface.print_status("Completed\n{0}".format('='*100))
root.mainloop()
if __name__ == '__main__':
main_program() |
import numpy as np
import json
import os
import sys
import eval_helpers
def computeDist(gtFrames,prFrames):
assert(len(gtFrames) == len(prFrames))
nJoints = eval_helpers.Joint().count
distAll = {}
for pidx in range(nJoints):
distAll[pidx] = np.zeros([0,0])
for imgidx in range(len(gtFrames)):
# ground truth
gtFrame = gtFrames[imgidx]
# prediction
detFrame = prFrames[imgidx]
if (gtFrames[imgidx]["annorect"] is not None):
for ridx in range(len(gtFrames[imgidx]["annorect"])):
rectGT = gtFrames[imgidx]["annorect"][ridx]
rectPr = prFrames[imgidx]["annorect"][ridx]
if ("annopoints" in rectGT.keys() and rectGT["annopoints"] is not None):
pointsGT = rectGT["annopoints"][0]["point"]
pointsPr = rectPr["annopoints"][0]["point"]
for pidx in range(len(pointsGT)):
pointGT = [pointsGT[pidx]["x"][0],pointsGT[pidx]["y"][0]]
idxGT = pointsGT[pidx]["id"][0]
p = eval_helpers.getPointGTbyID(pointsPr,idxGT)
if (len(p) > 0 and
isinstance(p["x"][0], (int, float)) and
isinstance(p["y"][0], (int, float))):
pointPr = [p["x"][0],p["y"][0]]
# compute distance between GT and prediction
d = np.linalg.norm(np.subtract(pointGT,pointPr))
# compute head size for distance normalization
headSize = eval_helpers.getHeadSize(rectGT["x1"][0],rectGT["y1"][0],
rectGT["x2"][0],rectGT["y2"][0])
# normalize distance
dNorm = d/headSize
else:
dNorm = np.inf
distAll[idxGT] = np.append(distAll[idxGT],[[dNorm]])
return distAll
def computePCK(distAll,distThresh):
pckAll = np.zeros([len(distAll)+1,1])
nCorrect = 0
nTotal = 0
for pidx in range(len(distAll)):
idxs = np.argwhere(distAll[pidx] <= distThresh)
pck = 100.0*len(idxs)/len(distAll[pidx])
pckAll[pidx,0] = pck
nCorrect += len(idxs)
nTotal += len(distAll[pidx])
pckAll[len(distAll),0] = 100.0*nCorrect/nTotal
return pckAll
def evaluatePCKh(gtFramesAll,prFramesAll):
distThresh = 0.5
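    # PCKh: a predicted keypoint counts as correct when its distance to the
    # ground truth, normalized by the person's head size, is <= distThresh.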
# compute distances
distAll = computeDist(gtFramesAll,prFramesAll)
# compute PCK metric
pckAll = computePCK(distAll,distThresh)
return pckAll
|
# User registration form using Python and Tkinter
# Import everything from tkinter (alternatively: import tkinter as tk)
from tkinter import *
# send_data retrieves the values stored in the form variables so the submit button works
def send_data():
nombredeusuario_data =nombredeusuario.get()
contraseña_data =str(contraseña.get())
nombrecompleto_data =nombrecompleto.get()
edad_data =str(edad.get())
correoelectronico_data =correoelectronico.get()
    # get() returns the text currently held in each field (it does not clear it)
    print(nombredeusuario_data, "\t",contraseña_data,"\t", nombrecompleto_data, "\t",edad_data,"\t", correoelectronico_data )  # "\t" tabs the output into readable columns
    # Store the submitted information in a text file; open() creates it if needed
    nuevoarchivo = open ("REGISTRO.txt", "a")  # "a" = append: new records go at the end of the file
nuevoarchivo.write(nombredeusuario_data)
nuevoarchivo.write("\t")
nuevoarchivo.write (contraseña_data)
nuevoarchivo.write("\t")
nuevoarchivo.write(nombrecompleto_data)
nuevoarchivo.write("\t")
nuevoarchivo.write(edad_data)
nuevoarchivo.write("\t")
nuevoarchivo.write(correoelectronico_data)
nuevoarchivo.write("\n")
# ("\n") nos permitira espaciar el texto para que se vea mas estetico.
nuevoarchivo.close()
print(" Nuevo Usuario Registrado. Nombre de Usuario: {} | Nombre Completo: {} ".format(nombredeusuario_data, nombrecompleto_data))
# se imprime por consola para verificar como se esta guardando en el registro de texto.
# Creacion de ingreso de datos del usuario, despues de entrar los datos con entry y enviarlos.Nos aseguaramos que las casillas se limpien con delete (0,END)
nombredeusuario_entry.delete(0,END)
contraseña_entry.delete(0,END)
nombrecompleto_entry.delete(0,END)
edad_entry.delete(0,END)
correoelectronico_entry.delete(0,END)
    # delete(0, END) clears each field from index 0 to the end; open REGISTRO.txt to confirm the record was saved
# Create and lay out the main window
miventana=Tk()  # Tk comes from the tkinter library
miventana.geometry("650x550")  # geometry sets the window dimensions
miventana.title("User Registration")
miventana.resizable(False,False)  # fix the window size
miventana.config(background="lightsteelblue")  # background colour of the window
titulo_principal = Label(text="User Registration | UbicuiLAB", font=("Arial" ,15), bg="steelblue", fg="white", width="550", height="2")
titulo_principal.pack()  # pack positions the title at the top of the window
# Label defines the caption for each of the form's fields
nombredeusuario_label= Label(text="Username", bg="lightsteelblue")
nombredeusuario_label.place(x=270, y=70)
contraseña_label= Label(text="Password", bg="lightsteelblue")
contraseña_label.place(x=298, y=140)
nombrecompleto_label= Label(text="Full Name", bg="lightsteelblue")
nombrecompleto_label.place(x=276, y=220)
edad_label= Label(text="Age",bg="lightsteelblue")
edad_label.place(x=312, y=300)
Correoelectronico_label= Label(text="Email Address", bg="lightsteelblue")
Correoelectronico_label.place(x=274, y=380)
# place positions each label inside the window
# Form variables: StringVar (from the tkinter library) holds each field's text
nombredeusuario = StringVar()
contraseña = StringVar()
nombrecompleto= StringVar()
edad = StringVar()
correoelectronico = StringVar()
# Each Entry is bound to its StringVar through textvariable, one per variable declared above
nombredeusuario_entry = Entry(textvariable=nombredeusuario, width="40")
contraseña_entry = Entry(textvariable=contraseña, width="40", show="*")
nombrecompleto_entry = Entry(textvariable=nombrecompleto, width="40")
edad_entry = Entry(textvariable=edad, width="40")
correoelectronico_entry = Entry(textvariable=correoelectronico, width="40")
# show="*" masks whatever is typed into the password field
# place positions each entry inside the window
nombredeusuario_entry.place(x=160, y=108)
contraseña_entry.place(x=160, y=182)
nombrecompleto_entry.place(x=160, y=260)
edad_entry.place(x=160, y=340)
correoelectronico_entry.place(x=160, y=420)
# Button that captures the entered data when the user clicks it
enviarinformacion_btn = Button(miventana, text= "Submit Information", command= send_data, width= "30", height= "2", bg= "lightgrey")
# command routes the click to send_data
enviarinformacion_btn.place(x=190, y=470)
miventana.mainloop()  # mainloop starts the event loop; the window is ready to run
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 19:51:43 2018
@author: Matheus
"""
from rake_nltk import Rake
# function to rank the words of the text
def GetRelevanteKeyWords(text, quantity, language):
r = Rake(language=language)
r.extract_keywords_from_text(text)
    r.get_ranked_phrases()  # keyword phrases ranked highest to lowest (result not used below)
    r.get_word_frequency_distribution()
    word_degrees = r.get_word_degrees()
    keyWordsSorted = sorted(word_degrees.items(), key=lambda x: x[1], reverse = True)
keyWordsRelevante = [x[0] for x in keyWordsSorted[0:quantity]]
return {'keywords': keyWordsRelevante, 'result': keyWordsSorted}
from nltk import WordNetLemmatizer
from nltk import re
# function to extract the tokens of the text
def tokenize(text):
    # remove punctuation; keep anchor tags, other tags, and word-like tokens
    tokens = re.findall(r"<a.*?/a>|<[^\>]*>|[\w'@#]+",
                        text.lower())
# lemmatize words. try both noun and verb lemmatizations
lmtzr = WordNetLemmatizer()
for i in range(0,len(tokens)):
res = lmtzr.lemmatize(tokens[i])
if res == tokens[i]:
tokens[i] = lmtzr.lemmatize(tokens[i], 'v')
else:
tokens[i] = res
return tokens
text = "cry The Project MATHEUS Matheus Gutenberg EBook of a a a a a a cry matheus Crime and Punishment, cry by MATHEUS Matheus Fyodor Dostoevsky\r\n"
tokenize(text)
keyWordsExtraction = GetRelevanteKeyWords(text, 5, "english") # portuguese or english
keyWordsExtraction['keywords']
keyWordsExtraction['result'] |
import sys, os
from typing import Optional
import multiprocessing
from tqdm import tqdm
from functools import reduce
import pickle
from shutil import rmtree
sys.path.append("..")
from util.utility import split_equal, preprocess_sentence
from edit_distance.substring_distance import Sub_dist
base_path = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
def distance_words(sentences: list, lev_handler: Sub_dist, store_path: Optional[str], pnum: int):
results = []
for sentence in tqdm(sentences):
tokens = preprocess_sentence(sentence)
results.append(lev_handler.get_sentence_hypothesis(tokens))
if store_path is not None:
with open(os.path.join(store_path,f"{pnum}.pkl"),"wb") as f:
pickle.dump(results,f)
del results
return None
return results
def multiprocess_prior(lev_handler: Sub_dist, sentences: list,store_path=None):
if store_path is not None:
if os.path.isdir(store_path):
rmtree(store_path)
os.makedirs(store_path)
processes = multiprocessing.cpu_count()-1
mult_args = list(zip(split_equal(sentences,processes),[lev_handler]*processes,[store_path]*processes,range(processes)))
with multiprocessing.Pool(processes=processes) as pool:
results = pool.starmap(distance_words, mult_args)
if store_path is None:
return reduce(lambda x, y: x+y, results) |
"Interface to Phantom"
from amuse.community import (
CodeInterface,
LegacyFunctionSpecification,
legacy_function,
LiteratureReferencesMixIn,
)
from amuse.community.interface.gd import (
GravitationalDynamicsInterface,
GravitationalDynamics,
# GravityFieldInterface,
GravityFieldCode,
)
from amuse.community.interface.stopping_conditions import (
StoppingConditionInterface,
StoppingConditions,
)
from amuse.units import units, generic_unit_system
from amuse.units.generic_unit_converter import ConvertBetweenGenericAndSiUnits
class PhantomInterface(
CodeInterface,
LiteratureReferencesMixIn,
GravitationalDynamicsInterface,
StoppingConditionInterface,
# SinglePointGravityFieldInterface,
):
"""
The Phantom Smoothed Particle Hydrodynamics code, by Daniel Price et al.
References:
.. [#] ADS:2018PASA...35...31P (Price et al., 2018, PASA, Volume 35, id.e031 82 pp)
"""
def __init__(self, **options):
CodeInterface.__init__(
self,
name_of_the_worker="phantom_worker",
**options)
LiteratureReferencesMixIn.__init__(self)
@legacy_function
def new_dm_particle():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.OUT,
)
for x in ['mass', 'x', 'y', 'z', 'vx', 'vy', 'vz']:
function.addParameter(x, dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
def new_particle(self, mass, x, y, z, vx, vy, vz):
return self.new_dm_particle(mass, x, y, z, vx, vy, vz)
@legacy_function
def new_sph_particle():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.OUT,
)
for x in ['mass', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'u']:
function.addParameter(x, dtype='float64', direction=function.IN)
function.addParameter(
'h_smooth', dtype='float64', direction=function.IN, default=0.01,
)
function.result_type = 'int32'
return function
@legacy_function
def new_sink_particle():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.OUT,
)
for x in ['mass', 'x', 'y', 'z', 'vx', 'vy', 'vz']:
function.addParameter(x, dtype='float64', direction=function.IN)
function.addParameter(
'radius', dtype='float64', direction=function.IN, default=0.01,
# default should be h_acc
)
function.addParameter(
'h_smooth', dtype='float64', direction=function.IN, default=0.01,
# default should be h_smooth_sinksink?
)
function.result_type = 'int32'
return function
@legacy_function
def get_state_dm():
"""
Retrieve the current state of a DM particle. The mass, position and
velocity are returned.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description="""Index of the particle to get the state from. This
index must have been returned by an earlier call to
:meth:`new_particle`""")
function.addParameter(
'mass', dtype='float64', direction=function.OUT,
description="The current mass of the particle")
function.addParameter(
'x', dtype='float64', direction=function.OUT,
description="The current position vector of the particle")
function.addParameter(
'y', dtype='float64', direction=function.OUT,
description="The current position vector of the particle")
function.addParameter(
'z', dtype='float64', direction=function.OUT,
description="The current position vector of the particle")
function.addParameter(
'vx', dtype='float64', direction=function.OUT,
description="The current velocity vector of the particle")
function.addParameter(
'vy', dtype='float64', direction=function.OUT,
description="The current velocity vector of the particle")
function.addParameter(
'vz', dtype='float64', direction=function.OUT,
description="The current velocity vector of the particle")
function.result_type = 'int32'
function.result_doc = """
        0 - OK
            particle was found in the model and the state was retrieved
-1 - ERROR
particle could not be found
"""
return function
@legacy_function
def get_state_sink():
"""
Retrieve the current state of a sink particle. The mass, position and
velocity are returned.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description="""Index of the particle to get the state from. This
index must have been returned by an earlier call to
:meth:`new_particle`""")
function.addParameter(
'mass', dtype='float64', direction=function.OUT,
description="The current mass of the particle")
function.addParameter(
'x', dtype='float64', direction=function.OUT,
description="The current position vector of the particle")
function.addParameter(
'y', dtype='float64', direction=function.OUT,
description="The current position vector of the particle")
function.addParameter(
'z', dtype='float64', direction=function.OUT,
description="The current position vector of the particle")
function.addParameter(
'vx', dtype='float64', direction=function.OUT,
description="The current velocity vector of the particle")
function.addParameter(
'vy', dtype='float64', direction=function.OUT,
description="The current velocity vector of the particle")
function.addParameter(
'vz', dtype='float64', direction=function.OUT,
description="The current velocity vector of the particle")
function.addParameter(
'radius', dtype='float64', direction=function.OUT,
description="The accretion radius of the particle")
function.addParameter(
'h_smooth', dtype='float64', direction=function.OUT,
description="The smoothing length of the particle")
function.result_type = 'int32'
function.result_doc = """
        0 - OK
            particle was found in the model and the state was retrieved
-1 - ERROR
particle could not be found
"""
return function
def get_state(self, index_of_the_particle):
return self.get_state_dm(index_of_the_particle)
@legacy_function
def get_state_sph():
"""
Retrieve the current state of an SPH particle. The mass, position,
velocity, internal energy and smoothing length are returned.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description="""Index of the particle to get the state from. This
index must have been returned by an earlier call to
:meth:`new_particle`""")
function.addParameter(
'mass', dtype='float64', direction=function.OUT,
description="The current mass of the particle")
function.addParameter(
'x', dtype='float64', direction=function.OUT,
description="The current position vector of the particle")
function.addParameter(
'y', dtype='float64', direction=function.OUT,
description="The current position vector of the particle")
function.addParameter(
'z', dtype='float64', direction=function.OUT,
description="The current position vector of the particle")
function.addParameter(
'vx', dtype='float64', direction=function.OUT,
description="The current velocity vector of the particle")
function.addParameter(
'vy', dtype='float64', direction=function.OUT,
description="The current velocity vector of the particle")
function.addParameter(
'vz', dtype='float64', direction=function.OUT,
description="The current velocity vector of the particle")
function.addParameter(
'u', dtype='float64', direction=function.OUT,
description="The current internal energy of the particle")
function.addParameter(
'h_smooth', dtype='float64', direction=function.OUT,
description="The current smoothing length of the particle")
function.result_type = 'int32'
function.result_doc = """
        0 - OK
            particle was found in the model and the state was retrieved
-1 - ERROR
particle could not be found
"""
return function
@legacy_function
def set_state_sph():
"""
Update the current state of an SPH particle. The mass, position,
velocity, internal energy and smoothing length are updated.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description="""Index of the particle for which the state is to be
updated. This index must have been returned by an earlier call to
:meth:`new_particle`""")
function.addParameter(
'mass', dtype='float64', direction=function.IN,
description="The new mass of the particle")
function.addParameter(
'x', dtype='float64', direction=function.IN,
description="The new position vector of the particle")
function.addParameter(
'y', dtype='float64', direction=function.IN,
description="The new position vector of the particle")
function.addParameter(
'z', dtype='float64', direction=function.IN,
description="The new position vector of the particle")
function.addParameter(
'vx', dtype='float64', direction=function.IN,
description="The new velocity vector of the particle")
function.addParameter(
'vy', dtype='float64', direction=function.IN,
description="The new velocity vector of the particle")
function.addParameter(
'vz', dtype='float64', direction=function.IN,
description="The new velocity vector of the particle")
function.addParameter(
'u', dtype='float64', direction=function.IN,
description="The new internal energy of the particle")
function.addParameter(
'h_smooth', dtype='float64', direction=function.IN,
description="The new smoothing length of the particle")
function.result_type = 'int32'
function.result_doc = """
0 - OK
particle was found in the model and the information was set
-1 - ERROR
particle could not be found
-2 - ERROR
code does not support updating of a particle
-3 - ERROR
not yet implemented
"""
return function
def set_state(
self, index_of_the_particle, mass, x, y, z, vx, vy, vz,
):
return self.set_state_dm(
index_of_the_particle, mass, x, y, z, vx, vy, vz)
@legacy_function
def set_state_dm():
"""
Update the current state of a DM particle. The mass, position and
velocity are updated.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description="""Index of the particle for which the state is to be
updated. This index must have been returned by an earlier call to
:meth:`new_particle`""")
function.addParameter(
'mass', dtype='float64', direction=function.IN,
description="The new mass of the particle")
function.addParameter(
'x', dtype='float64', direction=function.IN,
description="The new position vector of the particle")
function.addParameter(
'y', dtype='float64', direction=function.IN,
description="The new position vector of the particle")
function.addParameter(
'z', dtype='float64', direction=function.IN,
description="The new position vector of the particle")
function.addParameter(
'vx', dtype='float64', direction=function.IN,
description="The new velocity vector of the particle")
function.addParameter(
'vy', dtype='float64', direction=function.IN,
description="The new velocity vector of the particle")
function.addParameter(
'vz', dtype='float64', direction=function.IN,
description="The new velocity vector of the particle")
# function.addParameter(
# 'radius', dtype='float64', direction=function.IN,
# description="The new softening length of the particle")
function.result_type = 'int32'
function.result_doc = """
0 - OK
particle was found in the model and the information was set
-1 - ERROR
particle could not be found
-2 - ERROR
code does not support updating of a particle
-3 - ERROR
not yet implemented
"""
return function
@legacy_function
def set_state_sink():
"""
Update the current state of a sink particle. The mass, position and
velocity are updated.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description="""Index of the particle for which the state is to be
updated. This index must have been returned by an earlier call to
:meth:`new_particle`""")
function.addParameter(
'mass', dtype='float64', direction=function.IN,
description="The new mass of the particle")
function.addParameter(
'x', dtype='float64', direction=function.IN,
description="The new position vector of the particle")
function.addParameter(
'y', dtype='float64', direction=function.IN,
description="The new position vector of the particle")
function.addParameter(
'z', dtype='float64', direction=function.IN,
description="The new position vector of the particle")
function.addParameter(
'vx', dtype='float64', direction=function.IN,
description="The new velocity vector of the particle")
function.addParameter(
'vy', dtype='float64', direction=function.IN,
description="The new velocity vector of the particle")
function.addParameter(
'vz', dtype='float64', direction=function.IN,
description="The new velocity vector of the particle")
function.addParameter(
'radius', dtype='float64', direction=function.IN,
description="The accretion radius of the particle")
function.addParameter(
'h_smooth', dtype='float64', direction=function.IN,
description="The smoothing length of the particle")
function.result_type = 'int32'
function.result_doc = """
0 - OK
particle was found in the model and the information was set
-1 - ERROR
particle could not be found
-2 - ERROR
code does not support updating of a particle
-3 - ERROR
not yet implemented
"""
return function
@legacy_function
def set_internal_energy():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'u', dtype='float64', direction=function.IN,
description='',
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_h2ratio():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'h2ratio', dtype='float64', direction=function.IN,
description='',
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_hi_abundance():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'hi_abundance', dtype='float64', direction=function.IN,
description='',
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_proton_abundance():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'proton_abundance', dtype='float64', direction=function.IN,
description='',
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_electron_abundance():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'electron_abundance', dtype='float64', direction=function.IN,
description='',
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_co_abundance():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'co_abundance', dtype='float64', direction=function.IN,
description='',
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_smoothing_length():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'h_smooth', dtype='float64', direction=function.IN,
description=''
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_density():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description=''
)
function.addParameter(
'density', dtype='float64', direction=function.OUT,
description="The current density of the particle"
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_pressure():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'pressure', dtype='float64', direction=function.OUT,
description=''
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_h2ratio():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'h2ratio', dtype='float64', direction=function.OUT,
description=''
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_hi_abundance():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'hi_abundance', dtype='float64', direction=function.OUT,
description=''
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_proton_abundance():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'proton_abundance', dtype='float64', direction=function.OUT,
description=''
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_electron_abundance():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'electron_abundance', dtype='float64', direction=function.OUT,
description=''
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_co_abundance():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description='',
)
function.addParameter(
'co_abundance', dtype='float64', direction=function.OUT,
description=''
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_internal_energy():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description=''
)
function.addParameter(
'u', dtype='float64', direction=function.OUT,
description="The current internal energy of the particle",
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_smoothing_length():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter(
'index_of_the_particle', dtype='int32', direction=function.IN,
description=''
)
function.addParameter(
'h_smooth', dtype='float64', direction=function.OUT,
description="The current smoothing length of the particle"
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_time_step():
function = LegacyFunctionSpecification()
function.addParameter(
'time_step', dtype='float64', direction=function.OUT,
unit=generic_unit_system.time,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_time_step():
function = LegacyFunctionSpecification()
function.addParameter(
'time_step', dtype='float64', direction=function.IN,
unit=generic_unit_system.time,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_c_courant():
function = LegacyFunctionSpecification()
function.addParameter(
'c_courant', dtype='float64', direction=function.OUT,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_c_courant():
function = LegacyFunctionSpecification()
function.addParameter(
'c_courant', dtype='float64', direction=function.IN,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_c_force():
function = LegacyFunctionSpecification()
function.addParameter(
'C_force', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_c_force():
function = LegacyFunctionSpecification()
function.addParameter(
'C_force', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_c_cool():
function = LegacyFunctionSpecification()
function.addParameter(
'C_cool', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_c_cool():
function = LegacyFunctionSpecification()
function.addParameter(
'C_cool', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_tolv():
function = LegacyFunctionSpecification()
function.addParameter(
'tolv', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_tolv():
function = LegacyFunctionSpecification()
function.addParameter(
'tolv', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_hfact():
function = LegacyFunctionSpecification()
function.addParameter(
'hfact', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_hfact():
function = LegacyFunctionSpecification()
function.addParameter(
'hfact', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_tolh():
function = LegacyFunctionSpecification()
function.addParameter(
'tolh', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_tolh():
function = LegacyFunctionSpecification()
function.addParameter(
'tolh', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_tree_accuracy():
function = LegacyFunctionSpecification()
function.addParameter(
'tree_accuracy', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_tree_accuracy():
function = LegacyFunctionSpecification()
function.addParameter(
'tree_accuracy', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_alpha():
function = LegacyFunctionSpecification()
function.addParameter(
'alpha', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_alpha():
function = LegacyFunctionSpecification()
function.addParameter(
'alpha', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_alphamax():
function = LegacyFunctionSpecification()
function.addParameter(
'alphamax', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_alphamax():
function = LegacyFunctionSpecification()
function.addParameter(
'alphamax', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_beta():
function = LegacyFunctionSpecification()
function.addParameter(
'beta', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_beta():
function = LegacyFunctionSpecification()
function.addParameter(
'beta', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_avdecayconst():
function = LegacyFunctionSpecification()
function.addParameter(
'avdecayconst', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_avdecayconst():
function = LegacyFunctionSpecification()
function.addParameter(
'avdecayconst', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_idamp():
function = LegacyFunctionSpecification()
function.addParameter(
'idamp', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_idamp():
function = LegacyFunctionSpecification()
function.addParameter(
'idamp', dtype='int32', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_ieos():
function = LegacyFunctionSpecification()
function.addParameter(
'ieos', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_ieos():
function = LegacyFunctionSpecification()
function.addParameter(
'ieos', dtype='int32', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_icooling():
function = LegacyFunctionSpecification()
function.addParameter(
'icooling', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_icooling():
function = LegacyFunctionSpecification()
function.addParameter(
'icooling', dtype='int32', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_polyk():
function = LegacyFunctionSpecification()
function.addParameter(
'polyk', dtype='float64', direction=function.OUT,
unit=(generic_unit_system.speed**2)
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_polyk():
function = LegacyFunctionSpecification()
function.addParameter(
'polyk', dtype='float64', direction=function.IN,
unit=(generic_unit_system.speed**2)
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_gamma():
function = LegacyFunctionSpecification()
function.addParameter(
'gamma', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_gamma():
function = LegacyFunctionSpecification()
function.addParameter(
'gamma', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_mu():
function = LegacyFunctionSpecification()
function.addParameter(
'mu', dtype='float64', direction=function.OUT,
unit=units.amu
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_mu():
function = LegacyFunctionSpecification()
function.addParameter(
'mu', dtype='float64', direction=function.IN,
unit=units.amu
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_rhofinal():
function = LegacyFunctionSpecification()
function.addParameter(
'rhofinal', dtype='float64', direction=function.OUT,
unit=(generic_unit_system.density),
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_rhofinal():
function = LegacyFunctionSpecification()
function.addParameter(
'rhofinal', dtype='float64', direction=function.IN,
unit=(generic_unit_system.density),
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_rho_crit():
function = LegacyFunctionSpecification()
function.addParameter(
'rho_crit', dtype='float64', direction=function.OUT,
unit=(generic_unit_system.density),
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_rho_crit():
function = LegacyFunctionSpecification()
function.addParameter(
'rho_crit', dtype='float64', direction=function.IN,
unit=(generic_unit_system.density),
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_r_crit():
function = LegacyFunctionSpecification()
function.addParameter(
'r_crit', dtype='float64', direction=function.OUT,
unit=generic_unit_system.length,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_r_crit():
function = LegacyFunctionSpecification()
function.addParameter(
'r_crit', dtype='float64', direction=function.IN,
unit=generic_unit_system.length,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_h_acc():
function = LegacyFunctionSpecification()
function.addParameter(
'h_acc', dtype='float64', direction=function.OUT,
unit=generic_unit_system.length,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_h_acc():
function = LegacyFunctionSpecification()
function.addParameter(
'h_acc', dtype='float64', direction=function.IN,
unit=generic_unit_system.length,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_h_soft_sinkgas():
function = LegacyFunctionSpecification()
function.addParameter(
'h_soft_sinkgas', dtype='float64', direction=function.OUT,
unit=generic_unit_system.length,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_h_soft_sinkgas():
function = LegacyFunctionSpecification()
function.addParameter(
'h_soft_sinkgas', dtype='float64', direction=function.IN,
unit=generic_unit_system.length,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_h_soft_sinksink():
function = LegacyFunctionSpecification()
function.addParameter(
'h_soft_sinksink', dtype='float64', direction=function.OUT,
unit=generic_unit_system.length,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_h_soft_sinksink():
function = LegacyFunctionSpecification()
function.addParameter(
'h_soft_sinksink', dtype='float64', direction=function.IN,
unit=generic_unit_system.length,
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_f_acc():
function = LegacyFunctionSpecification()
function.addParameter(
'f_acc', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_f_acc():
function = LegacyFunctionSpecification()
function.addParameter(
'f_acc', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_iexternalforce():
function = LegacyFunctionSpecification()
function.addParameter(
'iexternalforce', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_iexternalforce():
function = LegacyFunctionSpecification()
function.addParameter(
'iexternalforce', dtype='int32', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_irealvisc():
function = LegacyFunctionSpecification()
function.addParameter(
'irealvisc', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_irealvisc():
function = LegacyFunctionSpecification()
function.addParameter(
'irealvisc', dtype='int32', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_shearparam():
function = LegacyFunctionSpecification()
function.addParameter(
'shearparam', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_shearparam():
function = LegacyFunctionSpecification()
function.addParameter(
'shearparam', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_bulkvisc():
function = LegacyFunctionSpecification()
function.addParameter(
'bulkvisc', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_bulkvisc():
function = LegacyFunctionSpecification()
function.addParameter(
'bulkvisc', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_unit_length():
function = LegacyFunctionSpecification()
function.addParameter(
'unit_length', dtype='float64', direction=function.OUT,
unit=units.cm # generic_unit_system.length
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_unit_length():
function = LegacyFunctionSpecification()
function.addParameter(
'unit_length', dtype='float64', direction=function.IN,
unit=units.cm # generic_unit_system.length
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_unit_mass():
function = LegacyFunctionSpecification()
function.addParameter(
'unit_mass', dtype='float64', direction=function.OUT,
unit=units.g # generic_unit_system.mass
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_unit_mass():
function = LegacyFunctionSpecification()
function.addParameter(
'unit_mass', dtype='float64', direction=function.IN,
unit=units.g # generic_unit_system.mass
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def get_unit_time():
function = LegacyFunctionSpecification()
function.addParameter(
'unit_time', dtype='float64', direction=function.OUT,
unit=units.s # generic_unit_system.time
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
@legacy_function
def set_unit_time():
function = LegacyFunctionSpecification()
function.addParameter(
'unit_time', dtype='float64', direction=function.IN,
unit=units.s # generic_unit_system.time
)
function.result_type = 'int32'
function.result_doc = """
0 - OK
-1 - ERROR
"""
return function
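# The getters/setters above expose Phantom's runtime options one by one; the
# high-level Phantom class below binds them to AMUSE parameters, particle
# sets and unit conversion.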
class Phantom(GravitationalDynamics, GravityFieldCode):
__interface__ = PhantomInterface
def __init__(
self,
convert_nbody=None,
**options):
# if convert_nbody is None:
# NOTE we use a fixed converter here internally!
# Not doing this *really* complicates things as we'd need to change the
# internal units used in Phantom as well.
phantom_solarm = 1.9891e33 | units.g
phantom_pc = 3.086e18 | units.cm
phantom_gg = 6.672041e-8 | units.cm**3 * units.g**-1 * units.s**-2
# phantom_mass = 1.0 | units.MSun
# phantom_mass = 1.98892e33 | units.g
# phantom_time = 60 * 60 * 24 * 365.25 * 1e6 | units.s
# phantom_length = (phantom_time**2 * phantom_gg * phantom_mass)**(1/3)
phantom_mass = 1.0 * phantom_solarm
phantom_length = 0.1 * phantom_pc
phantom_time = (phantom_length**3 / (phantom_gg*phantom_mass))**0.5
unit_converter = ConvertBetweenGenericAndSiUnits(
# Phantom uses CGS units internally, scaled with G=1
# So we need to make sure we use those same units here...
# Also, Phantom's value for G is not the same as AMUSE's...
phantom_length,
phantom_mass, # 1.0 MSun
phantom_time, # 1 Julian Myr
)
convert_nbody = unit_converter
self.stopping_conditions = StoppingConditions(self)
GravitationalDynamics.__init__(
self,
PhantomInterface(**options),
convert_nbody,
**options
)
def initialize_code(self):
result = self.overridden().initialize_code()
        if self.unit_converter is not None:
            # The converted values are currently unused; the commented lines
            # below show how they would be pushed into the code.
            mass = self.unit_converter.to_si(generic_unit_system.mass)
            time = self.unit_converter.to_si(generic_unit_system.time)
# self.parameters._original.unit_mass = mass
# self.parameters._original.unit_time = time
# self.set_unit_mass(self.unit_converter.to_si(generic_unit_system.mass).value_in(units.g))
# self.set_unit_length(self.unit_converter.to_si(generic_unit_system.length).value_in(units.cm))
# self.set_unit_time(self.unit_converter.to_si(generic_unit_system.time).value_in(units.s))
return result
def define_state(self, handler):
GravitationalDynamics.define_state(self, handler)
GravityFieldCode.define_state(self, handler)
# self.stopping_conditions.define_state(handler)
handler.add_transition('END', 'INITIALIZED', 'initialize_code', False)
handler.add_method('END', 'initialize_code')
handler.add_transition('RUN', 'UPDATE', 'new_sph_particle', False)
handler.add_method('EDIT', 'new_sph_particle')
handler.add_method('UPDATE', 'new_sph_particle')
handler.add_transition('RUN', 'UPDATE', 'new_dm_particle', False)
handler.add_method('EDIT', 'new_dm_particle')
handler.add_method('UPDATE', 'new_dm_particle')
handler.add_transition('RUN', 'UPDATE', 'new_sink_particle', False)
handler.add_method('EDIT', 'new_sink_particle')
handler.add_method('UPDATE', 'new_sink_particle')
self.stopping_conditions.define_state(handler)
def define_parameters(self, handler):
handler.add_method_parameter(
"get_time_step",
"set_time_step",
"time_step",
"Maximum internal time step",
default_value=0.01 | generic_unit_system.time
)
handler.add_method_parameter(
"get_c_courant",
"set_c_courant",
"c_courant",
"Courant number",
default_value=0.3
)
handler.add_method_parameter(
"get_c_force",
"set_c_force",
"c_force",
"dt_force number",
default_value=0.25
)
handler.add_method_parameter(
"get_c_cool",
"set_c_cool",
"c_cool",
"dt_cool number",
default_value=0.25
)
handler.add_method_parameter(
"get_tolv",
"set_tolv",
"tolv",
"tolerance on v iterations in timestepping",
default_value=1.0e-2
)
handler.add_method_parameter(
"get_hfact",
"set_hfact",
"hfact",
"h in units of particle spacing [h = hfact(m/rho)^(1/3)]",
default_value=1.2
)
handler.add_method_parameter(
"get_tolh",
"set_tolh",
"tolh",
"tolerance on h-rho iterations",
default_value=1.0e-4
)
handler.add_method_parameter(
"get_tree_accuracy",
"set_tree_accuracy",
"tree_accuracy",
"tree opening criterion (0.0-1.0)",
default_value=0.5
)
handler.add_method_parameter(
"get_alpha",
"set_alpha",
"alpha",
"MINIMUM art. viscosity parameter",
default_value=0.
)
handler.add_method_parameter(
"get_alphamax",
"set_alphamax",
"alphamax",
"MAXIMUM art. viscosity parameter",
default_value=1.0
)
handler.add_method_parameter(
"get_beta",
"set_beta",
"beta",
"beta viscosity",
default_value=2.0
)
handler.add_method_parameter(
"get_avdecayconst",
"set_avdecayconst",
"avdecayconst",
"decay time constant for viscosity switches",
default_value=0.1
)
handler.add_method_parameter(
"get_idamp",
"set_idamp",
"idamp",
"artificial damping of velocities (0=off, 1=constant, 2=star)",
default_value=0
)
handler.add_method_parameter(
"get_polyk",
"set_polyk",
"polyk",
"polyk value",
default_value=(0. | units.km**2 * units.s**-2)
)
handler.add_method_parameter(
"get_ieos",
"set_ieos",
"ieos",
"eqn of state (1=isoth;2=adiab;3=locally iso;8=barotropic)",
default_value=1
)
handler.add_method_parameter(
"get_icooling",
"set_icooling",
"icooling",
"Cooling (0=off 1=default 2/3 = other)",
default_value=0
)
handler.add_method_parameter(
"get_gamma",
"set_gamma",
"gamma",
"gamma value ",
default_value=1
)
handler.add_method_parameter(
"get_mu",
"set_mu",
"mu",
"mean molecular weight",
default_value=(2.381 | units.amu)
)
handler.add_method_parameter(
"get_rhofinal",
"set_rhofinal",
"rhofinal",
"maximum allowed density (<=0 to ignore)",
default_value=(0 | generic_unit_system.density)
)
handler.add_method_parameter(
"get_rho_crit",
"set_rho_crit",
"rho_crit",
"density above which sink particles are created",
default_value=(1e-10 | units.g * units.cm**-3)
)
handler.add_method_parameter(
"get_r_crit",
"set_r_crit",
"r_crit",
"critical radius for point mass creation"
" (no new sinks < r_crit from existing sink)",
default_value=(0.005 | generic_unit_system.length)
)
handler.add_method_parameter(
"get_h_acc",
"set_h_acc",
"h_acc",
"accretion radius for new sink particles",
default_value=(0.001 | generic_unit_system.length)
)
handler.add_method_parameter(
"get_h_soft_sinkgas",
"set_h_soft_sinkgas",
"h_soft_sinkgas",
"softening length for new sink particles",
default_value=(0. | generic_unit_system.length)
)
handler.add_method_parameter(
"get_h_soft_sinksink",
"set_h_soft_sinksink",
"h_soft_sinksink",
"softening length between sink particles",
default_value=(0. | generic_unit_system.length)
)
handler.add_method_parameter(
"get_f_acc",
"set_f_acc",
"f_acc",
"particles < f_acc*h_acc accreted without checks",
default_value=0.8
)
handler.add_method_parameter(
"get_iexternalforce",
"set_iexternalforce",
"iexternalforce",
"1=star,2=coro,3=bina,4=prdr,5=toru,6=toys,7=exte,"
"8=spir,9=Lens,10=neut,11=Eins",
default_value=0
)
handler.add_method_parameter(
"get_irealvisc",
"set_irealvisc",
"irealvisc",
"physical viscosity type (0=none,1=const,2=Shakura/Sunyaev)",
default_value=0
)
handler.add_method_parameter(
"get_shearparam",
"set_shearparam",
"shearparam",
"magnitude of shear viscosity (irealvisc=1) or alpha_SS"
" (irealvisc=2)",
default_value=0.1
)
handler.add_method_parameter(
"get_bulkvisc",
"set_bulkvisc",
"bulkvisc",
"magnitude of bulk viscosity",
default_value=0.0
)
handler.add_method_parameter(
"get_unit_length",
"set_unit_length",
"code_unit_length",
"code unit length",
default_value=(
((60 * 60 * 24 * 365.25 * 1e6) | units.s)**2
* (6.672041e-8 | units.cm**3 * units.g**-1 * units.s**-2)
* (1.98892e33 | units.g)
)**(1/3)
)
handler.add_method_parameter(
"get_unit_mass",
"set_unit_mass",
"code_unit_mass",
"code unit mass",
default_value=1.98892e33 | units.g
)
handler.add_method_parameter(
"get_unit_time",
"set_unit_time",
"code_unit_time",
"code unit time",
default_value=3.15576e13 | units.s
)
self.stopping_conditions.define_parameters(handler)
def define_particle_sets(self, handler):
handler.define_super_set(
'particles',
['dm_particles', 'gas_particles', 'sink_particles'],
index_to_default_set=0,
)
handler.define_set('dm_particles', 'index_of_the_particle')
handler.set_new('dm_particles', 'new_dm_particle')
handler.set_delete('dm_particles', 'delete_particle')
handler.add_getter('dm_particles', 'get_state_dm')
handler.add_setter('dm_particles', 'set_state_dm')
handler.add_getter('dm_particles', 'get_mass')
handler.add_setter('dm_particles', 'set_mass')
handler.add_getter('dm_particles', 'get_position')
handler.add_setter('dm_particles', 'set_position')
handler.add_getter('dm_particles', 'get_velocity')
handler.add_setter('dm_particles', 'set_velocity')
handler.add_getter('dm_particles', 'get_acceleration')
handler.define_set('gas_particles', 'index_of_the_particle')
handler.set_new('gas_particles', 'new_sph_particle')
handler.set_delete('gas_particles', 'delete_particle')
handler.add_getter('gas_particles', 'get_state_sph')
handler.add_setter('gas_particles', 'set_state_sph')
handler.add_getter('gas_particles', 'get_mass')
handler.add_setter('gas_particles', 'set_mass')
handler.add_getter('gas_particles', 'get_position')
handler.add_setter('gas_particles', 'set_position')
handler.add_getter('gas_particles', 'get_velocity')
handler.add_setter('gas_particles', 'set_velocity')
handler.add_getter('gas_particles', 'get_acceleration')
handler.add_getter('gas_particles', 'get_internal_energy')
handler.add_setter('gas_particles', 'set_internal_energy')
handler.add_getter('gas_particles', 'get_smoothing_length')
handler.add_setter('gas_particles', 'set_smoothing_length')
handler.add_getter('gas_particles', 'get_density', names=('rho',))
handler.add_getter('gas_particles', 'get_density', names=('density',))
handler.add_getter('gas_particles', 'get_pressure')
handler.add_getter('gas_particles', 'get_h2ratio')
handler.add_getter('gas_particles', 'get_hi_abundance')
handler.add_getter('gas_particles', 'get_proton_abundance')
handler.add_getter('gas_particles', 'get_electron_abundance')
handler.add_getter('gas_particles', 'get_co_abundance')
handler.add_setter('gas_particles', 'set_h2ratio')
handler.add_setter('gas_particles', 'set_hi_abundance')
handler.add_setter('gas_particles', 'set_proton_abundance')
handler.add_setter('gas_particles', 'set_electron_abundance')
handler.add_setter('gas_particles', 'set_co_abundance')
handler.define_set('sink_particles', 'index_of_the_particle')
handler.set_new('sink_particles', 'new_sink_particle')
handler.set_delete('sink_particles', 'delete_particle')
handler.add_getter('sink_particles', 'get_state_sink')
handler.add_setter('sink_particles', 'set_state_sink')
handler.add_getter('sink_particles', 'get_mass')
handler.add_setter('sink_particles', 'set_mass')
handler.add_getter('sink_particles', 'get_position')
handler.add_setter('sink_particles', 'set_position')
handler.add_getter('sink_particles', 'get_velocity')
handler.add_setter('sink_particles', 'set_velocity')
handler.add_getter('sink_particles', 'get_acceleration')
handler.add_getter('sink_particles', 'get_radius')
handler.add_setter('sink_particles', 'set_radius')
handler.add_getter('sink_particles', 'get_smoothing_length')
handler.add_setter('sink_particles', 'set_smoothing_length')
self.stopping_conditions.define_particle_set(handler, 'particles')
def define_methods(self, handler):
GravitationalDynamics.define_methods(self, handler)
handler.add_method(
"new_dm_particle",
(
generic_unit_system.mass,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.speed,
),
(
handler.INDEX,
handler.ERROR_CODE,
)
)
handler.add_method(
"new_sph_particle",
(
generic_unit_system.mass,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.specific_energy,
generic_unit_system.length,
),
(
handler.INDEX,
handler.ERROR_CODE,
)
)
handler.add_method(
"new_sink_particle",
(
generic_unit_system.mass,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.length,
generic_unit_system.length,
),
(
handler.INDEX,
handler.ERROR_CODE,
)
)
handler.add_method(
"get_state_dm",
(
handler.INDEX,
),
(
generic_unit_system.mass,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.speed,
handler.ERROR_CODE,
)
)
handler.add_method(
"set_state_dm",
(
handler.INDEX,
generic_unit_system.mass,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.speed,
# generic_unit_system.length,
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
"get_state_sph",
(
handler.INDEX,
),
(
generic_unit_system.mass,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.specific_energy,
generic_unit_system.length,
handler.ERROR_CODE,
)
)
handler.add_method(
"set_state_sph",
(
handler.INDEX,
generic_unit_system.mass,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.specific_energy,
generic_unit_system.length,
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
"get_state_sink",
(
handler.INDEX,
),
(
generic_unit_system.mass,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.length,
generic_unit_system.length,
handler.ERROR_CODE,
)
)
handler.add_method(
"set_state_sink",
(
handler.INDEX,
generic_unit_system.mass,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.length,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.speed,
generic_unit_system.length,
generic_unit_system.length,
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
"get_density",
(
handler.INDEX,
),
(
generic_unit_system.density,
handler.ERROR_CODE,
)
)
handler.add_method(
"get_smoothing_length",
(
handler.INDEX,
),
(
generic_unit_system.length,
handler.ERROR_CODE,
)
)
handler.add_method(
"set_smoothing_length",
(
handler.INDEX,
generic_unit_system.length,
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
"get_pressure",
(
handler.INDEX,
),
(
generic_unit_system.pressure,
handler.ERROR_CODE,
)
)
handler.add_method(
"get_h2ratio",
(
handler.INDEX,
),
(
handler.NO_UNIT,
handler.ERROR_CODE,
)
)
handler.add_method(
"get_hi_abundance",
(
handler.INDEX,
),
(
handler.NO_UNIT,
handler.ERROR_CODE,
)
)
handler.add_method(
"get_proton_abundance",
(
handler.INDEX,
),
(
handler.NO_UNIT,
handler.ERROR_CODE,
)
)
handler.add_method(
"get_electron_abundance",
(
handler.INDEX,
),
(
handler.NO_UNIT,
handler.ERROR_CODE,
)
)
handler.add_method(
"get_co_abundance",
(
handler.INDEX,
),
(
handler.NO_UNIT,
handler.ERROR_CODE,
)
)
handler.add_method(
"get_internal_energy",
(
handler.INDEX,
),
(
generic_unit_system.specific_energy,
handler.ERROR_CODE,
)
)
handler.add_method(
"set_internal_energy",
(
handler.INDEX,
generic_unit_system.specific_energy,
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
"set_h2ratio",
(
handler.INDEX,
handler.NO_UNIT,
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
"set_hi_abundance",
(
handler.INDEX,
handler.NO_UNIT,
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
"set_proton_abundance",
(
handler.INDEX,
handler.NO_UNIT,
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
"set_electron_abundance",
(
handler.INDEX,
handler.NO_UNIT,
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
"set_co_abundance",
(
handler.INDEX,
handler.NO_UNIT,
),
(
handler.ERROR_CODE,
)
)
handler.add_method(
"get_time_step",
(
),
(
generic_unit_system.time,
handler.ERROR_CODE,
)
)
handler.add_method(
"set_time_step",
(
generic_unit_system.time,
),
(
handler.ERROR_CODE,
)
)
self.stopping_conditions.define_methods(handler)
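if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: drive Phantom via
    # the standard AMUSE particle-set interface. This assumes a built
    # phantom_worker; the masses, positions and evolve time are placeholders.
    import numpy
    from amuse.datamodel import Particles
    gas = Particles(100)
    gas.mass = 0.01 | units.MSun
    gas.position = numpy.random.uniform(-0.05, 0.05, (100, 3)) | units.parsec
    gas.velocity = numpy.zeros((100, 3)) | units.kms
    gas.u = 1.0 | units.kms**2
    sph = Phantom()
    sph.gas_particles.add_particles(gas)
    sph.evolve_model(0.01 | units.Myr)
    print(sph.gas_particles.density.max())
    sph.stop()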
|
import turtle
myTurtle = turtle.Turtle()
myTurtle.shape('turtle')
#mySecondTurtle = turtle.Turtle()
myList = [1,2,3,5,8]
print(myList)
print(myList[0])
print(myList[4])
def printMyList():
print('In the function')
for i in range(0, len(myList)):
print(myList[i], end=' ')
# calling the function
printMyList()
def addMyList():
    sumOfList = 0
    print('\nSumming up my list of numbers')
    for i in range(0, len(myList)):
        sumOfList = sumOfList + myList[i]
    print('The sum of myList is ', sumOfList)
addMyList()
print(sum(myList))
def usingTurtleWithLists():
for i in range(0, len(myList)):
myTurtle.forward(myList[i]*10)
myTurtle.right(90)
    print(sum(myList))
usingTurtleWithLists()  # draw segments proportional to the list values
turtle.done() |
from flask import Flask, jsonify, request
import json
import requests
from requests.auth import HTTPBasicAuth
import tango_credentials_prod as tango_credentials
import logging
import os
import socket
from logging.handlers import SysLogHandler
# Set up logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
syslog = SysLogHandler(address=('logging handler address here', XXXXX))
formatter = logging.Formatter('%(asctime)s TANGOCLIENT: %(message)s', datefmt='%b %d %H:%M:%S')
syslog.setFormatter(formatter)
logger.addHandler(syslog)
logger.addHandler(logging.StreamHandler())
for cred in ['platformid', 'tangoapikey', 'customer', 'account_identifier', 'client_api_key']:
assert getattr(tango_credentials, cred)
# Workhorse functions
vendor_sku_map = {
    # Supported Vendors at $1
'amazon': 'AMZN-E-V-STD',
'target': 'TRGT-E-V-STD',
# Supported Vendors at $5
'starbucks': 'SBUX-E-V-STD',
'walmart': 'WAL-E-V-STD',
'itunes': 'APPL-E-{AMT}-STD',
'depot': 'HMDP1-E-V-STD',
'bestbuy': 'BSTB1-E-V-STD',
'chipotle': 'CHIP-E-{AMT}-STD',
'cvs': 'CVSP-E-V-STD',
'dominos': 'DOMINOS1-E-V-STD',
'ihop': 'IHOP1-E-V-STD',
'tgif': 'TGIFRIDAYS1-E-V-STD',
# Charities
'habitat': 'HABT-D-V-STD',
'parks': 'NTPF-D-V-STD',
'water': 'CNWR-D-V-STD',
# Tango card
'tangocard': 'TNGO-E-V-STD'
}
campaigns = {
'Campaign1': {
'account_identifier': 'Campaign1',
'reward_from': 'Stanford-Berkeley Opinion Survey',
'reward_subject': 'Your Gift Card from Stanford and UC Berkeley is here.',
'reward_message': 'Thank you for completing this wave of the Stanford-Berkeley Opinion Study!',
'campaign': 'Campaign1'
},
'Campaign2': {
'account_identifier': 'Campaign2',
'reward_from': 'Stanford-Berkeley Opinion Survey',
'reward_subject': 'Your Gift Card from Stanford and UC Berkeley is here.',
'reward_message': 'Thank you for completing this wave of the Stanford-Berkeley Opinion Survey!',
'campaign': 'Campaign2'
}
}
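# Pay() validates the vendor, campaign and amount, builds a Tango RaaS order
# payload from the campaign template above, and POSTs it to the 'orders'
# endpoint; it returns Tango's 'success' flag.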
# Note: qturl and post_incentive were referenced below but never defined in
# the original; they are exposed here as optional arguments so the template
# substitutions are reachable.
def Pay(vendor, payout_amount_in_cents, respondent_name, respondent_email,
        campaign, qturl=None, post_incentive=None):
assert campaign in campaigns.keys(), '%s is not a supported campaign.' % campaign
assert vendor in vendor_sku_map.keys(), '%s is not a supported vendor.' % vendor
assert payout_amount_in_cents > 0, 'Payout amount zero or less.'
assert payout_amount_in_cents <= 2000, 'Payout amount over $20.'
if vendor not in ['amazon', 'target', 'habitat', 'parks', 'water', 'tangocard']:
assert payout_amount_in_cents >= 500, '%s does not allow payouts under $5.' % vendor
payload = {
"customer": tango_credentials.customer, # master account level thing
"recipient": {
"name": respondent_name,
"email": respondent_email
},
"sku": vendor_sku_map[vendor],
"send_reward": True
}
    for k, v in campaigns[campaign].items():
payload[k] = v
if qturl:
payload['reward_message'] = payload['reward_message'].replace('{qturl}', qturl)
if post_incentive:
payload['reward_message'] = payload['reward_message'].replace('{post_incentive}', post_incentive)
if vendor in ['itunes', 'chipotle']:
payload['sku'] = payload['sku'].replace('{AMT}', str(payout_amount_in_cents))
else:
payload['amount'] = payout_amount_in_cents
if vendor in ['habitat', 'parks', 'water']:
payload['reward_subject'] = 'Thank you for donating.'
logging.info('Attempting to make payout request: %s' % str(payload))
r = requests.post(tango_credentials.url_base + 'orders',
auth = HTTPBasicAuth(tango_credentials.platformid,
tango_credentials.tangoapikey),
data = json.dumps(payload))
logging.info('Tango API response: %s' % r.status_code)
logging.info(r.text)
return r.json()['success']
app = Flask(__name__)
@app.route('/pay_respondent', methods=['GET'])
def pay_respondent():
success = False
try:
logging.info('Received request with info: ' + str(request.args))
respondent_name = request.args.get('respondent_name')
respondent_email = request.args.get('respondent_email')
vendor = request.args.get('vendor')
amount_in_dollars = request.args.get('amount_in_dollars')
provided_key = request.args.get('key')
campaign = request.args.get('campaign')
if provided_key == tango_credentials.client_api_key:
success = Pay(vendor, int(amount_in_dollars) * 100, respondent_name, respondent_email, campaign)
else:
logging.info('API key provided was %s not %s.' % (provided_key, tango_credentials.client_api_key))
except Exception as e:
logging.info('Failed because ' + str(e))
finally:
return json.dumps({'success': str(success)})
if __name__ == '__main__':
port = int(os.environ.get("PORT", 80))
app.run(host='0.0.0.0', port=port, debug=True, threaded=True)
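# Hedged usage sketch -- an example request to this service (all values are
# placeholders; 'key' must match tango_credentials.client_api_key):
#   GET /pay_respondent?respondent_name=Jane&respondent_email=jane@example.com
#       &vendor=amazon&amount_in_dollars=5&key=CLIENT_KEY&campaign=Campaign1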
|
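# Collapses runs of identical consecutive characters in each of T input
# strings, e.g. "aabbbc" -> "abc".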
test = int(input())
while test > 0:
    str1 = list(input())
    l = []
    l.append(str1[0])
    for i in range(1, len(str1)):
        if str1[i] != l[-1]:
            l.append(str1[i])
    print(''.join(l))
    test -= 1
|
from odoo import models, fields, api, _
class reschedule_booking(models.TransientModel):
_inherit = 'reschedule.booking'
@api.multi
def reschedule_booking(self):
res = super(reschedule_booking, self).reschedule_booking()
        active_model = self._context.get('active_model')
        active_id = self._context.get('active_id')
        self.env[active_model].sudo().browse(active_id).send_booking_reschedule_notification()
return res |
import logging
class OneLineFormatter(logging.Formatter):
def format(self, record) -> str:
result = super(OneLineFormatter, self).format(record)
result = result.replace('\n', '|')
return result
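# Usage sketch: multi-line records (e.g. tracebacks) are flattened onto one
# '|'-separated line, which keeps line-oriented log shippers happy. The
# logger name and format string below are arbitrary examples.
if __name__ == '__main__':
    handler = logging.StreamHandler()
    handler.setFormatter(OneLineFormatter('%(levelname)s %(message)s'))
    log = logging.getLogger('demo')
    log.addHandler(handler)
    log.error('first line\nsecond line')  # -> ERROR first line|second line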
|
from django.contrib import admin
from .models import Debt, Payment
# Register your models here.
admin.site.register(Debt)
admin.site.register(Payment)
|
# coding:utf-8
from datetime import date, timedelta, datetime
from django.conf import settings
from django.core.paginator import Paginator, EmptyPage
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify as djslugify
from math import ceil
from random import choice
from string import lowercase, digits
from time import time
from unidecode import unidecode
import exceptions as exc
import os
import ujson
from PIL import Image as pyImage
def make_img_url_path(path, img_type):
if not path:
return ""
return os.path.join(settings.MEDIA_URL, path.replace("orig", img_type))
def get_image_extension(img_obj):
img = pyImage.open(img_obj)
    # Image.open() above does not rewind the file object, so seek back to the start
img_obj.seek(0)
return ".%s" % img.format.lower()
|
import torch.nn as nn
import math
import torch
import torch.utils.model_zoo as model_zoo
from torchvision import datasets, transforms, models
#__all__ = ['vgg16_bn']
model_urls = {
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'resnet18': 'https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth'
}
class VGG(nn.Module):
def __init__(self, features, output_size=1274, batch_size=64):
super(VGG, self).__init__()
self.features = features
def forward(self, x, length):
x = self.features(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
self.linear = nn.Sequential(
nn.Linear(in_features=2048, out_features=1024),
nn.BatchNorm1d(1024),
# nn.Dropout(0.5),
nn.ReLU(True),
nn.Linear(in_features=1024, out_features=512),
nn.BatchNorm1d(512),
# nn.Dropout(0.5),
nn.ReLU(True),
nn.Linear(in_features=512, out_features=256),
nn.BatchNorm1d(256),
# nn.Dropout(0.5),
nn.ReLU(True),
nn.Linear(in_features=256, out_features=128),
nn.BatchNorm1d(128),
# nn.Dropout(0.5),
nn.ReLU(True),
)
self.output_layer = nn.Linear(in_features=128, out_features=11)
self.log_softmax = nn.LogSoftmax(dim=-1)
self.softmax = nn.Softmax(dim=1)
self._initialize_weights()
def forward(self, x, length):
assert( length.sum() == x.size(0))
# x = self.last(x)
frame_feature = []
start = 0
for l in length: # [2, 4]
# print(x[start:start+l].mean(dim=0).unsqueeze(0).view(1,-1).size())
frame_feature.append(x[start:start+l].mean(dim=0).unsqueeze(0).view(1,-1))
start += l
frame_feature = torch.cat(frame_feature, dim=0)
x = self.linear(frame_feature)
return x
# x = self.output_layer(x)
# x = self.log_softmax(x)
# return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
s = 1
first_flag=True
for v in cfg:
s=1
if (v==64 and first_flag):
s=2
first_flag=False
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, stride=s, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def conv_bn_relu(in_channels,out_channels,kernel_size=3,stride=2,padding=1):
return nn.Sequential(
nn.Conv2d(in_channels,out_channels,kernel_size=kernel_size,padding=padding,stride=stride),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
cfg = {
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
}
def vgg16bn(pretrained=False, **kwargs):
"""
VGG 16-layer model (configuration "D") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
yolo = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
if pretrained:
print("True")
vgg_state_dict = model_zoo.load_url(model_urls['vgg16_bn'])
yolo_state_dict = yolo.state_dict()
for k in vgg_state_dict.keys():
if k in yolo_state_dict.keys() and k.startswith('features'):
yolo_state_dict[k] = vgg_state_dict[k]
yolo.load_state_dict(yolo_state_dict)
return yolo
class Resnet(nn.Module):
def __init__(self, features):
super(Resnet, self).__init__()
self.features = features
def forward(self, x, length):
x = self.features(x)
return x
def Resnet50(pretrained=False, **kwargs):
resnet50 = models.resnet50(pretrained=pretrained)
newmodel = torch.nn.Sequential(*(list(resnet50.children())[:-1]))
model = Resnet(newmodel, **kwargs)
return model
def test_res():
import torch
model = Resnet50(pretrained=True)
# img = torch.rand(1,3,448,448)
batch_size = 1
img = torch.rand(10, 3, 240,320)
length = [3,7]
#img = img.view(batch_size*img.size(1), img.size(2),img.size(3),img.size(4))
model.eval()
output = model(img, length)
print(output.size())
def test():
import torch
model = vgg16bn(pretrained=True, batch_size=2)
# img = torch.rand(1,3,448,448)
batch_size = 1
img = torch.rand(10, 3, 240,320)
length = [3,7]
#img = img.view(batch_size*img.size(1), img.size(2),img.size(3),img.size(4))
model.eval()
output = model(img, length)
print(output.size())
if __name__ == '__main__':
# test()
test_res()
|
# Sparki_Myro testing
from __future__ import print_function
from sparki_learning import *
com_port = None # replace with your COM port or /dev/
setDebug(DEBUG_INFO)
while not com_port:
com_port = input("What is your com port or /dev/? ")
init(com_port)
for x in timer(15):
print("x = " + str(x))
forward(1,1)
backward(1,1)
|
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin)
from django.utils import timezone
from unavis import managers
class UserModel(AbstractBaseUser, PermissionsMixin):
"""
```UserModel``` defines all the user on the
website. It can be either anonymous, the admin
or anyone else. It abstracts and stay very close
the the django.contrib.auth packages models to
work with for permissions and groups.
"""
email = models.EmailField(_('email address'),
primary_key=True, unique=True,
null=False, blank=False)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether '
'the user can log into '
'this admin site.'))
validated_at = models.DateTimeField(_('DateTime an account is validated'),
null=True, default=None,
editable=False)
    is_active = models.BooleanField(_('active'), default=True,
                                    help_text=_('Designates whether the '
                                                'user account is active.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
USERNAME_FIELD = 'email'
objects = managers.UserModelManager.from_queryset(
managers.UserModelQuerySet
)()
class Meta:
swappable = 'AUTH_USER_MODEL'
abstract = False
@property
def validated(self):
return bool(self.validated_at is not None)
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
@author: Jeff Zhang
@date: 2017-08-30
"""
import autograd.numpy as np
from autograd.scipy.misc import logsumexp
from autograd.convenience_wrappers import value_and_grad as vgrad
from functools import partial
def EM(init_params, data, callback=None):
def EM_update(params):
natural_params = list(map(np.log, params))
loglike, E_stats = vgrad(log_partition_function)(natural_params, data) # E step
if callback: callback(loglike, params)
return list(map(normalize, E_stats)) # M step
def fixed_point(f, x0):
x1 = f(x0)
while different(x0, x1):
x0, x1 = x1, f(x1)
return x1
def different(params1, params2):
allclose = partial(np.allclose, atol=1e-3, rtol=1e-3)
return not all(map(allclose, params1, params2))
return fixed_point(EM_update, init_params)
def normalize(a):
def replace_zeros(a):
return np.where(a > 0., a, 1.)
return a / replace_zeros(a.sum(-1, keepdims=True))
def log_partition_function(natural_params, data):
if isinstance(data, list):
return sum(map(partial(log_partition_function, natural_params), data))
log_pi, log_A, log_B = natural_params
log_alpha = log_pi
for y in data:
log_alpha = logsumexp(log_alpha[:,None] + log_A, axis=0) + log_B[:,y]
    return logsumexp(log_alpha)
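# A toy driver sketch for the EM routine above; the chain size, symbol
# alphabet, and observation sequences are made up, and random inits are
# normalized into valid distributions before the fixed-point iteration.
if __name__ == '__main__':
    npr = np.random.RandomState(0)
    init_pi = normalize(npr.rand(2))      # initial state distribution
    init_A = normalize(npr.rand(2, 2))    # transition matrix
    init_B = normalize(npr.rand(2, 3))    # emission matrix (3 symbols)
    data = [np.array([0, 1, 2, 1, 0]), np.array([2, 2, 1, 0])]
    pi, A, B = EM((init_pi, init_A, init_B), data,
                  callback=lambda ll, _: print('log-likelihood:', ll))
    print(pi, A, B, sep='\n')
|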
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import svd
X = plt.imread("YOUR_IMAGE.jpg").astype(float)  # np.float was removed in NumPy 1.24
X /= 256.0
X = X.mean(axis=2) # make X black and white
plt.imsave("YOUR_IMAGE_IN_BLACK_AND_WHITE.jpg", np.dstack([X]*3))
# CODE GOES HERE
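# One possible completion of the placeholder above (an assumption about the
# exercise's intent): keep the top-k singular triplets and rebuild a rank-k
# approximation of the image; k is an arbitrary choice here.
U, s, Vt = svd(X, full_matrices=False)
k = 20  # illustrative rank
X_k = U[:, :k] @ np.diag(s[:k]) @ Vt[:k, :]
plt.imsave("YOUR_IMAGE_RANK_%d.jpg" % k, np.dstack([np.clip(X_k, 0, 1)] * 3))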
|
"""
def __init__(self,
criterion="gini", 基尼系数
splitter="best",
max_depth=None, 树的深度大小
min_samples_split=2, # 减枝
min_samples_leaf=1, # 减枝
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None, 随机数种子
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
class_weight=None,
presort=False):
"""
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
from sklearn.tree import export_graphviz
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
# Load the data
data = pd.read_csv("data/train.csv")
x = data[["Pclass", "Age", "Sex"]]
y = data["Survived"]
# Handle missing values
x["Age"] = x["Age"].fillna(x["Age"].mean())
# Split into train / test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
# Feature engineering: one-hot encode the Sex column ('records', not 'record')
vec = DictVectorizer(sparse=False)
x_train = vec.fit_transform(x_train.to_dict(orient='records'))
x_test = vec.transform(x_test.to_dict(orient='records'))
# print(vec.feature_names_)
# Decision tree
dec = DecisionTreeClassifier()
dec.fit(x_train, y_train)
print("Test accuracy:", dec.score(x_test, y_test))
dt_predict = dec.predict(x_test)
# print("Predictions:", dt_predict)
print(classification_report(y_test, dt_predict, target_names=["died", "survived"]))
# Visualize the tree structure by exporting a Graphviz .dot file:
# exp = export_graphviz(dec, out_file="./tree.dot", feature_names=['Age', 'Pclass', 'Sex=female', 'Sex=male'])
# Random forest
rfc = RandomForestClassifier()
rfc.fit(x_train, y_train)
rfc_y_predict = rfc.predict(x_test)
print(rfc.score(x_test, y_test))
print(classification_report(y_test, rfc_y_predict, target_names=["died", "survived"]))
|
# -*- coding: utf-8 -*-
# @Time : 2020/5/15 10:52
# @Author : 永
# @File : text.py
# @Software: PyCharm
import re
li = ['\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t', '/\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t', '\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t-\n\t\t\t\t\t\t\t\t\t\t\t\t337~375平米\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t']
# Strip whitespace, punctuation, backslashes and lowercase ascii letters,
# leaving only the digits and CJK text (e.g. '337375平米').
area = re.sub(r"[\W*\\a-z]", "", str(li))
print(area)
|
"""
Util
calcRatio
removeDuplicateLInks
trimDomainStr -- remove all but the domain
trimLinkStr -- limit to filename length ?
isMimeTypeValid
isPageExpired
"""
import sys, json, os
import requests
import requests_cache
from datetime import datetime
from Writer import Writer
from Reader import Reader
import os
class Util:
def __init__(self):
self.read = Reader()
self.write = Writer()
def calcRatio(self):
pass
    def os_filename_length(self):
        # Maximum filename length supported by the root filesystem.
        # (The original memoizing closure was never invoked and compared an
        # int with `is not`; os.statvfs exposes the limit directly.)
        return os.statvfs('/').f_namemax
def trimDomainStr(self, url):
filename_maxlength = self.os_filename_length()
url_length = len(url)
return (url[:filename_maxlength]) if url_length > filename_maxlength else url
@staticmethod
def getArg(argstr):
for arg in sys.argv:
if argstr in arg:
return arg.split("=")[1]
class Requests:
# cache saved to sqlite db for persistence
def __init__(self):
requests_cache.install_cache(cache_name='web_cache', backend='sqlite', expire_after=600)
self.write = Writer()
def fetch(self, url):
try:
r = requests.get(url, verify=False)
return r
except Exception as e:
self.write.logError(e)
    def disable_cache(self):
        try:
            # requests_cache.disabled() only returns a context manager and
            # would be a no-op here; uninstall_cache() removes the global cache
            requests_cache.uninstall_cache()
        except Exception as e:
            self.write.logError(e)
def clear_cache(self):
try:
requests_cache.clear()
except Exception as e:
self.write.logError(e)
def is_from_cache(self, r):
try:
c = r.from_cache
self.write.print('From Cache: ' + str(c))
return c
except Exception as e:
self.write.logError(e)
class Node:
def __init__(self, _url, sourcenodes, depth=0, index=0):
self.write = Writer()
self.url = _url
self.index = index
self.sourceNodes = sourcenodes
self.depth = depth
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
@staticmethod
def load_json_file():
with open('webcrawler_data.json') as f:
data = json.load(f)
return data
# @staticmethod
# def from_json(data, cls):
# annotations: dict = cls.__annotations__ if hasattr(cls, '__annotations__') else None
# if issubclass(cls, List):
# list_type = cls.__args__[0]
# instance: list = list()
# for value in data:
# instance.append(Node.from_json(value, list_type))
# return instance
# elif issubclass(cls, Dict):
# key_type = cls.__args__[0]
# val_type = cls.__args__[1]
# instance: dict = dict()
# for key, value in data.items():
# instance.update(Node.from_json(key, key_type), Node.from_json(value, val_type))
# return instance
# else:
# instance: cls = cls()
# for name, value in data.items():
# field_type = annotations.get(name)
# if inspect.isclass(field_type) and isinstance(value, (dict, tuple, list, set, frozenset)):
# setattr(instance, name, from_json(value, field_type))
# else:
# setattr(instance, name, value)
# return instance
def to_json_file(self, entry, file='webcrawler_data.json'):
try:
a = []
if not os.path.isfile(file):
a.append(entry)
with open(file, mode='w') as f:
f.write(json.dumps(a, indent=2))
else:
with open(file) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(entry)
with open(file, mode='w') as f:
f.write(json.dumps(feeds, indent=2))
except json.decoder.JSONDecodeError as j:
self.write.logError('JSONDecodeError: ' + str(j))
except FileNotFoundError:
            self.write.logError(file + " not found.")
except Exception as e:
self.write.logError(e)
# def set_depth(self, x):
# self.depth = x
#
# def get_depth(self):
# return self.depth
#
# def del_depth(self):
# del self.depth
#
# depth = property(get_depth,set_depth,del_depth)
# # url = self.url
# # index = self.index
# # sourcenodes = self.sourceNodes
class Links:
def __init__(self, _url, _links, _depth):
self.url = _url
self.links = _links
self.depth = _depth
class Pages:
def __init__(self):
self.write = Writer()
self.pages = {}
self.read = Reader()
pass
def new_page(self, page):
self.pages[page] = []
def get_page(self, page):
return self.pages.get(page)
def add_link(self, _page, link, depth, d=None):
try:
d = {
"link": link,
"depth": depth,
# "date": datetime.timestamp(datetime.now())
}
page = self.get_page(_page)
if page is not None:
page.append(d)
else: # add new page empty object and recall the fx
self.new_page(_page)
self.add_link(_page, link, depth)
except Exception as e:
self.write.logError(e)
def output_to_tsv(self):
self.write.toTsv(self.pages)
def output(self):
return self.pages
def output_to_json(self, links, _exist=False, depth=0):
try:
existing_json = []
for l in links:
found = self.pages.get(l.url)
if found is not None and 'page_links' not in found[0]:
found.insert(0, {
"page_links": len(l.links)
})
# else:
# for k in l.links:
# self.add_link(l.url, k, l.depth)
# found = self.pages.get(l.url)
# if found is not None:
# found.append({
# "page_links": len(l.links)
# })
# only use existing json if defined in cli param
if _exist:
existing_json = self.get_existing_json()
if existing_json is not None:
self.pages = self.merge_json(self.pages, existing_json)
with open('outputToJson.json', mode='w') as f:
f.write(json.dumps(self.pages, indent=2))
except Exception as e:
self.write.logError(e)
def get_existing_json(self):
return self.read.json_to_crawl("outputToJson.json")
def merge_json(self, x, y):
try:
z = {**x, **y}
return z
except Exception as e:
self.write.logError(e)
class Edge:
def __init__(self, source, target):
self.source = source
self.target = target
def __eq__(self, other):
return self.source == other.source and self.target == other.target
def __hash__(self):
return hash((self.source, self.target))
class Graph:
    def __init__(self, nodes=None, edges=None):
        # avoid mutable default arguments shared across all instances
        self.nodes = nodes if nodes is not None else []
        self.edges = edges if edges is not None else set()
def addNode(self, node, nodeIndex):
node.index = nodeIndex
self.nodes.append(node)
def addEdge(self, sourceNodeIdx, targetNodeIdx):
edge = Edge(sourceNodeIdx, targetNodeIdx)
self.edges.add(edge)
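# A tiny demo of the Edge/Graph dedup behavior above: Edge implements
# __eq__/__hash__, so the Graph's edge set silently drops repeated
# (source, target) pairs. Indices here are arbitrary.
if __name__ == '__main__':
    g = Graph(nodes=[], edges=set())
    g.addEdge(0, 1)
    g.addEdge(0, 1)  # duplicate, absorbed by the set
    g.addEdge(1, 2)
    print(len(g.edges))  # 2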
|
import pytest
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
def pytest_addoption(parser):
parser.addoption(
"--browser_name", action="store", default="chrome"
)
@pytest.fixture(scope="class")
def setup(request):
print("start run tcs")
browser = request.config.getoption("browser_name")
if browser == "chrome":
driver = webdriver.Chrome(ChromeDriverManager().install())
elif browser == "firefox":
driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())
driver.maximize_window()
driver.implicitly_wait(10)
request.cls.driver = driver
yield
print("close tc")
    driver.quit()  # quit() closes all windows; a separate close() is redundant
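# Usage sketch for the fixture above; the run command, test class, and URL
# are illustrative assumptions, not part of this conftest:
#
#   pytest test_home.py --browser_name firefox
#
# @pytest.mark.usefixtures("setup")
# class TestHomePage:
#     def test_opens(self):
#         self.driver.get("https://example.com")
|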
import os, sys
import awesomeengine
import behaviors
import editor_behaviors
import modes
def go():
if getattr(sys, 'frozen', False):
root = sys._MEIPASS
else:
root = os.path.dirname(os.path.abspath(__file__))
e = awesomeengine.Engine(os.path.join(root, 'res'))
e.behavior_manager.register_module(behaviors)
e.behavior_manager.register_module(editor_behaviors)
e.create_window(title='Prepare For Disappointment', size=(1280, 720))
e.add_mode('welcome', modes.AttractMode())
e.add_mode('edit', modes.EditorMode())
e.add_mode('play', modes.PlayMode())
e.add_mode('splash', modes.SpalshScreen())
e.add_mode('dead', modes.DeadMode())
e.change_mode('welcome')
e.run()
if __name__ == '__main__':
go()
|
"""A Future class similar to the one in PEP 3148."""
__all__ = (
'Future', 'wrap_future', 'isfuture',
)
import concurrent.futures
import contextvars
import logging
import sys
from types import GenericAlias
from . import base_futures
from . import events
from . import exceptions
from . import format_helpers
isfuture = base_futures.isfuture
_PENDING = base_futures._PENDING
_CANCELLED = base_futures._CANCELLED
_FINISHED = base_futures._FINISHED
STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging
class Future:
"""This class is *almost* compatible with concurrent.futures.Future.
Differences:
- This class is not thread-safe.
- result() and exception() do not take a timeout argument and
raise an exception when the future isn't done yet.
- Callbacks registered with add_done_callback() are always called
via the event loop's call_soon().
- This class is not compatible with the wait() and as_completed()
methods in the concurrent.futures package.
(In Python 3.4 or later we may be able to unify the implementations.)
"""
# Class variables serving as defaults for instance variables.
_state = _PENDING
_result = None
_exception = None
_loop = None
_source_traceback = None
_cancel_message = None
# A saved CancelledError for later chaining as an exception context.
_cancelled_exc = None
# This field is used for a dual purpose:
# - Its presence is a marker to declare that a class implements
# the Future protocol (i.e. is intended to be duck-type compatible).
# The value must also be not-None, to enable a subclass to declare
# that it is not compatible by setting this to None.
# - It is set by __iter__() below so that Task._step() can tell
# the difference between
    #   `await Future()` or `yield from Future()` (correct) vs.
# `yield Future()` (incorrect).
_asyncio_future_blocking = False
__log_traceback = False
def __init__(self, *, loop=None):
"""Initialize the future.
The optional event_loop argument allows explicitly setting the event
loop object used by the future. If it's not provided, the future uses
the default event loop.
"""
if loop is None:
self._loop = events._get_event_loop()
else:
self._loop = loop
self._callbacks = []
if self._loop.get_debug():
self._source_traceback = format_helpers.extract_stack(
sys._getframe(1))
def __repr__(self):
return base_futures._future_repr(self)
def __del__(self):
if not self.__log_traceback:
# set_exception() was not called, or result() or exception()
# has consumed the exception
return
exc = self._exception
context = {
'message':
f'{self.__class__.__name__} exception was never retrieved',
'exception': exc,
'future': self,
}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
__class_getitem__ = classmethod(GenericAlias)
@property
def _log_traceback(self):
return self.__log_traceback
@_log_traceback.setter
def _log_traceback(self, val):
if val:
raise ValueError('_log_traceback can only be set to False')
self.__log_traceback = False
def get_loop(self):
"""Return the event loop the Future is bound to."""
loop = self._loop
if loop is None:
raise RuntimeError("Future object is not initialized.")
return loop
def _make_cancelled_error(self):
"""Create the CancelledError to raise if the Future is cancelled.
This should only be called once when handling a cancellation since
it erases the saved context exception value.
"""
if self._cancelled_exc is not None:
exc = self._cancelled_exc
self._cancelled_exc = None
return exc
if self._cancel_message is None:
exc = exceptions.CancelledError()
else:
exc = exceptions.CancelledError(self._cancel_message)
exc.__context__ = self._cancelled_exc
# Remove the reference since we don't need this anymore.
self._cancelled_exc = None
return exc
def cancel(self, msg=None):
"""Cancel the future and schedule callbacks.
If the future is already done or cancelled, return False. Otherwise,
change the future's state to cancelled, schedule the callbacks and
return True.
"""
self.__log_traceback = False
if self._state != _PENDING:
return False
self._state = _CANCELLED
self._cancel_message = msg
self.__schedule_callbacks()
return True
def __schedule_callbacks(self):
"""Internal: Ask the event loop to call all callbacks.
The callbacks are scheduled to be called as soon as possible. Also
clears the callback list.
"""
callbacks = self._callbacks[:]
if not callbacks:
return
self._callbacks[:] = []
for callback, ctx in callbacks:
self._loop.call_soon(callback, self, context=ctx)
def cancelled(self):
"""Return True if the future was cancelled."""
return self._state == _CANCELLED
# Don't implement running(); see http://bugs.python.org/issue18699
def done(self):
"""Return True if the future is done.
Done means either that a result / exception are available, or that the
future was cancelled.
"""
return self._state != _PENDING
def result(self):
"""Return the result this future represents.
If the future has been cancelled, raises CancelledError. If the
future's result isn't yet available, raises InvalidStateError. If
the future is done and has an exception set, this exception is raised.
"""
if self._state == _CANCELLED:
exc = self._make_cancelled_error()
raise exc
if self._state != _FINISHED:
raise exceptions.InvalidStateError('Result is not ready.')
self.__log_traceback = False
if self._exception is not None:
raise self._exception.with_traceback(self._exception_tb)
return self._result
def exception(self):
"""Return the exception that was set on this future.
The exception (or None if no exception was set) is returned only if
the future is done. If the future has been cancelled, raises
CancelledError. If the future isn't done yet, raises
InvalidStateError.
"""
if self._state == _CANCELLED:
exc = self._make_cancelled_error()
raise exc
if self._state != _FINISHED:
raise exceptions.InvalidStateError('Exception is not set.')
self.__log_traceback = False
return self._exception
def add_done_callback(self, fn, *, context=None):
"""Add a callback to be run when the future becomes done.
The callback is called with a single argument - the future object. If
the future is already done when this is called, the callback is
scheduled with call_soon.
"""
if self._state != _PENDING:
self._loop.call_soon(fn, self, context=context)
else:
if context is None:
context = contextvars.copy_context()
self._callbacks.append((fn, context))
# New method not in PEP 3148.
def remove_done_callback(self, fn):
"""Remove all instances of a callback from the "call when done" list.
Returns the number of callbacks removed.
"""
filtered_callbacks = [(f, ctx)
for (f, ctx) in self._callbacks
if f != fn]
removed_count = len(self._callbacks) - len(filtered_callbacks)
if removed_count:
self._callbacks[:] = filtered_callbacks
return removed_count
# So-called internal methods (note: no set_running_or_notify_cancel()).
def set_result(self, result):
"""Mark the future done and set its result.
If the future is already done when this method is called, raises
InvalidStateError.
"""
if self._state != _PENDING:
raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
self._result = result
self._state = _FINISHED
self.__schedule_callbacks()
def set_exception(self, exception):
"""Mark the future done and set an exception.
If the future is already done when this method is called, raises
InvalidStateError.
"""
if self._state != _PENDING:
raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
if isinstance(exception, type):
exception = exception()
if type(exception) is StopIteration:
raise TypeError("StopIteration interacts badly with generators "
"and cannot be raised into a Future")
self._exception = exception
self._exception_tb = exception.__traceback__
self._state = _FINISHED
self.__schedule_callbacks()
self.__log_traceback = True
def __await__(self):
if not self.done():
self._asyncio_future_blocking = True
yield self # This tells Task to wait for completion.
if not self.done():
raise RuntimeError("await wasn't used with future")
return self.result() # May raise too.
__iter__ = __await__ # make compatible with 'yield from'.
# Needed for testing purposes.
_PyFuture = Future
def _get_loop(fut):
# Tries to call Future.get_loop() if it's available.
# Otherwise fallbacks to using the old '_loop' property.
try:
get_loop = fut.get_loop
except AttributeError:
pass
else:
return get_loop()
return fut._loop
def _set_result_unless_cancelled(fut, result):
"""Helper setting the result only if the future was not cancelled."""
if fut.cancelled():
return
fut.set_result(result)
def _convert_future_exc(exc):
exc_class = type(exc)
if exc_class is concurrent.futures.CancelledError:
return exceptions.CancelledError(*exc.args)
elif exc_class is concurrent.futures.TimeoutError:
return exceptions.TimeoutError(*exc.args)
elif exc_class is concurrent.futures.InvalidStateError:
return exceptions.InvalidStateError(*exc.args)
else:
return exc
def _set_concurrent_future_state(concurrent, source):
"""Copy state from a future to a concurrent.futures.Future."""
assert source.done()
if source.cancelled():
concurrent.cancel()
if not concurrent.set_running_or_notify_cancel():
return
exception = source.exception()
if exception is not None:
concurrent.set_exception(_convert_future_exc(exception))
else:
result = source.result()
concurrent.set_result(result)
def _copy_future_state(source, dest):
"""Internal helper to copy state from another Future.
The other Future may be a concurrent.futures.Future.
"""
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(_convert_future_exc(exception))
else:
result = source.result()
dest.set_result(result)
def _chain_future(source, destination):
"""Chain two futures so that when one completes, so does the other.
The result (or exception) of source will be copied to destination.
If destination is cancelled, source gets cancelled too.
Compatible with both asyncio.Future and concurrent.futures.Future.
"""
if not isfuture(source) and not isinstance(source,
concurrent.futures.Future):
raise TypeError('A future is required for source argument')
if not isfuture(destination) and not isinstance(destination,
concurrent.futures.Future):
raise TypeError('A future is required for destination argument')
source_loop = _get_loop(source) if isfuture(source) else None
dest_loop = _get_loop(destination) if isfuture(destination) else None
def _set_state(future, other):
if isfuture(future):
_copy_future_state(other, future)
else:
_set_concurrent_future_state(future, other)
def _call_check_cancel(destination):
if destination.cancelled():
if source_loop is None or source_loop is dest_loop:
source.cancel()
else:
source_loop.call_soon_threadsafe(source.cancel)
def _call_set_state(source):
if (destination.cancelled() and
dest_loop is not None and dest_loop.is_closed()):
return
if dest_loop is None or dest_loop is source_loop:
_set_state(destination, source)
else:
if dest_loop.is_closed():
return
dest_loop.call_soon_threadsafe(_set_state, destination, source)
destination.add_done_callback(_call_check_cancel)
source.add_done_callback(_call_set_state)
def wrap_future(future, *, loop=None):
"""Wrap concurrent.futures.Future object."""
if isfuture(future):
return future
assert isinstance(future, concurrent.futures.Future), \
f'concurrent.futures.Future is expected, got {future!r}'
if loop is None:
loop = events._get_event_loop()
new_future = loop.create_future()
_chain_future(future, new_future)
return new_future
try:
import _asyncio
except ImportError:
pass
else:
# _CFuture is needed for tests.
Future = _CFuture = _asyncio.Future
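# Usage sketch (a separate script, not part of this module): bridging a
# concurrent.futures.Future into the event loop via the public
# asyncio.wrap_future() defined above.
#
# import asyncio, concurrent.futures
#
# async def main():
#     with concurrent.futures.ThreadPoolExecutor() as pool:
#         cf = pool.submit(sum, range(1_000_000))   # thread-pool future
#         print(await asyncio.wrap_future(cf))      # awaitable asyncio wrapper
#
# asyncio.run(main())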
|
# Generated by Django 3.2.5 on 2021-07-24 01:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0014_review_model'),
]
operations = [
migrations.RemoveField(
model_name='shipment',
name='customer',
),
]
|
class Barracks(object):
'''
def generate_knight(self):
#print("generate_knight")
return Knight(400, 5, 3, 1, "short sword")
def generate_archer(self):
#print("generate_knight")
return Archer(200, 7, 1, 3, "short bow")
'''
def generate_unit(self, unit_type, level):
if unit_type == "knight":
return Knight(level)
elif unit_type == "archer":
return Archer(level)
class Knight(object):
def __init__(self, level):
self.unit_type = "knight"
file_name = "{}_{}.dat".format(self.unit_type, level)
with open(file_name, 'r') as parameter_file:
lines = parameter_file.read().split("\n")
self.life = lines[0]
self.speed = lines[1]
self.attack_power = lines[2]
self.attack_range = lines[3]
self.weapon = lines[4]
def __str__(self):
return "Type: {0}\n" \
"Life: {1}\n" \
"speed {2}\n" \
"attack_range {3}\n" \
"attack_power {4}\n" \
"weapon {5}".format(
self.unit_type,
self.life,
            self.speed,
            self.attack_range,
            self.attack_power,
self.weapon)
class Archer(object):
def __init__(self, level):
self.unit_type = "archer"
file_name = "{}_{}.dat".format(self.unit_type, level)
with open(file_name, 'r') as parameter_file:
lines = parameter_file.read().split("\n")
self.life = lines[0]
self.speed = lines[1]
self.attack_power = lines[2]
self.attack_range = lines[3]
self.weapon = lines[4]
def __str__(self):
return "Type: {0}\n" \
"Life: {1}\n" \
"speed {2}\n" \
"attack_range {3}\n" \
"attack_power {4}\n" \
"weapon {5}".format(
self.unit_type,
self.life,
            self.speed,
            self.attack_range,
            self.attack_power,
self.weapon)
if __name__ == "__main__":
#print("begin")
barrack = Barracks()
knight1 = barrack.generate_unit("knight", 1)
Archer1 = barrack.generate_unit("archer", 2)
print ("[knight1] {}".format(knight1))
print ("[Archer1] {}".format(Archer1)) |
import statistics as s
#OR: from statistics import mean as m
#print(m(exList))
#OR: from statistics import mean, stdev
#print(mean(exList))
#print(stdev(exList))
#OR: from statistics import mean as m, stdev as s
#OR: from statistics import * #the * imports all functions from statistics
exList = [5,6,2,1,6,7,2,2,7,3,7,7,7]
print(s.mean(exList)) |
def bball_sub(heights, counter, team):
while not ((counter == 5) or (len(heights) == 0)):
height = heights[0]
heights = t(heights)
if height > 180:
team[counter] = height
counter += 1
def t(heights):
    # return the tail of the list (everything after the first element)
    return heights[1:]
counter = 0
team = [0, 0, 0, 0, 0]
heights = [190, 135, 185, 200, 195, 190, 215]
bball_sub(heights, counter, team)
print(team)
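# Equivalent, more idiomatic selection (a sketch, not part of the original
# exercise): first five heights over 180 cm, zero-padded to keep the fixed
# team size.
tall = [h for h in heights if h > 180][:5]
print(tall + [0] * (5 - len(tall)))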
|
"""
@author: Scarlett Zhang
This file has 2 classes:
InvalidFireRecordException
FireDumper
"""
import logging
from typing import Any
from typing import List, Dict, Tuple
import rootpath
rootpath.append()
from backend.data_preparation.dumper.dumperbase import DumperBase
from backend.connection import Connection
from backend.data_preparation.crawler.fire_crawler import FireEvent
logger = logging.getLogger('TaskManager')
class InvalidFireRecordException(Exception):
pass
class FireDumper(DumperBase):
"""
Fire Dumper class
    This class mainly operates on 3 tables:
    fire: schema: name, if_sequence, agency, state, id, time, geom_full, geom_1e4, geom_1e3, geom_1e2, geom_center, area.
    primary key: (name, time)
    (records in fire are smaller fire events, each representing a single fire polygon at a certain moment)
    fire_history: schema: id, year, state, name, url.
    primary key: id
    (each record in fire_history is a fire that takes a whole webpage on rmgsc, so 'name' here is the url name)
    fire_merged: schema: name, if_sequence, agency, state, id, start_time, end_time, geom_full, geom_1e4, geom_1e3,
    geom_center, max_area
    primary key: id
    (each record in fire_merged is a fire that takes a whole webpage on rmgsc; start_time comes from the first fire
    record and end_time from the last)
"""
    # code for listing the tables that exist in the database
SQL_CHECK_IF_TABLE_EXISTS = 'SELECT table_name FROM information_schema.TABLES'
# code for creating a table if it doesn't exist
SQL_CREATE_HISTORY_TABLE = 'CREATE TABLE IF NOT EXISTS fire_history (id integer, year int4, state VARCHAR(40), ' \
'name VARCHAR (40), url text, PRIMARY KEY (id))'
# SQL_CREATE_HISTORY_TABLE: create fire_history table if it doesn't exist
SQL_CREATE_FIRE_MERGED_TABLE = 'CREATE TABLE IF NOT EXISTS fire_merged (name VARCHAR (40), if_sequence boolean, ' \
'agency VARCHAR (80), state VARCHAR(15), id INTEGER , start_time timestamp, ' \
'end_time timestamp, geom_full geometry, geom_1e4 geometry, geom_1e3 geometry, ' \
'geom_1e2 geometry,geom_center geometry, max_area float, PRIMARY KEY (id))'
# SQL_CREATE_FIRE_MERGED_TABLE: create fire_merged table if it doesn't exist
SQL_CREATE_FIRE_TABLE = 'CREATE TABLE IF NOT EXISTS fire (name VARCHAR (40), if_sequence boolean, agency ' \
'VARCHAR (20), state VARCHAR(15), id INTEGER , time timestamp, geom_full geometry, ' \
'geom_1e4 geometry, geom_1e3 geometry, geom_1e2 geometry, geom_center geometry, ' \
'area float, PRIMARY KEY (name, time))'
# SQL_CREATE_FIRE_TABLE: create fire table if it doesn't exist'
# code for updating records or inserting new records
# "%(id)s": when this statement is executed with cur.execute(), the second parameter is a dictionary with has "id"
# as the key and the value of id as the value
SQL_INSERT_FIRE_HISTORY = 'INSERT INTO fire_history (id, year, state, name,url) VALUES (%(id)s,%(year)s,' \
'%(state)s, %(firename)s, %(url)s) ON CONFLICT DO NOTHING'
# SQL_INSERT_FIRE_HISTORY: insert a new fire record into fire_history
SQL_INSERT_FIRE = 'INSERT INTO fire (name, if_sequence, agency, state, id, time, geom_full, geom_1e4, ' \
'geom_1e3, geom_1e2, geom_center, area) VALUES (%(firename)s,%(is_sequential)s,%(agency)s,' \
'%(state)s,%(fire_id)s,%(datetime)s,%(geopolygon_full)s,%(geopolygon_large)s,' \
'%(geopolygon_medium)s,%(geopolygon_small)s, ' \
'st_astext(st_centroid(st_geomfromtext(%(geopolygon_small)s))), %(area)s) ON CONFLICT DO NOTHING'
# SQL_INSERT_FIRE: insert a new fire record into fire
SQL_INSERT_FIRE_INTO_MERGED = 'INSERT INTO fire_merged(name, if_sequence, agency, state, id, start_time, ' \
'end_time, geom_full, geom_1e4, geom_1e3, geom_1e2, geom_center, max_area)' \
'VALUES (%s, %s, %s,%s, %s, %s,%s,%s,%s,%s,%s,%s,%s) ON CONFLICT (id) DO UPDATE SET' \
' if_sequence = TRUE, agency = EXCLUDED.agency, end_time = EXCLUDED.end_time, ' \
'geom_full = EXCLUDED.geom_full, geom_1e4 = EXCLUDED.geom_1e4, ' \
'geom_1e3 = EXCLUDED.geom_1e3, geom_1e2 = EXCLUDED.geom_1e2, ' \
'geom_center = EXCLUDED.geom_center,max_area= EXCLUDED.max_area'
# SQL_INSERT_FIRE_INTO_MERGED: insert a new fire record into fire_merged
SQL_UPDATE_FIRE_INFO = 'UPDATE fire SET id = %s WHERE name = %s AND time >= %s::timestamp AND ' \
'time <= %s::timestamp;'
    # update fire info when a single temp folder contains records from fire events with different names;
    # one record will then be separated into two records,
    # so ids in the fire table need to be updated with a larger number
# code for requesting fire information
# retrieve all fire information
SQL_RETRIEVE_ALL_FIRES = 'SELECT year,state,name FROM fire_history'
# get the last id from fire_history table
SQL_GET_LATEST_ID = 'SELECT MAX(id) FROM fire_history'
# get the recent fire whose last record is within 10 days of the current date
SQL_GET_LATEST_FIRE = 'SELECT a.id,h.year, h.state, h.name ' \
'from (SELECT id FROM fire_merged Where abs(DATE_PART(\'day\', ' \
'end_time - now())) < 10) a, fire_history h where h.id = a.id;'
# return the aggregated fire information with a given id
SQL_GET_LATEST_AGGREGATION = 'SELECT f.name, f.if_sequence,string_agg(distinct (f.agency), \', \'),f.state,f.id,' \
'min(f.time),Max(f.time), st_astext(st_union(st_makevalid(f.geom_full))) as geom_full,' \
'st_astext(st_union(st_makevalid(f.geom_1e4))),' \
'st_astext(st_union(st_makevalid(f.geom_1e3))),' \
'st_astext(st_union(st_makevalid(f.geom_1e2))),' \
'st_astext(st_centroid(st_union(st_makevalid(f.geom_center)))), ' \
'max(f.area) FROM (SELECT * FROM fire where id = {}) f ' \
'Group by f.name, f.if_sequence, f.state, f.id'
def __init__(self):
super().__init__()
logger.info(f"Looking for tables in database...")
# get all existing tables in database
self.existing_tables = set(table_tuple[0]
for table_tuple in Connection.sql_execute(FireDumper.SQL_CHECK_IF_TABLE_EXISTS))
logger.info(f"Table fire in database:{'fire' in self.existing_tables}")
logger.info(f"Table fire_history in database:{'fire_history' in self.existing_tables}")
logger.info(f"Table fire_merged in database:{'fire_merged' in self.existing_tables}")
logger.info(f"Table testing in database:{'new_table_testing' in self.existing_tables}")
@staticmethod
def _get_length_of_select_query_result(query: str) -> int:
"""
Takes a SELECT query and returns the number of rows the query returns.
:param query: str.
e.g. 'SELECT year,state,name FROM fire_history'
:return: int
"""
return sum(1 for _ in Connection.sql_execute(query))
def _create_table_if_not_exist(self, table_name: str) -> None:
"""
        Checks whether a given table exists and creates it if it does not.
        If it already exists, the function does nothing.
:param table_name: name of the table to look for or create.
Only 3 possible values: "fire_history", "fire", "fire_merged"
"""
# check if the fire_history table exists
# if the pipeline is run for the first time
# then the fire_history table will not exist
is_table_exist = table_name in self.existing_tables
# table is the result of the statement: sql_check_if_history_table_exists
if is_table_exist:
logger.info(f"Found the {table_name} table, continue ...")
else:
# if table does not exist, create the table
logger.info(f"No {table_name} exists. Creating a new one.")
# choose the corresponding query to execute
table_name_to_create_query = {
"fire_history": FireDumper.SQL_CREATE_HISTORY_TABLE,
"fire": FireDumper.SQL_CREATE_FIRE_TABLE,
"fire_merged": FireDumper.SQL_CREATE_FIRE_MERGED_TABLE,
"new_table_testing": "CREATE TABLE new_table_testing(tid int)"
}[table_name]
# execute the query to create the table in database
Connection.sql_execute_commit(table_name_to_create_query)
# add the table name into the global variable to keep consistency with the database
self.existing_tables.add(table_name)
logger.info(f"Table {table_name} created.")
def retrieve_all_fires(self) -> List[FireEvent]:
"""
Retrieves all fires in the database.
        :return: list of FireEvent objects
e.g. [FireEvent(-1, 2015, 'California', 'FireA'), FireEvent(-1, 2015, 'California', 'FireB')]
"""
# check if the fire_history table exists
# if not exist, executing sql_retrieve_all_fires will return an error
self._create_table_if_not_exist("fire_history")
# retrieve all fires in fire_history
        fire_event_objects = list(map(lambda fire_event_tuple: FireEvent.from_tuple(fire_event_tuple),
                                      Connection.sql_execute(FireDumper.SQL_RETRIEVE_ALL_FIRES)))
        # result now is a list of FireEvent objects
        # e.g. for https://rmgsc.cr.usgs.gov/outgoing/GeoMAC/2015_fire_data/California/Deer_Horn_2/
        # the FireEvent object is: Fire Event: Deer_Horn_2 in year 2015, state California
        return fire_event_objects
@staticmethod
def _generate_sql_statement_and_execute(sql_statement: str, data) -> None:
"""
Generates a SQL statement with the data given as a dictionary and execute, commit the changes.
:param sql_statement: string
e.g. "SELECT * FROM fire_history WHERE %{id}=199"
:param data: data as a dict.
e.g. {'year': 2019, 'firename': 'TRESTLE', 'agency': 'USFS', 'datetime': .....}
"""
with Connection() as conn:
cur = conn.cursor()
cur.execute(sql_statement, data)
conn.commit()
cur.close()
def insert(self, data: Dict[str, Any]) -> None:
"""
Inserts a single fire record from FireExtractor.extract() into fire table.
:param data:dict to insert, from extractor.extract()
e.g. {'year': 2019, 'firename': 'TRESTLE', 'agency': 'USFS', 'datetime': .....}
"""
logger.info(f"Inserting into fire table: {data['firename']} {data['datetime']}")
# check if the fire table exists
# if not exist, executing sql_insert will cause an error
self._create_table_if_not_exist("fire")
FireDumper._generate_sql_statement_and_execute(FireDumper.SQL_INSERT_FIRE, data)
logger.info(f"Finished inserting file: {data['firename']}{data['datetime']}")
def insert_history(self, fire: FireEvent) -> None:
"""
Inserts a fire record into fire_history table
:param fire: a FireEvent object representing a wild fire event
e.g. FireEvent(-1, 2015, 'California', 'FireQ')
:return: None
"""
logger.info(f"Inserting into fire_history table: {fire.url_name} in {fire.state} in {fire.year}")
self._create_table_if_not_exist("fire_history")
# make the dictionary to be inserted into fire_history
FireDumper._generate_sql_statement_and_execute(FireDumper.SQL_INSERT_FIRE_HISTORY, fire.to_dict())
logger.info(f"Finished inserting file: {fire.url_name} in {fire.state} in {fire.year}")
@staticmethod
def get_latest_fire_id() -> int:
"""
Gets the latest fire id to pass to the counter in data_from_fire
:return: int
e.g. 299
"""
# execute select statement
# the latest fire id is the first and only entry of the result
with Connection() as conn:
cur = conn.cursor()
cur.execute(FireDumper.SQL_GET_LATEST_ID)
result = cur.fetchone()[0]
cur.close()
return result
def get_recent_records(self) -> List[FireEvent]:
"""
Return the list of ids of most recent records.
:return: List of FireEvent objects
e.g. [FireEvent(-1, 2015, 'California', 'FireA'), FireEvent(-1, 2015, 'California',"FireQ")]
"""
# execute select statement to see if fire_merged table exists
# if it doesn't exist, create one
# if it exists, do nothing
self._create_table_if_not_exist("fire_merged")
        # execute select statement to get the ids of fires whose end_time is within 10 days
        # these are fires that may still be receiving updates
logger.info("Retrieving recent fires...")
old_fires = list(map(lambda old_fire: FireEvent.from_tuple(old_fire),
Connection.sql_execute(FireDumper.SQL_GET_LATEST_FIRE)))
logger.info(f"Fires updated within 10 days:{[str(old_fire) for old_fire in old_fires]}")
return old_fires
@staticmethod
def _generate_data(aggregated_record: Tuple[Any], fire_id: int) -> Tuple[List[Any], List[Any]]:
"""
Generates the data dictionary to pass to sql statement.
:param aggregated_record: tuple
e.g. ("FireA",True,"USFA","California",999,minTime,maxTime,geom....,total_area)
:param fire_id: int
e.g. 998
:return: Tuple[List[Any], List[Any]]
e.g. ([99,"Fire","20190806 15:00", "20190806 16:00"], ["Fire", False, "UFDS"...])
"""
columns = ["name", "if_sequence", "agency", "state", "id", "start_time", "end_time", "geom_full", "geom_1e4",
"geom_1e3", "geom_1e2", "geom_center", "max_area"]
info = dict(zip(columns, aggregated_record))
info["id"] = fire_id
fire_record_update = [info["id"], info["name"], info["start_time"], info["end_time"]]
fire_merged_insert = [i for i in info.values()]
return fire_record_update, fire_merged_insert
def get_aggregated_fire_with_id(self, year: int, name: str, state: str, id: int) -> List[Tuple]:
"""
Merges fire events with this id from fire table into several big fire events.
        Some pages might contain fires from multiple fire events, so the return value is a list of tuples.
        If there is more than one fire event, the returned list contains multiple tuples.
:param year: int
e.g. 1999
:param name: str
e.g. "FireA"
:param state: str
e.g. "California"
:param id: int
e.g. 9999
:return: list of tuples representing fire events
e.g. [(1999, "FireA",...), (1999, "FireB",....)]
"""
aggregated_fire_records_with_id = list(Connection.sql_execute(self.SQL_GET_LATEST_AGGREGATION.format(id)))
if not aggregated_fire_records_with_id:
# if this id is an empty record, then there is no aggregated fire records
# in this situation, we only mark the url as crawled, by inserting it into fire_history
self.insert_history(FireEvent(year, state, name, id))
# return the latest fire id
raise InvalidFireRecordException(f"Record {id} is an empty record. Skipping...")
logger.info(f"Successfully fetch Record #{id}, there are {len(aggregated_fire_records_with_id)} "
f"aggregated records in this id")
return aggregated_fire_records_with_id
def merge_fire_and_insert_history(self, id: int, year: int, name: str, state: str) -> int:
"""
A sequence of operations after inserting all records of a fire into fire table
including insertion into fire_merged, fire_history
:param id: int, id of the fire
:param year:int
:param name:str
:param state:str
:return:int
"""
self._create_table_if_not_exist("fire_merged")
try:
aggregated_records_with_id = self.get_aggregated_fire_with_id(year, name, state, id)
except InvalidFireRecordException:
logger.error(f"Met empty fire event, id:{id}")
return id
        # the records can be dirty: sometimes the folder of one fire includes fires with a different name,
        # in which case the merged result contains more than one record,
        # so a for loop is needed to handle this situation
        # if the number of records is 0, return the last id
new_id = id
for index, record in enumerate(aggregated_records_with_id):
# if the number of records is larger than 1 then the id need to be updated
new_id = id + index
            # In most situations there is only one record, so new_id == id
# if there is more than one, new_id will be id + i
# create the dictionary for all values in aggregated record
fire_info_update_params, fire_merged_insert_params = self._generate_data(record, new_id)
# update their id in fire_info
# here, if the new_id is different from id, the fire with that name will be updated with the new id
self._generate_sql_statement_and_execute(self.SQL_UPDATE_FIRE_INFO, fire_info_update_params)
# insert this set in fire_aggregate
self._generate_sql_statement_and_execute(self.SQL_INSERT_FIRE_INTO_MERGED, fire_merged_insert_params)
# insert this set into fire_crawl_history, mark it as crawled
self.insert_history(FireEvent(year, state, name, new_id))
return new_id
if __name__ == '__main__':
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
test_dumper = FireDumper()
# Test for _create_table_if_not_exist()
# FireDumper.create_history_table("fire_merged")
# Test for retrieve_all_fires()
# print(list(map(lambda fire: str(fire), test_dumper.retrieve_all_fires())))
# print(len(list(map(lambda fire: str(fire), test_dumper.retrieve_all_fires()))))
# test_dumper.insert_history(FireEvent(1999,"Sss","sss",198888))
# test_dumper.get_recent_records()
# get_aggregated_fire_with_id()
|
from django.shortcuts import render
# Create your views here.
def index(request):
my_dict = {'index':"hello from second app"}
return render(request,'second_app/index.html',context=my_dict) |
import torch
import torch.nn as nn
import torch.nn.functional as F
from gcake.models.gake import GAKE
from gcake.models.modules import Graph
from config import Config # TODO: remove device
class GAKEGraphEncoder(nn.Module):
def __init__(self, triples, num_entity, num_relation, dim):
super().__init__()
self.graph = Graph(triples)
self.gake = GAKE(num_entity, num_relation, dim)
    def set_ent_embeddings(self, ent_embeddings: nn.Embedding):  # TODO: only pass weight
self.gake.set_ent_embeddings(ent_embeddings)
    def forward(self, htrs, device=Config.device):  # TODO: remove device
        loss = torch.zeros(1, device=device)  # accumulate on the same device as the sub-losses
for h, r, t in htrs:
for entity_id in (int(h), int(t)):
_neighbor_ids = self.graph.get_neighbor_context(entity_id)
_path_ids = self.graph.get_path_context(entity_id)
_edge_ids = self.graph.get_edge_context(entity_id)
#
entity_id = torch.tensor(
[entity_id], dtype=torch.long).to(device)
neighbor_ids = torch.tensor(
_neighbor_ids, dtype=torch.long).to(device)
path_ids = torch.tensor(_path_ids, dtype=torch.long).to(device)
edge_ids = torch.tensor(_edge_ids, dtype=torch.long).to(device)
global_weight_p, _loss = self.gake(
entity_id, neighbor_ids, path_ids, edge_ids)
loss += _loss
return loss
|
"""References:
Guangcan Mai, Kai Cao, Pong C. Yuen and Anil K. Jain.
"On the Reconstruction of Face Images from Deep Face Templates."
IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) (2018)
Alec Radford, Luke Metz and Soumith Chintala.
"Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks."
ICLR2016
"""
import mxnet as mx
import numpy as np
import matplotlib.pyplot as plt
import logging
import cv2
from datetime import datetime
from symbol_dcgan160 import make_dcgan_sym
from collections import namedtuple
import argparse
import pdb
mx.random.seed(128)
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
class RandIter(mx.io.DataIter):
def __init__(self, batch_size, ndim):
self.batch_size = batch_size
self.ndim = ndim
self.provide_data = [('rand', (batch_size, ndim, 1, 1))]
self.provide_label = []
def iter_next(self):
return True
def getdata(self):
return [mx.random.normal(0, 1.0, shape=(self.batch_size, self.ndim, 1, 1))]
class ImagenetIter(mx.io.DataIter):
def __init__(self, path, batch_size, data_shape):
self.internal = mx.io.ImageRecordIter(
path_imgrec = path,
data_shape = data_shape,
batch_size = batch_size,
)
self.provide_data = [('data', (batch_size,) + data_shape)]
self.provide_label = []
def reset(self):
self.internal.reset()
def iter_next(self):
return self.internal.iter_next()
def getdata(self):
data = self.internal.getdata()
data_tmp = data.asnumpy().transpose((0,2,3,1))/255.
data_tmp = np.concatenate([np.expand_dims(cv2.resize(x[20:140,20:140,:],(160,160)),axis=0) for x in data_tmp])
data_tmp = data_tmp.transpose((0,3,1,2))
data = data_tmp*2.0 - 1.0
return [mx.nd.array(data)]
def fill_buf(buf, i, img, shape):
    # integer grid coordinates; use // so this also works on Python 3
    n = buf.shape[0] // shape[1]
    m = buf.shape[1] // shape[0]
    sx = (i % m) * shape[0]
    sy = (i // m) * shape[1]
    buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
def visual(title, X):
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
    n = int(np.ceil(np.sqrt(X.shape[0]))) # np.int was removed from NumPy
buff = np.zeros((n*X.shape[1], n*X.shape[2], X.shape[3]), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = cv2.cvtColor(buff, cv2.COLOR_BGR2RGB)
    plt.title(title)
    plt.imshow(buff)  # imshow takes the image array, not a title string
#cv2.waitKey(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="command for training dcgan")
parser.add_argument('--gpus', type=str, help='the gpu will be used, e.g "2"')
parser.add_argument('--data-path', type=str, help='the rec file for training')
parser.add_argument('--model-save-prefix', type=str, help='prefix for model saving')
args = parser.parse_args()
#logging.basicConfig(level=logging.DEBUG)
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
log_file_full_name = '%s%s.log'%(args.model_save_prefix,stamp)
handler = logging.FileHandler(log_file_full_name)
formatter = logging.Formatter(head)
handler.setFormatter(formatter)
logger.addHandler(handler)
# =============setting============
dataset = args.data_path.split('/')[-1].split('.')[0]
imgnet_path = args.data_path
ndf = 64
ngf = 64
nc = 3
batch_size = 64
Z = 100
lr_G = 0.0002
lr_D = 0.00005
beta1 = 0.5
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',')]
check_point = True
symG, symD = make_dcgan_sym(ngf, ndf, nc)
speedmeter = mx.callback.Speedometer(batch_size, 100)
ckp_G = mx.callback.do_checkpoint(args.model_save_prefix+'G')
ckp_D = mx.callback.do_checkpoint(args.model_save_prefix+'D')
# ==============data==============
train_iter = ImagenetIter(imgnet_path, batch_size, (3, 160, 160))
rand_iter = RandIter(batch_size, Z)
label = mx.nd.zeros((batch_size,), ctx=mx.cpu(0))
# =============module G=============
modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
modG.bind(data_shapes=rand_iter.provide_data)
modG.init_params(initializer=mx.init.Normal(0.02))
modG.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr_G,
'wd': 0.,
'beta1': beta1,
})
mods = [modG]
# =============module D=============
modD = mx.mod.Module(symbol=symD, data_names=('data',), label_names=('label',), context=ctx)
modD.bind(data_shapes=train_iter.provide_data,
label_shapes=[('label', (batch_size,))],
inputs_need_grad=True)
modD.init_params(initializer=mx.init.Normal(0.02))
modD.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr_D,
'wd': 0.,
'beta1': beta1,
})
mods.append(modD)
# ============printing==============
def norm_stat(d):
return mx.nd.norm(d)/np.sqrt(d.size)
mon = mx.mon.Monitor(10, norm_stat, pattern=".*output|d1_backward_data", sort=True)
mon = None
if mon is not None:
for mod in mods:
pass
def facc(label, pred):
pred = pred.ravel()
label = label.ravel()
return ((pred > 0.5) == (label>0.5)).mean()
def fentropy(label, pred):
pred = pred.ravel()
label = label.ravel()
return -(label*np.log(pred+1e-12) + (1.-label)*np.log(1.-pred+1e-12)).mean()
mG = mx.metric.CustomMetric(fentropy,name='Gfentropy')
mD = mx.metric.CustomMetric(fentropy,name='Dfentropy')
mACC = mx.metric.CustomMetric(facc)
    print('Training...')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
# =============train===============
for epoch in range(100):
train_iter.reset()
for t, batch in enumerate(train_iter):
rbatch = rand_iter.next()
if mon is not None:
mon.tic()
modG._exec_group.forward(rbatch, is_train=True)
outG = modG._exec_group.get_outputs()
# update discriminator on fake
mx.nd.random_uniform(low=0.,high=0.3,out=label)
#label[:] = 0
modD._exec_group.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD._exec_group.backward()
#modD.update()
gradD = [[grad.copyto(grad.context) for grad in grads] for grads in modD._exec_group.grad_arrays]
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update discriminator on real
mx.nd.random_uniform(low=0.7,high=1.2,out=label)
#label[:] = 1
batch.label = [label]
modD._exec_group.forward(batch, is_train=True)
modD._exec_group.backward()
for gradsr, gradsf in zip(modD._exec_group.grad_arrays, gradD):
for gradr, gradf in zip(gradsr, gradsf):
gradr += gradf
modD.update()
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update generator
mx.nd.random_uniform(low=0.7,high=1.2,out=label)
#label[:] = 1
modD._exec_group.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD._exec_group.backward()
diffD = modD._exec_group.get_input_grads()
modG._exec_group.backward(diffD)
modG.update()
mG.update([label], modD.get_outputs())
if mon is not None:
mon.toc_print()
t += 1
for mtc in [mACC,mG,mD]:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=t,
eval_metric=mtc,
locals=locals())
speedmeter(batch_end_params)
arg_params, aux_params = modG.get_params()
ckp_G(epoch,modG.symbol,arg_params, aux_params)
arg_params, aux_params = modD.get_params()
ckp_D(epoch,modD.symbol,arg_params, aux_params)
#if t % 10 == 0:
# print 'epoch:', epoch, 'iter:', t, 'metric:', mACC.get(), mG.get(), mD.get()
# mACC.reset()
# mG.reset()
# mD.reset()
#visual('gout', outG[0].asnumpy())
#diff = diffD[0].asnumpy()
#diff = (diff - diff.mean())/diff.std()
#visual('diff', diff)
#visual('data', batch.data[0].asnumpy())
# if check_point:
# print 'Saving...'
# modG.save_params('model_dcgan160/%s_G_%s-%04d.params'%(dataset, stamp, epoch))
# modD.save_params('model_dcgan160/%s_D_%s-%04d.params'%(dataset, stamp, epoch))
|
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataQualityOperator(BaseOperator):
check_sql = """
SELECT
COUNT(*)
FROM
{table}
WHERE
{where}
"""
ui_color = '#89DA59'
    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 queries=None,
                 *args, **kwargs):
        super(DataQualityOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        # avoid a mutable default argument
        self.queries = queries or []
def execute(self, context):
        redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)
self.log.info("Redshift hook defined")
# Running the data quality queries
for query in self.queries:
if query.get("table") is None:
self.log.error("Table name wasn't supplied.")
else:
table = query.get("table")
if query.get("where") is None:
where = "1=1"
else:
where = query.get("where")
if query.get("result") is None:
expected_result = 0
else:
expected_result = query.get("result")
# Formatting the SQL
sql = DataQualityOperator.check_sql.format(table=table, where=where)
self.log.info(sql)
# Querying data to Redshift
records = redshift.get_first(sql)
                # get_first returns the first row, or None if nothing came back
                if records is None or records[0] != expected_result:
                    actual = records[0] if records else None
                    self.log.error(f"""Table [{table}] with filters [{where}] failed the data quality check.
                    Expected result: [{expected_result}]
                    Actual result: [{actual}]""")
else:
self.log.info(f"Table [{table}] with filters [{where}] has passed data quality test.")
|
import logging
import sqlite3
from dataclasses import astuple
from typing import Generator
import psycopg2
from psycopg2.extensions import connection as _connection
from psycopg2.extras import DictCursor
from sqlite_to_postgres.db_settings import DSL
from sqlite_to_postgres.tables_db_ps import (FilmWork, FilmWorkGenre,
FilmWorkPersons, Genre, Person)
logger = logging.getLogger('log.load_from_sqlite')
logger.setLevel(logging.INFO)
TABLE_TO_SCHEMA = {
"person": Person,
"film_work": FilmWork,
"genre_film_work": FilmWorkGenre,
"person_film_work": FilmWorkPersons,
"genre": Genre,
}
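# Note: dict order is the load order; if the target schema enforces foreign
# keys, the parent tables (person, film_work, genre) must be loaded before
# the two link tables, so this mapping may need reordering.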
class SQLiteLoader:
def __init__(self, connection: sqlite3.Connection, batch_size: int = 100):
        connection.row_factory = sqlite3.Row
self.cur = connection.cursor()
self.batch_size = batch_size
def get_data(self, table_name: str, schema) -> Generator[list, None, None]:
fields = ', '.join(schema.__slots__)
if table_name == 'person':
fields = ', '.join(('full_name', 'birth_date', 'id',
'created_at', 'updated_at'))
        cursor = self.cur.execute(f"select {fields} from {table_name};")
while data := cursor.fetchmany(self.batch_size):
transformed_data = self.transform(data=data, table_name=table_name,
schema=schema)
yield transformed_data
def transform(self, data: list, table_name: str, schema):
if table_name == 'person':
return self.custom_transform_person(data=data)
return [schema(*row) for row in data]
def custom_transform_person(self, data: list):
return [Person(
id=person['id'],
first_name=person['full_name'].split(' ')[0],
last_name=' '.join(person['full_name'].split(' ')[1:]),
patronymic=None,
birthdate=person['birth_date'],
created_at=person['created_at'],
updated_at=person['updated_at']
) for person in data]
class PostgresSaver:
def __init__(self, pg_conn: _connection):
self.cur = pg_conn.cursor()
def create_data(self, data: Generator, table_name: str, schema) -> None:
        # materialise the batched generator into a flat list of value tuples
        rows = [astuple(film) for films in data for film in films]
fields = ', '.join(schema.__slots__)
values = ', '.join(['%s' for _ in range(len(schema.__slots__))])
if table_name == 'person':
values = '%s, %s, %s, %s, %s, %s, %s'
INSERT_SQL = f"""
INSERT INTO content.{table_name}({fields})
VALUES ({values})
ON CONFLICT DO NOTHING;
"""
        self.cur.executemany(INSERT_SQL, rows)
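        # For large batches, psycopg2.extras.execute_values is usually faster
        # than executemany; a sketch under the same table/fields assumptions:
        # from psycopg2.extras import execute_values
        # execute_values(
        #     self.cur,
        #     f"INSERT INTO content.{table_name} ({fields}) VALUES %s "
        #     "ON CONFLICT DO NOTHING;",
        #     rows)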
def load_from_sqlite(connection: sqlite3.Connection, pg_conn: _connection):
"""Основной метод загрузки данных из SQLite в Postgres"""
postgres_saver = PostgresSaver(pg_conn)
sqlite_loader = SQLiteLoader(connection)
for table_name, schema in TABLE_TO_SCHEMA.items():
try:
data = sqlite_loader.get_data(table_name, schema)
postgres_saver.create_data(data, table_name, schema)
        except Exception:
            # log the full traceback instead of quietly noting it at INFO level
            logger.exception('failed to copy table %s', table_name)
if __name__ == '__main__':
with sqlite3.connect('db.sqlite') as sqlite_conn, \
psycopg2.connect(**DSL, cursor_factory=DictCursor) as pg_conn:
load_from_sqlite(sqlite_conn, pg_conn)
    # the 'with' blocks above only commit/roll back; they do not close,
    # so close both connections explicitly
    sqlite_conn.close()
    pg_conn.close()
|
import compas
import compas_rhino
from compas.datastructures import Mesh
from compas.rpc import Proxy
from compas_rhino.artists import MeshArtist
numerical = Proxy('compas.numerical')
fd_numpy = numerical.fd_numpy
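# compas.rpc.Proxy runs 'compas.numerical' in a separate CPython process, so
# numpy-based solvers such as fd_numpy can be used from Rhino's IronPython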
compas_rhino.clear()
mesh = Mesh.from_obj(compas.get('faces.obj'))
mesh.update_default_vertex_attributes(is_anchor=False)
mesh.update_default_vertex_attributes(px=0.0, py=0.0, pz=0.0)
mesh.update_default_vertex_attributes(rx=0.0, ry=0.0, rz=0.0)
mesh.update_default_edge_attributes(q=1.0, f=0.0, l=0.0)
corners = list(mesh.vertices_where({'vertex_degree': 2}))
mesh.vertices_attribute('is_anchor', True, keys=corners)
mesh.vertices_attribute('z', 7.0, keys=[0, 35])
mesh.edges_attribute('q', 10, keys=list(mesh.edges_on_boundary()))
key_index = mesh.key_index()
xyz = mesh.vertices_attributes('xyz')
loads = mesh.vertices_attributes(('px', 'py', 'pz'))
fixed = [key_index[key] for key in mesh.vertices_where({'is_anchor': True})]
edges = [(key_index[u], key_index[v]) for u, v in mesh.edges()]
q = mesh.edges_attribute('q')
xyz, q, f, l, r = fd_numpy(xyz, edges, fixed, q, loads)
for key, attr in mesh.vertices(True):
index = key_index[key]
mesh.vertex_attributes(key, 'xyz', xyz[index])
mesh.vertex_attributes(key, ['rx', 'ry', 'rz'], r[index])
for index, (key, attr) in enumerate(mesh.edges(True)):
attr['q'] = q[index][0]
attr['f'] = f[index][0]
attr['l'] = l[index][0]
artist = MeshArtist(mesh)
artist.draw_mesh()
|
import pytest
from server.database import db_session as session, engine
from server import app
from sqlalchemy import event
from server.database import SeedData
@pytest.fixture
def client():
    app.testing = True
    return app.test_client()
@pytest.fixture(scope="session")
def seeds():
"""Load the test fixture/seed data once for the whole test session"""
seed_data = SeedData()
return seed_data
@pytest.fixture(scope='function')
def db_session():  # used to have the 'db' fixture as a param
    """
    Creates a new database session for a test. You must use this fixture
    if your test connects to the db.
    Both commit and rollback calls are supported: each test runs inside a
    transaction that is rolled back at the end.
    """
connection = engine.connect()
# connection = db.engine.connect()
transaction = connection.begin()
# options = dict(bind=connection, binds={})
# session = db.create_scoped_session(options=options)
session.begin_nested()
# session is actually a scoped_session
# for the `after_transaction_end` event, we need a session instance to
# listen for, hence the `session()` call
@event.listens_for(session(), 'after_transaction_end')
def restart_savepoint(sess, trans):
if trans.nested and not trans._parent.nested:
session.expire_all()
session.begin_nested()
# db.session = session
yield session
session.remove()
transaction.rollback()
connection.close()
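# A minimal usage sketch (hypothetical test; the table and column are
# illustrative): writes made through db_session are rolled back afterwards.
# def test_writes_are_isolated(db_session, seeds):
#     db_session.execute("INSERT INTO widgets (name) VALUES ('temp')")
#     db_session.commit()  # safe: the outer transaction is rolled back in teardown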
|
import re
def solution(dartResult):
    """Score a dart game string, e.g. solution('1S2D*3T') == 37."""
    bonus = {'S': 1, 'D': 2, 'T': 3}        # Single/Double/Triple exponents
    option = {'': 1, '*': 2, '#': -1}       # star doubles, hash negates
    p = re.compile(r'(\d+)([SDT])([*#]?)')  # raw string avoids the invalid \d escape
    dart = p.findall(dartResult)
    for i in range(len(dart)):
        # '*' also doubles the previous throw (already an int at this point)
        if dart[i][2] == '*' and i:
            dart[i - 1] *= 2
        dart[i] = int(dart[i][0]) ** bonus[dart[i][1]] * option[dart[i][2]]
    return sum(dart)
|
#Import TwythonError now too!
from twython import Twython, TwythonError
# fill in your own credentials (https://developer.twitter.com); never commit real keys
app_key = "YOUR_APP_KEY"
app_secret = "YOUR_APP_SECRET"
oauth_token = "YOUR_OAUTH_TOKEN"
oauth_token_secret = "YOUR_OAUTH_TOKEN_SECRET"
#Let's gather a list of words we DON'T want to RT
naughty_words = ["RT", "football", "Nazi", "fuck", "White House",
                 "Trump", "sexy", "kill", "horrible", "worst", "shit"]
#And a list of words we WOULD like to RT
good_words = ["cute", "kitten", "meow", "love", "funny"]
#OR is Twitter's search operator to search for this OR that
#So let's join everything in good_words with an OR!
#(named search_filter so it doesn't shadow the builtin filter)
search_filter = " OR ".join(good_words)
# The - is Twitter's search operator for negative keywords,
# so prefix every unwanted word with a -
# (multi-word phrases like "White House" would need quoting to match exactly)
blacklist = " ".join("-" + word for word in naughty_words)
#And finally our list of keywords that we want to search for
#This will search for any words in good_words minus any naughty_words, e.g.
#"cute OR kitten OR meow OR love OR funny -RT -football ... -shit"
keywords = search_filter + " " + blacklist
twitter = Twython(app_key, app_secret, oauth_token, oauth_token_secret)
#Search Twitter for our keyword string (good_words minus naughty_words)
search_results = twitter.search(q=keywords, count=5)
try:
    for tweet in search_results["statuses"]:
        twitter.retweet(id=tweet["id_str"])
except TwythonError as e:
    print(e)
|
# Agent-based animation of the epidemic.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.patches as patches
import os
colores = ['blue', 'red', 'green']  # circle colour, indexed by agent state
archivo = "data/animacion.txt"
##############################################################################################
# Animation
##############################################################################################
def trayectoria(ni_steps=0, np_steps=0, tpause=0.01):
    N = 100  # agents per frame
    L = 55   # box side length
    fig, ax = plt.subplots()
    for i in range(ni_steps, np_steps):
        if i % 100 == 0:
            print(i)
        # columns: x, y, (skipped), state; one block of N rows per frame.
        # read the block once instead of re-scanning the file three times
        frame = np.loadtxt(archivo, usecols=(0, 1, 3), skiprows=N*i, max_rows=N)
        x, y = frame[:, 0], frame[:, 1]
        estado = frame[:, 2].astype(int)
plt.cla()
plt.title("Agents system")
plt.xlabel("x coordinate")
plt.ylabel("y coordinate")
plt.axis('square')
plt.grid()
plt.xlim(-1,L+1)
plt.ylim(-1,L+1)
        try:
            for j in range(N):
                circ = patches.Circle((x[j], y[j]), 1, alpha=0.7, fc=colores[estado[j]])
                ax.add_patch(circ)
        except IndexError:
            # a truncated final block has fewer than N rows; skip the missing agents
            pass
plt.savefig("video/pic%.4i.png" %(i), dpi=100)
#plt.pause(tpause)
#########################################
def animacion(activada=False):
if activada:
path = "C:/Users/Admin/Desktop/Ident Evo-spatial-sir-CI. Dif inmu/video"
print(os.getcwd())
os.chdir(path)
print(os.getcwd())
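        # ffmpeg: -r 30 sets the frame rate, -i pic%04d.png reads the numbered
        # frames, -crf 25 controls quality (lower is better) and -pix_fmt yuv420p
        # keeps the mp4 playable in most players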
os.system('cmd /k "ffmpeg -r 30 -f image2 -s 1920x1080 -i pic%04d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p test.mp4"')
#########################################
def plot_data(activada=False):
if activada:
data = ["data/epid.txt"]
maxrows = None
N = 100
sane = np.loadtxt(data[0], usecols=0, max_rows=maxrows)
infected = np.loadtxt(data[0], usecols=1, max_rows=maxrows)
refractary = np.loadtxt(data[0], usecols=2, max_rows=maxrows)
time = np.loadtxt(data[0], usecols=3, max_rows=maxrows)
plt.xlabel("Time")
plt.ylabel("Populations")
plt.ylim(0,N)
plt.axhline(y=0, color="black")
plt.axvline(x=0, color="black")
        plt.plot(time, sane, label="Sane")
        plt.plot(time, infected, label="Infected")
        plt.plot(time, refractary, label="Refractory")
plt.legend()
plt.grid()
plt.show()
##############################################################################################
##############################################################################################
#trayectoria(12826,14131)
animacion(True)
plot_data()  # pass True to plot the population curves
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Dojo(models.Model):
    """A dojo to which ninjas belong."""
    name = models.CharField(max_length=255)
    city = models.CharField(max_length=255)
    state = models.CharField(max_length=2)
    def __repr__(self):
        return "Name: {} city: {} state: {}".format(self.name, self.city, self.state)
class Ninja(models.Model):
    """A ninja training at a dojo."""
    # on_delete is required on ForeignKey from Django 2.0 onwards
    dojo = models.ForeignKey(Dojo, related_name="ninjas", on_delete=models.CASCADE)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    def __repr__(self):
        return "Dojo: {} Name: {} {}".format(self.dojo, self.first_name, self.last_name)
|