index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
24,400 | 28a5cb9a0d07bfd3208992c1f384cf3563145b41 | #
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
class Port(base.SysinvObject):
    """Sysinv RPC object wrapping a host NIC port database row.

    'fields' maps each attribute name to the coercion helper applied when
    the attribute is set; '_foreign_fields' maps object attributes to
    'relation:column' paths on the backing DB row.
    """
    # Single shared DB API handle for all Port instances.
    dbapi = db_api.get_instance()

    fields = {
        'id': int,
        'uuid': utils.str_or_none,
        'host_id': utils.int_or_none,
        'host_uuid': utils.str_or_none,
        'node_id': utils.int_or_none,
        'node_uuid': utils.str_or_none,
        'interface_id': utils.int_or_none,
        'interface_uuid': utils.str_or_none,
        'type': utils.str_or_none,
        'name': utils.str_or_none,
        'namedisplay': utils.str_or_none,
        'pciaddr': utils.str_or_none,
        'dev_id': utils.int_or_none,
        'pclass': utils.str_or_none,
        'pvendor': utils.str_or_none,
        'pdevice': utils.str_or_none,
        'psvendor': utils.str_or_none,
        'dpdksupport': utils.bool_or_none,
        'psdevice': utils.str_or_none,
        'numa_node': utils.int_or_none,
        'sriov_totalvfs': utils.int_or_none,
        'sriov_numvfs': utils.int_or_none,
        'sriov_vfs_pci_address': utils.str_or_none,
        'sriov_vf_driver': utils.str_or_none,
        'sriov_vf_pdevice_id': utils.str_or_none,
        'driver': utils.str_or_none,
        'capabilities': utils.dict_or_none,
    }

    # Attributes resolved through related rows (host/node/interface).
    _foreign_fields = {'host_uuid': 'host:uuid',
                       'node_uuid': 'node:uuid',
                       'interface_uuid': 'interface:uuid'}

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Fetch a Port object by its UUID via the DB API."""
        return cls.dbapi.port_get(uuid)
|
24,401 | 0a6ec66ae3a48eba347faa6aebb9f719664a855f | rounded=lambda list:[round(x) for x in list]
print(rounded(map(float,input().split(' ')))) |
24,402 | 382f8e1c0e99c1154bcddaa9a6452340eb8491c2 | from pyramid.view import view_defaults, view_config
@view_defaults(renderer='json', request_method='GET')
class FilmsView:
    """Read-only JSON endpoints for films and their actors.

    All views render JSON and accept only GET (see view_defaults).
    A 'film_command' service is read from the request object —
    presumably attached by the app configuration; verify against setup.
    """

    def __init__(self, request):
        self.request = request

    @view_config(route_name='get_films')
    def get_films(self):
        """Return every film as a list of plain dicts."""
        films = self.request.film_command.get_films()
        # __dict__ yields a JSON-serialisable view of each film object.
        films = [f.__dict__ for f in films]
        return films

    @view_config(route_name='get_film')
    def get_film(self):
        """Return one film, looked up by the 'film_id' route segment."""
        film_id = self.request.matchdict['film_id']
        film = self.request.film_command.get_film(film_id)
        return film.__dict__

    @view_config(route_name='get_film_actors')
    def get_film_actors(self):
        """Return the actors of the film given by the 'film_id' segment."""
        film_id = self.request.matchdict['film_id']
        actors = self.request.film_command.get_film_actors(film_id)
        actors = [a.__dict__ for a in actors]
        return actors
|
24,403 | 31fa40047977410e3ccbb638e745293dfec8d27c | # code for processing the image data
# By Hang Wang
import numpy as np
import glob, os
import random
import sys, getopt
from PIL import Image
from natsort import natsorted
SIZE = 200
TRAINING_SAMPLE = 12000
TEST_SAMPLE = 3000
path = 'Images/'
def find_folder(path):
    """Walk every class folder under *path* and load its images.

    Each folder is one class; its label is its position in the
    natural-sorted folder list.

    Args:
        path (str): Directory containing one sub-folder per class.

    Returns:
        (data, label): parallel lists of image arrays and integer labels.
    """
    data = []
    label = []
    folders = os.listdir(path)
    folders = natsorted(folders, key=lambda y: y.lower())
    # enumerate() yields the label directly; the original
    # folders.index(f) was an O(n) scan per folder and returns the first
    # match, which mislabels duplicate names.
    for l, f in enumerate(folders):
        folder_path = path + f
        data, label = load_image(data, label, folder_path, l)
        print('folder: ' + f + ' finished...')
    print('finish!')
    return data, label
def load_image(data, label, folder_path, which_label):
    """Append every RGB .jpg in *folder_path* to the data/label lists.

    Images are resized to SIZE x SIZE; images whose array shape is not
    (SIZE, SIZE, 3) — e.g. grayscale or RGBA — are silently skipped.

    Args:
        data (list): Accumulator for image arrays (mutated in place).
        label (list): Accumulator for integer labels (mutated in place).
        folder_path (str): Folder to scan for *.jpg files.
        which_label (int): Label appended for each accepted image.

    Returns:
        The (mutated) data and label lists.
    """
    images_path = glob.glob(folder_path + '/' + '*.jpg')
    images_path = natsorted(images_path, key=lambda y: y.lower())
    for image in images_path:
        im = Image.open(image)
        im = im.resize([SIZE, SIZE])
        im_array = np.array(im)
        # Keep only 3-channel images so the final array stacks cleanly.
        if im_array.shape == (SIZE, SIZE, 3):
            label.append(which_label)
            data.append(im_array)
    return data, label
def usage():
    """
    usage : python3 load_image.py
    options
        --path <the input image path> default: Image/
        --size <the out image size> default: 200
        --train <the number of training samples> default: 12000
        --test <the number of test samples> default: 3000
    """
    # NOTE: the docstring doubles as the --help text (printed elsewhere
    # via usage.__doc__); calling the function itself is a no-op.
def para(argv):
    """Parse command-line options and update the module-level settings.

    Recognised options:
        -p / --path   input image directory
        --size        output image edge length (int)
        --train       number of training samples (int)
        --test        number of test samples (int)
        --help        print usage and exit

    Args:
        argv (list[str]): Argument vector, typically sys.argv[1:].
    """
    global SIZE, TRAINING_SAMPLE, TEST_SAMPLE, path
    try:
        opts, args = getopt.getopt(argv, "p:", ["path=", "size=", "train=", "test=", "help"])
    except getopt.GetoptError as err:
        print(str(err))
        print(usage.__doc__)
        sys.exit(1)
    for opt, arg in opts:
        if opt == '--help':
            print(usage.__doc__)
            sys.exit()
        elif opt in ('-p', '--path'):
            path = arg
        elif opt == '--size':
            # getopt yields strings; these three settings are used as
            # numbers later (image resize and random.sample counts), so
            # convert here — the original stored the raw strings.
            SIZE = int(arg)
        elif opt == '--train':
            TRAINING_SAMPLE = int(arg)
        elif opt == '--test':
            TEST_SAMPLE = int(arg)
if __name__ == "__main__":
    para(sys.argv[1:])
    data, label = find_folder(path)
    # Draw disjoint random index sets for the train and test splits.
    samples = list(range(len(label)))
    train_sample = random.sample(samples, TRAINING_SAMPLE)
    #train_sample.sort()
    samples = [x for x in samples if x not in train_sample]
    test_sample = random.sample(samples, TEST_SAMPLE)
    #test_sample.sort()
    # float16 keeps the (N, SIZE, SIZE, 3) image tensor compact on disk.
    data = np.array(data, dtype=np.float16)
    label = np.array(label, dtype=np.int32)
    train_data = data[train_sample, :, :, :]
    test_data = data[test_sample, :, :, :]
    train_label = label[train_sample]
    test_label = label[test_sample]
    # Persist the four arrays next to the script.
    np.save('train_data.npy', train_data)
    np.save('train_label.npy', train_label)
    np.save('test_data.npy', test_data)
    np.save('test_label.npy', test_label)
|
24,404 | f71b6d48a78a7fcf904c4bc1a41d6d92a72be368 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import *
@admin.register(Account)
class Account(UserAdmin):
    """Admin configuration for the Account user model.

    NOTE(review): this admin class reuses the name 'Account', shadowing
    the star-imported model of the same name after registration —
    consider renaming to AccountAdmin.
    """
    list_display = ['username', 'id', 'email', 'is_staff', 'first_name']
    # Edit-form layout, grouped into titled sections.
    fieldsets = (
        (None, {'fields': ('username', 'password')}),
        (('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
        (('User Type'), {'fields': ('type', 'badge', 'badge_title')}),
        (('Extra Profile Builder'), {'fields': ('sex', 'dob', 'profile_pic', 'phone', 'other_info')}),
        (('Permissions'), {
            'fields': ('is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions'),
        }),
        (('Important dates'), {'fields': ('last_login', 'date_joined')}),
    )
# The remaining models are registered with the default ModelAdmin.
admin.site.register(Story)
admin.site.register(EmergencyContact)
admin.site.register(AlertContact)
admin.site.register(Comment)
admin.site.register(Help)
|
24,405 | aca252086feca4114116dd85873d0766f5e9df31 | from DBOps import DBConnector
from EmployeeController import Employee
class Office:
    """Console front-end around DBConnector for simple employee CRUD.

    Every method prompts on stdin and delegates to the connector.
    """

    # Class-level defaults; instances rebind 'connector' in __init__.
    connector, empID = None, None

    def __init__(self):
        self.connector = DBConnector()

    # Get All Employees
    def get_all_employees(self):
        """Fetch every employee via the connector."""
        self.connector.selectAll()

    # Get Certain Employee
    def get_employee(self):
        """Prompt for an employee ID and look that employee up."""
        self.empID = int(input("Enter Employee ID: "))
        self.connector.selectOne(self.empID)

    # Get All Managers
    def get_managers(self):
        """Fetch all employees that are managers."""
        self.connector.selectManagers()

    # Upgrade Employee
    def promote(self):
        """Prompt for an employee ID and upgrade that employee."""
        self.empID = int(input("Enter Employee ID: "))
        self.connector.upgrade(self.empID)

    # Hire New Employee
    def hire(self):
        """Prompt for a new employee's details and insert them."""
        # 'emp_id' rather than 'id' so the builtin id() is not shadowed.
        emp_id = input('id: ')
        name = input('name : ')
        salary = input('salary : ')
        work_hours = input('work hours: ')
        self.connector.insert(emp_id, name, salary, work_hours)

    # Delete Employee
    def fire(self):
        """Prompt for an employee ID and delete that employee."""
        self.empID = int(input("Enter Employee ID: "))
        self.connector.delete(self.empID)
24,406 | a3c5476070c42b36cf031d3190593d3744733c5e | # Implements a random subset of LSL. Woo.
from uuid import UUID
# Functions
def llVecNorm(vec):
    """Return *vec* scaled to unit length."""
    magnitude = llVecMag(vec)
    return vec / magnitude
def llVecMag(vec):
    """Return the Euclidean length of *vec*."""
    squared = vec.x ** 2 + vec.y ** 2 + vec.z ** 2
    return squared ** 0.5
def llVecDist(vec1, vec2):
    """Return the distance between the points *vec1* and *vec2*."""
    delta = vec1 - vec2
    return llVecMag(delta)
# Constants
NULL_KEY = '00000000-0000-0000-0000-000000000000'  # the all-zero UUID
# Agent-info bit flags — one bit per agent state, combinable with |.
AGENT_FLYING = 0x0001
AGENT_ATTACHMENTS = 0x0002
AGENT_SCRIPTED = 0x0004
AGENT_MOUSELOOK = 0x0008
AGENT_SITTING = 0x0010
AGENT_ON_OBJECT = 0x0020
AGENT_AWAY = 0x0040
AGENT_WALKING = 0x0080
AGENT_IN_AIR = 0x0100
AGENT_TYPING = 0x0200
AGENT_CROUCHING = 0x0400
AGENT_BUSY = 0x0800
AGENT_ALWAYS_RUN = 0x1000
# Types
class vector(object):
    """Three-component float vector mirroring the LSL vector type."""

    def __init__(self, x, y, z):
        self.x = float(x)
        self.y = float(y)
        self.z = float(z)

    def __add__(self, other):
        if not isinstance(other, vector):
            # Return (don't raise) NotImplemented so Python can try the
            # reflected operation or raise a proper TypeError; the
            # original 'raise NotImplemented' itself raised a TypeError
            # because NotImplemented is not an exception.
            return NotImplemented
        return vector(self.x + other.x, self.y + other.y, self.z + other.z)

    def __sub__(self, other):
        if not isinstance(other, vector):
            return NotImplemented
        return vector(self.x - other.x, self.y - other.y, self.z - other.z)

    def __div__(self, other):
        # Python 2 division protocol (scalar divisor).
        return vector(self.x / other, self.y / other, self.z / other)

    # Python 3 uses __truediv__; without it 'vec / mag' (see llVecNorm)
    # raises TypeError on Python 3.
    __truediv__ = __div__

    def __str__(self):
        return '<%s, %s, %s>' % (self.x, self.y, self.z)

    def __repr__(self):
        return 'vector(%s, %s, %s)' % (self.x, self.y, self.z)

    @staticmethod
    def parse(string):
        """Build a vector from a string such as '<1, 2, 3>'."""
        parts = [float(x.strip()) for x in string.strip('()<>').split(',')]
        return vector(*parts)
class rotation(object):
    """LSL quaternion-style rotation with components x, y, z, w.

    Unlike vector, the components are stored exactly as given (no float
    coercion in the constructor).
    """

    def __init__(self, x, y, z, w):
        self.x, self.y, self.z, self.w = x, y, z, w

    @staticmethod
    def parse(string):
        """Build a rotation from a string such as '<0, 0, 0, 1>'."""
        trimmed = string.strip('()<>')
        components = [float(part.strip()) for part in trimmed.split(',')]
        return rotation(*components)
class key(object):
    """LSL key (UUID) wrapper; compares and hashes by string form."""

    def __init__(self, uuid=NULL_KEY):
        if not isinstance(uuid, UUID):
            self.uuid = UUID(uuid)
        else:
            self.uuid = uuid

    def __str__(self):
        # urn form is 'urn:uuid:<hex>'; take the bare hex part.
        return self.uuid.urn.split(':')[2]

    def __repr__(self):
        return "key('%s')" % self.__str__()

    def __hash__(self):
        return self.uuid.__hash__()

    def __eq__(self, other):
        # String comparison lets a key compare equal to a plain UUID
        # string as well as to another key.
        return str(self) == str(other)

    def __ne__(self, other):
        return not (self == other)

    def __nonzero__(self):
        # Python 2 truth protocol: only NULL_KEY is falsy.
        return (self.uuid.int != 0)

    # Python 3 truth protocol.
    __bool__ = __nonzero__

    @staticmethod
    def random():
        """Return a new random (version 4) key."""
        # uuid4 is a module-level function of the uuid module; the
        # original UUID.uuid4() raised AttributeError.
        from uuid import uuid4
        return key(uuid4())
24,407 | 75d7068b0df6472c34db4a9986a1ee458742de74 | ########################################################################
# Copyright 2018 FireEye Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################
from .wmi import *
from .winapi import *
def create_variant(val, v_type=None):
    '''
    Creates a VARIANT instance from a given value

    Args:
        val (any): The value to use when creating the VARIANT instance
        v_type (int): Variant type; when omitted the type is inferred
            from the Python type of val (int -> VT_I4, float -> VT_R4,
            str -> VT_BSTR)

    Returns:
        Initialized VARIANT instance

    Raises:
        NotImplementedError: If the requested or inferred type is not
            supported. (The original ``raise NotImplemented()`` was a
            bug — NotImplemented is neither callable nor an exception.)
    '''
    var = winapi.VARIANT()
    winapi.SET_VT(var, winapi.VT_NULL)
    if v_type:
        if v_type == winapi.VT_I4:
            winapi.SET_VT(var, winapi.VT_I4)
            winapi.V_VAR(var).lVal = ctypes.c_int32(val)
        elif v_type == winapi.VT_R4:
            winapi.SET_VT(var, winapi.VT_R4)
            winapi.V_VAR(var).fltVal = ctypes.c_float(val)
        elif v_type == winapi.VT_LPWSTR:
            winapi.SET_VT(var, winapi.VT_LPWSTR)
            winapi.V_VAR(var).bstrVal = val
        elif v_type == winapi.VT_BSTR:
            winapi.SET_VT(var, winapi.VT_BSTR)
            # BSTRs own heap memory; see destroy_variant for the free.
            bstr = winapi.SysAllocString(val)
            winapi.V_VAR(var).bstrVal = bstr
        else:
            raise NotImplementedError('Unsupported variant type: %r' % v_type)
    else:
        if isinstance(val, int):
            winapi.SET_VT(var, winapi.VT_I4)
            winapi.V_VAR(var).lVal = ctypes.c_int32(val)
        elif isinstance(val, float):
            winapi.SET_VT(var, winapi.VT_R4)
            winapi.V_VAR(var).fltVal = ctypes.c_float(val)
        elif isinstance(val, str):
            winapi.SET_VT(var, winapi.VT_BSTR)
            bstr = winapi.SysAllocString(val)
            winapi.V_VAR(var).bstrVal = bstr
        else:
            raise NotImplementedError('Unsupported value type: %r' % type(val))
    return var
def destroy_variant(var):
    '''
    Destroys an instance of a VARIANT

    Args:
        var (VARIANT): Instance to destroy

    Returns:
        Nothing
    '''
    # Only BSTR variants own heap memory that must be returned through
    # SysFreeString; other variant types used here hold plain values.
    # NOTE(review): this reads via V_VAR3 while create_variant writes
    # via V_VAR — confirm both accessors reach the same union member.
    if winapi.V_VT(var) == winapi.VT_BSTR:
        winapi.SysFreeString(winapi.V_VAR3(var).bstrVal)
def obj_to_dict(obj, include_var_type=False, flags=0):
    '''
    Converts WMI object to python dictionary

    Args:
        obj (any): The object to convert
        include_var_type (bool): Include variant type
        flags (int): Flags to pass into WMI API(s)

    Returns:
        Dictionary containing object information
    '''
    ret = {}
    obj.BeginEnumeration(flags)
    while True:
        try:
            # Next() yields (name, variant, type, flavor) and raises
            # WindowsError when the property enumeration is exhausted.
            prop_name, var, _, _ = obj.Next(flags)
            if include_var_type:
                ret[prop_name] = winapi.V_TO_VT_DICT(var)
            else:
                ret[prop_name] = winapi.V_TO_TYPE(var)
        except WindowsError:
            break
    obj.EndEnumeration()
    return ret
def safe_array_to_list(safe_array, element_type):
    '''
    Converts SAFEARRAY structure to python list

    The array is locked (SafeArrayAccessData), copied element-by-element
    and then unlocked and destroyed, so the caller must not reuse it.

    Args:
        safe_array (SAFEARRAY): Array structure to convert
        element_type (any): Type of the elements contained in the structure

    Returns:
        List containing the converted array elements
    '''
    ret = []
    data = winapi.SafeArrayAccessData(safe_array)
    str_array = ctypes.cast(data, ctypes.POINTER(element_type))
    try:
        # NOTE(review): SAFEARRAY.cbElements is documented as the element
        # *size in bytes*, not the element count — confirm this loop
        # bound is intentional for the arrays this receives.
        for i in range(safe_array.contents.cbElements):
            ret.append(str_array[i])
    finally:
        # Always unlock and free the array, even if conversion fails.
        winapi.SafeArrayUnaccessData(safe_array)
        winapi.SafeArrayDestroy(safe_array)
    return ret
def get_object_info(namespace, obj_name, values, user=None, password=None, include_var_type=False, flags=0):
    '''
    Gets desired values from an object.

    Args:
        namespace (str): Namespace to connect to
        obj_name (str): Path to object instance/class
        values (list): List of values to pull from object
        user (str): Username to use when connecting to remote system
        password (str): Password to use for remote connection
        include_var_type (bool): Include variant type
        flags (int): Flags to pass into WMI API(s)

    Returns:
        Dictionary containing object data
    '''
    ret = {}
    with WMI(namespace, user, password) as svc:
        with svc.GetObject(obj_name, flags, None) as obj:
            for value in values:
                # Get() returns (variant, type, flavor) per property.
                var, _, _ = obj.Get(value, flags)
                if include_var_type:
                    ret[value] = winapi.V_TO_VT_DICT(var)
                else:
                    ret[value] = winapi.V_TO_TYPE(var)
    return ret
def get_all_object_info(namespace, obj_name, user=None, password=None, include_var_type=False, flags=0):
    '''
    Gets all data from an object

    Args:
        namespace (str): Namespace to connect to
        obj_name (str): Path to object instance/class
        user (str): Username to use when connecting to remote system
        password (str): Password to use for remote connection
        include_var_type (bool): Include variant type
        flags (int): Flags to pass into WMI API(s)

    Returns:
        Dictionary containing object data
    '''
    with WMI(namespace, user, password) as svc:
        with svc.GetObject(obj_name, flags, None) as obj:
            # Delegate the property walk to obj_to_dict.
            ret = obj_to_dict(obj, include_var_type, flags)
    return ret
def get_object_names(namespace, obj_name, user=None, password=None, flags=0):
    '''
    Gets all names from a given object

    Args:
        namespace (str): Namespace to connect to
        obj_name (str): Path to object instance/class
        user (str): Username to use when connecting to remote system
        password (str): Password to use for remote connection
        flags (int): Flags to pass into WMI API(s)

    Returns:
        List containing object names
    '''
    with WMI(namespace, user, password) as svc:
        with svc.GetObject(obj_name, flags, None) as obj:
            # GetNames returns a SAFEARRAY of wide strings; convert (and
            # free) it via safe_array_to_list.
            ret = safe_array_to_list(obj.GetNames(None, flags, None), ctypes.c_wchar_p)
    return ret
def query(namespace, query_str, query_type='WQL', user=None, password=None, include_var_type=False, flags=0):
    '''
    Performs a WMI query

    Args:
        namespace (str): Namespace to connect to
        query_str (str): String to use for the query
        query_type (str): Query language identifier (e.g. 'WQL')
        user (str): Username to use when connecting to remote system
        password (str): Password to use for remote connection
        include_var_type (bool): Include variant type
        flags (int): Flags to pass into WMI API(s)

    Returns:
        Dictionary containing all objects returned by query
    '''
    ret = {}
    with WMI(namespace, user, password) as svc:
        with svc.ExecQuery(query_type, query_str, flags, None) as enum:
            # do query, then get all data
            while True:
                try:
                    # enum.Next raises WindowsError at end of results.
                    with enum.Next(winapi.WBEM_INFINITE) as obj:
                        obj.BeginEnumeration(flags)
                        inst_data = {}
                        while True:
                            try:
                                prop_name, var, _, _ = obj.Next(flags)
                                if include_var_type:
                                    inst_data[prop_name] = winapi.V_TO_VT_DICT(var)
                                else:
                                    inst_data[prop_name] = winapi.V_TO_TYPE(var)
                            except WindowsError:
                                # Property enumeration exhausted.
                                break
                        obj.EndEnumeration()
                        var, _, _ = obj.Get('__RELPATH', flags)
                        inst_relpath = winapi.V_TO_STR(var)
                        # use instance relative path for key
                        ret[inst_relpath] = inst_data
                except WindowsError:
                    break
    return ret
def does_object_exist(namespace, obj_name, user=None, password=None, flags=0):
    '''
    Tests if a given object exists

    Args:
        namespace (str): Namespace to connect to
        obj_name (str): Path to object instance/class
        user (str): Username to use when connecting to remote system
        password (str): Password to use for remote connection
        flags (int): Flags to pass into WMI API(s)

    Returns:
        True if object exists, or False if not
    '''
    ret = True
    with WMI(namespace, user, password) as svc:
        try:
            # GetObject raises WindowsError when the path can't resolve.
            with svc.GetObject(obj_name, flags, None):
                pass
        except WindowsError:
            ret = False
    return ret
def get_method_names(namespace, obj_name, user=None, password=None, flags=0):
    '''
    Gets all method names from a given object

    Args:
        namespace (str): Namespace to connect to
        obj_name (str): Path to object instance/class
        user (str): Username to use when connecting to remote system
        password (str): Password to use for remote connection
        flags (int): Flags to pass into WMI API(s)

    Returns:
        List of method names
    '''
    ret = []
    with WMI(namespace, user, password) as svc:
        with svc.GetObject(obj_name, flags, None) as obj:
            obj.BeginMethodEnumeration(flags)
            while True:
                try:
                    # NOTE(review): in_param/out_param are never
                    # Release()d here — possible COM reference leak;
                    # compare get_method_info, which does release them.
                    method_name, in_param, out_param = obj.NextMethod(flags)
                    ret.append(method_name)
                except WindowsError:
                    # Method enumeration exhausted.
                    break
            obj.EndMethodEnumeration()
    return ret
def get_method_info(namespace, obj_name, user=None, password=None, flags=0):
    '''
    Gets method information for a given object/method

    Args:
        namespace (str): Namespace to connect to
        obj_name (str): Path to object instance/class
        user (str): Username to use when connecting to remote system
        password (str): Password to use for remote connection
        flags (int): Flags to pass into WMI API(s)

    Returns:
        Dictionary containing method parameter info
    '''
    ret = {}
    with WMI(namespace, user, password) as svc:
        with svc.GetObject(obj_name, flags, None) as obj:
            obj.BeginMethodEnumeration(flags)
            while True:
                try:
                    method_name, in_sig, out_sig = obj.NextMethod(flags)
                    # Either signature object may be None for methods
                    # with no input or no output parameters.
                    in_sig_vals = {}
                    if in_sig:
                        in_sig_vals = obj_to_dict(in_sig)
                        in_sig.Release()
                    out_sig_vals = {}
                    if out_sig:
                        out_sig_vals = obj_to_dict(out_sig)
                        out_sig.Release()
                    ret[method_name] = {'in_signature': in_sig_vals,
                                        'out_signature': out_sig_vals}
                except WindowsError:
                    # Method enumeration exhausted.
                    break
            obj.EndMethodEnumeration()
    return ret
def call_method(namespace, obj_path, method_name, input_params=None, user=None, password=None, flags=0):
    '''
    Calls object method

    Args:
        namespace (str): Namespace to connect to
        obj_path (str): Path to object instance/class
        method_name (str): Name of the method to call
        input_params (dict): Method input parameters; each value is
            either a ready VARIANT, a plain python value, or a dict of
            the form {'type': <VT_* code>, 'value': <python value>}
        user (str): Username to use when connecting to remote system
        password (str): Password to use for remote connection
        flags (int): Flags to pass into WMI API(s)

    Returns:
        Dictionary containing data returned from the called method
    '''
    ret = {}
    with WMI(namespace, user, password) as svc:
        # The method definition lives on the class, so strip an
        # instance path like 'Class.Key="x"' down to 'Class'.
        class_name = obj_path
        if '.' in class_name:
            class_name = class_name.split('.')[0]
        with svc.GetObject(class_name, flags, None) as obj:
            if input_params:
                in_obj_param, out_obj_param = obj.GetMethod(method_name, flags)
                if in_obj_param:
                    for prop, var in input_params.items():
                        if isinstance(var, dict) or not isinstance(var, VARIANT):
                            # Not a ready VARIANT: build one, Put it,
                            # then free the temporary.
                            if isinstance(var, dict):
                                if 'type' in var:
                                    in_var = create_variant(var['value'], var['type'])
                                else:
                                    raise Exception('Variant type must be specified')
                            else:
                                in_var = create_variant(var)
                            in_obj_param.Put(prop, flags, in_var, 0)
                            destroy_variant(in_var)
                        else:
                            # Caller-supplied VARIANT is used as-is.
                            in_obj_param.Put(prop, flags, var, 0)
            else:
                in_obj_param, out_obj_param = obj.GetMethod(method_name, flags)
            # Only the input-parameter object is passed on; the output
            # signature object is not needed for the call itself.
            if out_obj_param:
                out_obj_param.Release()
            out_obj = svc.ExecMethod(obj_path, method_name, flags, None, in_obj_param)
            if in_obj_param:
                in_obj_param.Release()
            if out_obj:
                ret = obj_to_dict(out_obj)
                out_obj.Release()
    return ret
|
24,408 | f6fe0643f5712e99d271807117b3038db2e4d50e |
# Gather the user's data for the BMI (IMC) calculation.
nombre = input('\n Índice de masa corporal\n\nIngrese su nombre: ')
estatura = float(input('Ingrese su estatura en m: '))  # metres
peso = int(input('Ingrese su peso en Kg: '))  # kilograms
def IMC(x, y):
    """Compute, print and return the body-mass index x / y**2.

    Args:
        x: Weight in kilograms.
        y: Height in metres.

    Returns:
        The computed BMI value.
    """
    # Local renamed from 'IMC' so it no longer shadows the function's
    # own name.
    imc = x / (y ** 2)
    print(f'\n{nombre}')  # 'nombre' is read from the module globals
    print(f'Su IMC es: {imc}\n\n')
    print(Tabla())
    return imc
def Tabla():
    """Return the reference BMI classification table as one string."""
    return (' IMC NIVEL DE PESO\n Por debajo de 18.5 Bajo peso\n 18.5 – 24.9 Normal\n 25.0 – 29.9 Sobrepeso\n 30.0 o más Obeso')
# Run the calculation with the values gathered above.
IMC(peso,estatura)
|
24,409 | 58a30b11364771f5bc38faef4c60452ce11cd6f7 | # Generated by Django 2.0.5 on 2019-02-28 16:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Creates the ProductBatchNumbers model (tbl_productbatchnumbers)."""

    dependencies = [
        ('mrDatabaseModels', '0004_productlist_rankingingroup'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProductBatchNumbers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Unique external batch identifier.
                ('batchMRid', models.CharField(db_column='batchMRid', max_length=255, unique=True)),
                ('cleared', models.BooleanField(db_column='cleared', default=True)),
                # Each batch belongs to one product; deleting the product
                # cascades to its batches.
                ('productid', models.ForeignKey(db_column='productid', on_delete=django.db.models.deletion.CASCADE, to='mrDatabaseModels.Productlist')),
            ],
            options={
                'db_table': 'tbl_productbatchnumbers',
                'ordering': ['batchMRid'],
                'managed': True,
            },
        ),
    ]
|
24,410 | b135366b9dcedbb05e937cbd55f293248e90995f | #!/Users/potiejun/workspace/python_workspace/spider/scrapy_code_learning/venc/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from scrapy.cmdline import execute
# Report where scrapy is installed (or why it can't be imported).  The
# bare Python 2 'print scrapy.__path__' statement was a syntax error on
# Python 3 and inconsistent with the print(e) call below.
try:
    import scrapy
    print(scrapy.__path__)
except ImportError as e:
    print(e)

if __name__ == '__main__':
    # Strip setuptools wrapper suffixes from argv[0] so scrapy's cmdline
    # sees a clean program name, then hand control to scrapy.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(execute())
|
24,411 | c9448994ea687b62655047ef90e563eb7eaa5b2f | import math
import numpy as np
import sys
import csv
import os
def distance_checker(xyz1, xyz2):
    """ Returns distance between 2 threedimensional points"""
    dx = xyz1[0] - xyz2[0]
    dy = xyz1[1] - xyz2[1]
    dz = xyz1[2] - xyz2[2]
    return math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
def normaliser(vec):
    """Normalises a vector in place and returns it."""
    norm = np.linalg.norm(vec)
    for i, component in enumerate(vec):
        vec[i] = component / norm
    return vec
def angle_checker(vec1, vec2):
    """
    Calculates angle in radians between two vectors. Note: this is an absolute
    angle.
    """
    # Normalise both inputs (in place, via normaliser), then take the
    # arccos of their clipped dot product.
    unit1 = normaliser(vec1)
    unit2 = normaliser(vec2)
    cosine = np.clip(np.dot(unit1, unit2), -1, 1)
    return np.arccos(cosine)
def rotation_matrix(axis, theta):
    """
    Return the rotation matrix associated with counterclockwise rotation about
    the given axis by theta radians. Taken from
    https://stackoverflow.com/questions/6802577/rotation-of-3d-vector
    """
    # Normalise the axis, then form the quaternion (w, x, y, z).
    axis = np.asarray(axis)
    axis = axis / math.sqrt(np.dot(axis, axis))
    w = math.cos(theta / 2.0)
    x, y, z = -axis * math.sin(theta / 2.0)
    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    xy, wz, wy, wx, xz, yz = x * y, w * z, w * y, w * x, x * z, y * z
    # Standard quaternion-to-rotation-matrix expansion.
    return np.array([[ww + xx - yy - zz, 2 * (xy + wz), 2 * (xz - wy)],
                     [2 * (xy - wz), ww + yy - xx - zz, 2 * (yz + wx)],
                     [2 * (xz + wy), 2 * (yz - wx), ww + zz - xx - yy]])
def check_bond_len(dict, el_a, el_b):
    """ Make sure all elements are in bond_len_dict, and return the value"""
    # NOTE: parameter 'dict' shadows the builtin; kept as-is for
    # interface compatibility.
    if el_a in dict:
        if el_b in dict[el_a]:
            return dict[el_a][el_b]
    # Unknown element pair: report and abort the whole program.
    print()
    print(el_a + " and " + el_b + " bond length currently unsupported. Add value to the csv file.")
    sys.exit()
def csv2dict(filename):
    """
    Transforms the bond_lengths.csv to a dict

    The "Element Name" column becomes the outer key; every other column
    becomes an inner key mapped to the float bond length.

    Args:
        filename (str): Path to the bond-length csv file.

    Returns:
        dict: {element: {other_element: bond_length}}
    """
    dis_dict = {}
    # The with-statement closes the file; the original also called
    # csvfile.close() inside the block, which was redundant.
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            el_a = row["Element Name"]
            dis_dict[el_a] = {}
            for entry in row:
                if entry != "Element Name":
                    dis_dict[el_a][entry] = float(row[entry])
    return dis_dict
def bond_checker(atom, dict, bond_dict):
    """Check for all atoms in bonding range"""
    # atom is [element, x, y, z]; dict maps id -> {"element", "coor"}.
    bound = []
    for item, values in dict.items():
        # Reference bond length plus a 0.2 tolerance (same units as the
        # bond-length csv).
        bond_range = check_bond_len(bond_dict, atom[0], values["element"]) + 0.2
        if distance_checker(atom[1:], values["coor"]) <= bond_range:
            bound.append(item)
    return bound
def closest_atom(dict, coor):
    """Given a dict and coordinates returns the closest atom

    Atoms closer than 0.01 are treated as the query atom itself and
    skipped.

    Returns:
        The id of the closest atom, or None when no candidate exists
        (the original raised UnboundLocalError on an empty dict).
    """
    min_dis = math.inf
    min_id = None
    for atom, values in dict.items():
        dis = distance_checker(coor, values["coor"])
        if dis < min_dis and dis > 0.01:
            min_dis = dis
            min_id = atom
    return min_id
def print_lig():
    """ Prints available ligands """
    # Lists ../Ligands and prints every .xyz file's basename.
    lig_list = os.listdir("../Ligands")
    print()
    for ligs in lig_list:
        # Skip folders
        if ligs[-4:] == ".xyz":
            print(ligs[:-4])
    print()
def file2dict(file, dict, start_id):
    """
    Builds simple dict out of .xyz file, containing just id, elements and
    coordinates

    Args:
        file: Open .xyz file object (rewound before reading).
        dict: Dict to extend; each entry becomes
            {"coor": [x, y, z], "element": symbol}.
        start_id (int): Id assigned to the first atom read.

    Returns:
        The (mutated) dict.
    """
    atom_id = start_id  # renamed from 'id' to avoid shadowing the builtin
    line_number = 0
    file.seek(0)
    for line in file:
        if line_number == 0:
            # The first .xyz line carries the atom count.
            n_atoms = int(float(line.strip()))
        # Line 1 is the comment line; atoms occupy lines 2 .. n_atoms+1.
        if line_number >= 2 and line_number < n_atoms + 2:
            values_list = line.split()
            for i in range(1, 4):
                values_list[i] = float(values_list[i])
            dict[atom_id] = {
                "coor": values_list[1:],
                "element": values_list[0]
            }
            atom_id += 1
        line_number += 1
    return dict
def dict2file(dict, filename, foldername):
    """
    Takes an atom dict and writes it to an .xyz file in foldername in
    /Created_QD with filename as name for the file

    Args:
        dict: Atom dict ({id: {"element": str, "coor": [x, y, z]}}).
        filename (str): Name of the .xyz file (without extension).
        foldername (str): Optional subfolder inside ../Created_QD.
    """
    if foldername:
        if not os.path.exists("../Created_QD/" + foldername):
            os.makedirs("../Created_QD/" + foldername)
        out_path = "../Created_QD/" + foldername + "/" + filename + ".xyz"
    else:
        out_path = "../Created_QD/" + filename + ".xyz"
    with open(out_path, "w") as file:
        # Write the atom count up front.  The original wrote a one-space
        # placeholder and patched the count in with seek(0), which
        # corrupted the header as soon as the count exceeded one digit.
        file.write(str(len(dict)) + "\n\n")
        for atom, values in dict.items():
            file.write(values['element'] + "\t" + str(values['coor'][0]) + "\t\t" +
                       str(values['coor'][1]) + "\t\t" + str(values['coor'][2]) + "\n")
    print("\nQuantum Dot created :)")
def base_atom(dict):
    """ Finds atoms at the origin in a dict, returns its id"""
    # Returns None implicitly when no atom sits exactly at (0, 0, 0).
    for atom_id, values in dict.items():
        coor = values["coor"]
        if coor[0] == coor[1] == coor[2] == 0:
            return atom_id
def y2true(text):
    """Converts strings y and n to boolean"""
    # Keep re-prompting until the answer is exactly 'y' or 'n'.
    while text not in ('y', 'n'):
        text = input("Wrong input, try again: ")
    return text == 'y'
|
24,412 | 038162ba4fe36569099b71be73a5340cf293cd8c | import sys
def fun1(num):
    """Print the change for a dollar amount in 25/10/5/1-cent coins.

    Args:
        num: Dollar amount as a string or number, e.g. "0.41".
    """
    # round() before int(): float artefacts such as 0.29*100 == 28.999...
    # otherwise truncate to the wrong cent total.
    num = int(round(float(num) * 100))
    cent = [25, 10, 5, 1]
    counts = []
    # Always compute all four counts; the original while-loop stopped as
    # soon as the remainder hit zero and then crashed indexing the
    # missing entries (e.g. for "0.25").
    for c in cent:
        counts.append(num // c)
        num = num % c
    if counts[0] != 0:
        print('%d 个25美分' % counts[0] + '\n')
    if counts[1] != 0:
        print('%d 个10美分' % counts[1] + '\n')
    if counts[2] != 0:
        print('%d 个5美分' % counts[2] + '\n')
    if counts[3] != 0:
        print('%d 个1美分' % counts[3])
    return
if __name__=='__main__':
    # Amount is the first CLI argument, e.g. 'python script.py 0.41'.
    fun1(sys.argv[1])
|
24,413 | 494641e12c3489ac4e91188756a715b9585ea53a | import spacy
# Load the English pipeline. NOTE(review): the 'en' shortcut is from
# older spaCy releases; newer versions use a full model name such as
# 'en_core_web_sm' — confirm against the installed spaCy version.
nlp = spacy.load('en')
# Add neural coref to SpaCy's pipe
import neuralcoref
neuralcoref.add_to_pipe(nlp)

# Demo sentences; the ._ extension attributes (has_coref,
# coref_clusters) are installed by neuralcoref.
sentences = ['My sister has a dog. She loves him.',
             'Some like to play football, others are fond of basketball.',
             'The more a man knows, the more he feels his ignorance.']
for s in sentences:
    n = nlp(s)
    print('Sentence:', s)
    if n._.has_coref:
        print('The sentence has coref.')
        print('The coref clusters:')
        print(n._.coref_clusters, '\n')
    else:
        print('The sentence has no coref.\n')
|
24,414 | 22e06202761dca0a8983cc6a069bab5e0530466b | import unittest
from parser.parse import Word, And, Infix, Literal, Optional
class Tests(unittest.TestCase):
    """Unit tests for the combinator parser (And, Infix, Optional)."""

    def test_and(self):
        """Word + Word fires the And action once with both tokens."""
        actions = []

        def and_action(parser, tokens):
            actions.append(tokens)
            return tokens

        hede = Word() + Word()
        hede.set_action(and_action)
        hede.parse_string({}, 'a b')
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(actions, [['a', 'b']])

    def test_infix(self):
        """Infix fires operand actions first, then the expression action."""
        actions = []

        def infix_action(parser, tokens):
            actions.append(tokens)
            return tokens

        var = Word()

        def var_action(parser, tokens):
            actions.extend(tokens)
            return tokens[0]

        var.set_action(var_action)
        hede = Infix(var, Literal("+"))
        hede.set_action(infix_action)
        hede.parse_string({}, 'a + b')
        self.assertEqual(actions, ['a', 'b', ['a', '+', 'b']])

    def test_optional_and(self):
        """A missing Optional part is reported as None."""
        varname = Word()
        actions = []

        def varname_action(parser, tokens):
            actions.append(tokens)
            return tokens

        varname.set_action(varname_action)
        trailer = varname + Optional(Word())

        def trailer_action(parser, tokens):
            actions.append(tokens)
            return tokens

        trailer.set_action(trailer_action)
        # print() calls: the original bare Python 2 print statements
        # were syntax errors on Python 3.
        print(trailer.parse_string({}, "var"))
        self.assertEqual(actions, [['var'], ['var', None]])
        actions = []
        print(trailer.parse_string({}, "var var2"))
        print("sadasd" + str(actions))
if __name__ == "__main__":
    # Discover and run the Tests methods under the unittest runner.
    unittest.main()
|
24,415 | f5ea897323b299792a229a6109d07e87387bcd50 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import ast
import pandas as pd
import numpy as np
# Source and destination directories for the per-split csv files.
CSV_PATH = '/data/mask/mask_data/quadrilateral_2'
SAVE_PATH = '/data/mask/mask_data/quadrilateral_2'
# Column order: filename, then the (height, width) coordinates of the
# four corners of each quadrilateral.
CSV_COLUMNS = [
    'filename',
    'top_left_height', 'top_left_width',
    'top_right_height', 'top_right_width',
    'bottom_left_height', 'bottom_left_width',
    'bottom_right_height', 'bottom_right_width',
]
def main(dataset_split):
    """Modify csv file.

    Re-orders the two quadrilaterals stored in each row — top-to-bottom
    by height, falling back to left-to-right when the heights differ by
    less than a threshold — then rewrites the split's csv to SAVE_PATH.

    Args:
        dataset_split: string representing dataset split.
    """
    rows = []
    csv = pd.read_csv(os.path.join(CSV_PATH, dataset_split + '.csv'))
    for i in range(len(csv)):
        record = csv.iloc[i]
        filename = record['filename']
        # Each coordinate cell holds a python-literal list; parse them
        # in CSV_COLUMNS order (this replaces eight copy-pasted
        # ast.literal_eval blocks).
        coordinates = [ast.literal_eval(str(record[col]))
                       for col in CSV_COLUMNS[1:]]
        # set the order of quadrilaterals.
        if len(coordinates[0]) > 1:
            # Pair the per-quadrilateral values: top edge heights and
            # left edge widths of quad 0 vs quad 1.
            height_list = list(zip(coordinates[0], coordinates[2]))
            width_list = list(zip(coordinates[1], coordinates[5]))
            threshold = 20
            # if delta height less than threshold, set the order of
            # quadrilaterals according to width
            if abs(min(height_list[0]) - min(height_list[1])) < threshold:
                if min(width_list[0]) > min(width_list[1]):
                    coordinates = [x[::-1] for x in coordinates]
            # if delta height greater than threshold, set the order of
            # quadrilaterals according to height
            elif min(height_list[0]) > min(height_list[1]):
                coordinates = [x[::-1] for x in coordinates]
        rows.append([filename] + coordinates)
    # Build the frame once at the end: per-row DataFrame.append was
    # deprecated (removed in pandas 2.0) and quadratic.
    df = pd.DataFrame(rows, columns=CSV_COLUMNS)
    df.to_csv(os.path.join(SAVE_PATH, dataset_split + '.csv'))
if __name__ == '__main__':
    if not os.path.exists(SAVE_PATH):
        os.makedirs(SAVE_PATH)
    # Re-order the quadrilaterals in every dataset split's csv.
    dataset = ['train', 'test', 'val']
    for dataset_split in dataset:
        main(dataset_split)
        print('Finish processing ' + dataset_split)
|
24,416 | d3a9e52bf6e31948610eafeef30f2b1b8c8e1112 | # Andy Dean
# andy.dean@preservica.com 13/02/2020
# Create an access token
#
# For use with Preservica version 6.x
#
# THIS SCRIPTS IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# Use at your own risk. If it works for you share it, and please provide feedback or share improvements at developers.preservica.com (going live May 2020)
#########################################################################################
import sys
import time
#########################################################################################
def securitytoken(config_input):
    """Return a Preservica access token, reusing a cached one when fresh.

    The token and its creation epoch are cached in '<config>.token.file';
    a cached token older than 500 seconds is replaced via newtoken().

    Args:
        config_input (str): Path to the configuration file.

    Returns:
        str: A (possibly cached) access token.
    """
    from pathlib import Path
    tokenfilepath = config_input + ".token.file"
    my_file = Path(tokenfilepath)
    if my_file.is_file():
        # Cache file layout: line 0 is the creation epoch, line 1 the
        # token. The with-statement replaces the open/close pair.
        with open(tokenfilepath) as f:
            lines = f.read().split("\n")
        print(time.time())
        print(float(lines[0]))
        if time.time() - float(lines[0]) > 500:
            # Cached token too old: request a fresh one.
            sessiontoken = newtoken(config_input, tokenfilepath)
            print(sessiontoken)
        else:
            sessiontoken = lines[1]
            print(sessiontoken)
    else:
        # No cache yet: request and cache a first token.
        sessiontoken = newtoken(config_input, tokenfilepath)
        print(sessiontoken)
    return sessiontoken
#########################################################################################
#get new token function
def newtoken(config_input, tokenfilepath):
    """Request a new access token and cache it to *tokenfilepath*.

    Reads URL/Host/Username/Password/Tenant from the DEFAULT section of
    the config file, POSTs for a token, and writes the creation epoch
    followed by the token (newline separated) to the cache file.

    Args:
        config_input (str): Path to the configuration file.
        tokenfilepath (str): Path of the token cache file to write.

    Returns:
        str: The newly issued token.
    """
    import configparser
    import requests
    print(config_input)
    print(tokenfilepath)
    # read from config file to get the correct parameters for the token request
    config = configparser.ConfigParser()
    config.read(config_input)
    url = config['DEFAULT']['URL']
    hostval = config['DEFAULT']['Host']
    usernameval = config['DEFAULT']['Username']
    passwordval = config['DEFAULT']['Password']
    tenantval = config['DEFAULT']['Tenant']
    # build the query string and get a new token
    querystring = {"username": usernameval, "password": passwordval, "tenant": tenantval}
    headers = {
        'Accept': "*/*",
        'Cache-Control': "no-cache",
        'Host': hostval,
        'Accept-Encoding': "gzip, deflate",
        'Content-Length': "0",
        'Connection': "keep-alive",
        'cache-control': "no-cache"
    }
    response = requests.request("POST", url, headers=headers, params=querystring)
    # raise_for_status() raises on 4xx/5xx and returns None on success,
    # so this print only ever shows 'None' for a good response.
    print(response.raise_for_status())
    data = response.json()
    tokenval = data["token"]
    timenow = str(time.time())
    # write token to token.file for later reuse (with-statement
    # guarantees the file is flushed and closed)
    with open(tokenfilepath, "w") as tokenfile:
        tokenfile.write(timenow)
        tokenfile.write("\n")
        tokenfile.write(tokenval)
    return tokenval
#########################################################################################
|
24,417 | 6690cce2e375fd6391e2f0babe09109e1a2cfe27 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
import command_processor
class CommandProcessorTest(unittest.TestCase):
    """Tests for command_processor.process_command dispatch by command name."""

    def test_process_command(self):
        """Only the handler whose command_name matches must be invoked."""
        mock_a = mock.Mock()
        mock_a.command_name = 'CommandA'
        mock_a.process_commandline_request = mock.Mock()
        mock_b = mock.Mock()
        mock_b.command_name = 'CommandB'
        mock_b.process_commandline_request = mock.Mock()
        registry = [mock_a, mock_b]
        options = {}
        command_processor.process_command('CommandB', options, registry)
        # Fix: assertEquals is a long-deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(0, mock_a.process_commandline_request.call_count)
        self.assertEqual(1, mock_b.process_commandline_request.call_count)
        mock_b.process_commandline_request.assert_called_with(options)

    def test_process_command_not_found(self):
        """An unknown command name must raise ValueError."""
        mock_a = mock.Mock()
        mock_a.command_name = 'CommandA'
        with self.assertRaises(ValueError):
            command_processor.process_command('CommandB', {}, [mock_a])

    # NOTE(review): the three methods below lack the 'test_' prefix, so the
    # unittest runner never executes them; they also reference CommandHandler,
    # which is not imported in this module. Renaming them would make them run
    # (and currently fail on the missing import), so they are left unchanged
    # and flagged for follow-up.
    def accepts_content_type(self):
        request = mock.Mock()
        request.headers = {'accept': 'text/html'}
        self.assertTrue(CommandHandler.accepts_content_type(request, 'text/html'))
        request.headers = {'accept': 'text/plain,text/html'}
        self.assertTrue(CommandHandler.accepts_content_type(request, 'text/html'))

    def does_not_implicitly_accept_content_type(self):
        request = mock.Mock()
        self.assertFalse(CommandHandler.accepts_content_type(request, 'text/html'))

    def does_not_accept_content_type(self):
        request = mock.Mock()
        request.headers = {'accept': 'text/plain'}
        self.assertFalse(CommandHandler.accepts_content_type(request, 'text/html'))


if __name__ == '__main__':
    unittest.main()
|
24,418 | ee9d554c8b264e59927b49b678ccec1e885a0882 | """
1. 라이브러리 임포트
- https://www.selenium.dev/documentation/en/webdriver/driver_requirements/#quick-reference
- v80 : https://chromedriver.storage.googleapis.com/index.html?path=80.0.3987.16/
2. 웹드라이버 세팅 - test
3. 브라우저 내부대기
4. 브라우저 사이즈 설정
5. 로딩
6. 가져온 페이지, 세션값, 타이틀, 현재 url, 쿠키 정보 출력
7. 검색창
- input 선택
- 검색어 입력
- 제출
8. 스크린샷
- 저장경로 설정
9. 브라우저 종료
"""
from selenium import webdriver
browser = webdriver.Chrome('./webdriver/chromedriver.exe')
print(dir(browser))
browser.implicitly_wait(5)
# browser.set_window_size(1920, 1280) # bug
browser.maximize_window()
browser.get('https://www.daum.net')
# print(browser.page_source)
# print(browser.session_id)
# print(browser.title)
# print(browser.current_url)
# print(browser.get_cookies())
elem = browser.find_element_by_css_selector('div.inner_search > input.tf_keyword')
elem.send_keys('오늘의 날씨')
elem.submit()
browser.save_screenshot('C:/Users/CrePASS/Documents/GitHub/python-study-note/p02crawling-basic/c12browser_screenshot/img01.png')
browser.quit()
|
24,419 | 57061f152b379316012503e01c5b17a48f56f157 | from abc import ABCMeta, abstractmethod
from enum import Enum
class ExprType(Enum):
    # Discriminator tags for the expression-node classes defined below.
    CONST = "CONST"
    VAR = "VAR"
    ADD = "ADD"
    MUL = "MUL"
    SUB = "SUB"
    DIV = "DIV"
class Expr(metaclass=ABCMeta):
    """Abstract expression-tree node; operators build composite nodes."""

    @abstractmethod
    def exprtype(self):
        """Return this node's ExprType discriminator."""

    @abstractmethod
    def __repr__(self):
        """Render this subtree as an infix string."""

    def __add__(self, rhs):
        return Add(self, rhs)

    def __sub__(self, rhs):
        return Sub(self, rhs)

    def __mul__(self, rhs):
        return Mul(self, rhs)

    def __truediv__(self, rhs):
        return Div(self, rhs)
class Const(Expr):
    """Leaf node wrapping a literal value."""

    def __init__(self, val):
        # Fix: the original called bare `super()`, which only constructs an
        # unused proxy object and never runs the base initializer.
        super().__init__()
        self.val = val

    def __repr__(self):
        return str(self.val)

    def exprtype(self):
        return ExprType.CONST
class BinopExpr(Expr, metaclass=ABCMeta):
    """Abstract base for binary operator nodes holding lhs/rhs subtrees."""

    def __init__(self, lhs, rhs):
        # Fix: the original called bare `super()`, which only constructs an
        # unused proxy object and never runs the base initializer.
        super().__init__()
        self.lhs = lhs
        self.rhs = rhs

    @abstractmethod
    def symbol(self):
        """Return the operator's infix symbol, e.g. "+"."""

    def __repr__(self):
        return self.lhs.__repr__() + self.symbol() + self.rhs.__repr__()
class Add(BinopExpr):
    """Addition node."""

    def symbol(self):
        return "+"

    def exprtype(self):
        return ExprType.ADD
class Mul(BinopExpr):
    """Multiplication node."""

    def symbol(self):
        return "*"

    def exprtype(self):
        return ExprType.MUL
class Div(BinopExpr):
    """Division node."""

    def symbol(self):
        return "/"

    def exprtype(self):
        return ExprType.DIV
class Sub(BinopExpr):
    """Subtraction node."""

    def exprtype(self):
        # Fix: the original returned ExprType.Sub, which does not exist on the
        # enum (members are upper-case), so this raised AttributeError.
        return ExprType.SUB

    def symbol(self):
        return "-"
class Var(Expr):
    """Leaf node referencing a variable by name."""

    def __init__(self, var):
        # Fix: the original called bare `super()`, which only constructs an
        # unused proxy object and never runs the base initializer.
        super().__init__()
        self.var = var

    def exprtype(self):
        return ExprType.VAR

    def __repr__(self):
        return self.var
def expreval(expr, varmap):
    """Recursively evaluate an expression tree.

    Fix: the original called expr.match(...), a method that no class in this
    file defines, so evaluation always raised AttributeError. Dispatch on the
    node's ExprType instead.

    :param expr: root Expr node
    :param varmap: dict mapping variable names to values
    :return: the numeric value of the expression
    """
    handlers = {
        ExprType.CONST: lambda: expr.val,
        ExprType.VAR: lambda: varmap[expr.var],
        ExprType.ADD: lambda: expreval(expr.lhs, varmap) + expreval(expr.rhs, varmap),
        ExprType.MUL: lambda: expreval(expr.lhs, varmap) * expreval(expr.rhs, varmap),
        ExprType.DIV: lambda: expreval(expr.lhs, varmap) / expreval(expr.rhs, varmap),
        ExprType.SUB: lambda: expreval(expr.lhs, varmap) - expreval(expr.rhs, varmap),
    }
    return handlers[expr.exprtype()]()
# Demo: build 2*5 + 1*x via the overloaded operators, then evaluate with x=10.
c = Const(2) * Const(5) + Const(1) * Var("x")
varmap = {"x": 10}
print(c)
print(expreval(c, varmap))
|
24,420 | 4c6942576517d44e5b718d8434869973b2dd59dd | #!/usr/bin/python
if __name__ == '__main__':
    # Python 2 syntax (print statement, builtin reduce). Computes the digit
    # sum of 100! (Project Euler problem 20): reduce over range(2, 101)
    # builds 100 factorial, str() exposes its digits.
    print sum([int(x) for x in str(reduce(lambda x, y: x*y, range(2, 101)))])
24,421 | 70aa3450570f4addb4b34d9eff1f90f19ebb8ba7 | import torch
from torch import nn
from torch.nn import functional as F
class CNN(nn.Module):
    """Small classifier for 10 classes.

    The convolutional feature extractor (l1-l4) is instantiated but currently
    disabled in forward(); the active path flattens the input and runs the
    fully connected head l5-l8, ending in a softmax over classes.
    """

    def __init__(self):
        super().__init__()
        self.dropout = nn.Dropout(p=0.25)
        self.maxpool = nn.MaxPool2d(2, stride=2)
        # Conv stack -- kept so saved checkpoints stay compatible, but the
        # forward pass below does not use it.
        self.l1 = nn.LazyConv2d(32, 5)  # l2 regularization?
        self.l2 = nn.LazyConv2d(32, 5, bias=False)
        self.l3 = nn.LazyConv2d(64, 3)  # l2 regularization?
        self.l4 = nn.LazyConv2d(64, 3, bias=False)
        # Fully connected classifier head (LazyLinear infers input size).
        self.l5 = nn.LazyLinear(256, bias=False)
        self.l6 = nn.LazyLinear(128, bias=False)
        self.l7 = nn.LazyLinear(64, bias=False)
        self.l8 = nn.LazyLinear(10, bias=False)

    def forward(self, x):
        # Disabled conv path (was commented out upstream):
        #   relu(l1) -> relu(l2) -> dropout(maxpool) ->
        #   relu(l3) -> relu(l4) -> dropout(maxpool)
        x = torch.flatten(x, start_dim=1)
        x = F.relu(self.l5(x))
        x = F.relu(self.l6(x))
        x = F.relu(self.l7(x))
        # Fix: F.softmax without an explicit dim is deprecated and warns in
        # modern PyTorch; dim=1 normalizes over the class dimension, matching
        # the implicit choice for 2-D input.
        x = F.softmax(self.l8(x), dim=1)
        return x
24,422 | 727a2d1f3b354cff9ecef7f098301f8a8b4078f7 |
import board
from microcontroller import pin
import busio,time
from digitalio import DigitalInOut, Direction, Pull
# Configure board pins: LED, SD chip-select, and the RTS/DTR control lines.
led = DigitalInOut(board.LED)
sd_cs = DigitalInOut(board.xSDCS)
rts = DigitalInOut(board.RTS)
dtr = DigitalInOut(board.DTR)
rts.direction = Direction.OUTPUT
dtr.direction = Direction.OUTPUT
led.direction = Direction.OUTPUT
sd_cs.pull = Pull.UP
# Drive everything low/off initially.
led.value = 0
rts.value = 0
dtr.value = 0
# Import the HT16K33 LED matrix module.
from adafruit_ht16k33 import matrix
# Create the I2C interface.
# I2C(SCL,SDA)
i2c = busio.I2C(board.A1, board.A0)
# NOTE(review): this rebinding shadows the imported `matrix` module with the
# display instance -- works, but consider a distinct name.
matrix = matrix.Matrix8x8x2(i2c)
# The single frame drawn by the loop: one (x, y) per lit LED of the sprite.
# Color index 2 selects the second color of the bi-color 8x8 matrix.
# Fix: removed ~200 lines of commented-out alternate animation frames that
# obscured the handful of active pixel calls; the lit set is unchanged.
_SPRITE = (
    [(x, 2) for x in (1, 2, 5, 6)]
    + [(x, 3) for x in range(8)]
    + [(x, 4) for x in range(8)]
    + [(x, 5) for x in range(1, 7)]
    + [(x, 6) for x in range(2, 6)]
    + [(3, 7), (4, 7)]
)
while True:
    # Pixels are never cleared (the original's fill(0) was commented out),
    # so the same frame is simply redrawn each pass.
    for x, y in _SPRITE:
        matrix.pixel(x, y, 2)
    time.sleep(0.3)
    matrix.show()
|
24,423 | a7d7c228477f1268133bc6dc2e5f3602018f1003 |
# NOTE(review): notebook-style snippet -- songs_list, songs_sizes and
# songs_fingerprint_time are defined elsewhere. songs_sizes/2**20 presumably
# converts sizes in bytes to MiB (suggesting a NumPy array); confirm.
list(zip(songs_list, songs_sizes/2**20, songs_fingerprint_time))
|
24,424 | 721a312917e7e9b118e3df7ba92bc097b3ffc05c |
# Find the angle of a given (x, y) point around the origin (0, 0).
# NOTE: mind the argument order -- it is y first, then x:
# math.atan2(y, x)
# Beware: using only asin/acos cannot recover angles beyond 180 degrees.
# For a regular n-gon, the angular step between adjacent vertices is 360/n.
import math
|
24,425 | 7aafcc7d0973917a14aadd2cf51966e9009bed53 | import os
import ujson
from fastapi_cache import FastAPICache
from fastapi_cache.backends.inmemory import InMemoryBackend
from fastapi_cache.decorator import cache
from fastapi.responses import UJSONResponse
from fastapi import Request, Depends
from fastapi import FastAPI
from controllers.fetch_and_handle_feed import fetch_and_handle_feed
from database.connection import create_pg_pool
from service.exception import MpeixException
from service.utils import get_institute_id_from_group, build_key
# FastAPI application serving aggregated VK feed data for Mpeix.
app = FastAPI(
    title='Feed MicroService',
    description='Feed MicroService For Mpeix',
    default_response_class=UJSONResponse
)
@app.exception_handler(500)
async def handle_exception(req: Request, exc: Exception):
    """Serialize any unhandled server error into the standard JSON envelope."""
    payload = {
        'data': None,
        'error': str(exc)
    }
    return UJSONResponse(payload, status_code=500)
@app.on_event('startup')
async def on_startup():
    """Create the PostgreSQL pool, init the in-memory cache and load the
    static recourses mapping.

    Fix: removed the unused `port = os.getenv('PORT')` local.
    """
    global pool, recourses
    dsn = os.getenv('DB_URL')
    user = os.getenv('DB_USER')
    password = os.getenv('DB_PASSWORD')
    # NOTE(review): arguments are passed as (dsn, password, user) -- confirm
    # this order against database.connection.create_pg_pool.
    pool = await create_pg_pool(dsn, password, user)
    FastAPICache.init(InMemoryBackend())
    with open('static/json/recourses.json') as f:
        recourses = ujson.load(f)
@app.middleware('http')
async def add_pool_to_request_scope(req: Request, call_next):
    """Expose the shared connection pool to handlers via the ASGI scope."""
    req.scope['pg_pool'] = pool
    return await call_next(req)
@app.get('/v1/feed/{group}/')
@cache(expire=3600, key_builder=build_key)
async def get_feed(faculty_id: str = Depends(get_institute_id_from_group)):
    """Return the (hour-cached) VK wall feed for a student group's institute.

    Raises MpeixException when the VK API token is not configured.
    """
    # Wall sources: union committee, student council, and the group's institute.
    answer_keys = ['profcom_mpei', 'studsovet_mpei', faculty_id]
    API_VERSION = '5.131'
    TOKEN = os.getenv('VK_TOKEN', None)
    if TOKEN is None:
        raise MpeixException('define vk api token in system variables')
    params = {
        'v': API_VERSION,
        'access_token': TOKEN,
        'extended': 0,
        'count': 10  # newest 10 posts per wall
    }
    BASE_URL = 'https://api.vk.com/method/wall.get'
    data = await fetch_and_handle_feed(BASE_URL, params, recourses, pool, answer_keys)
    return {
        'feed': data
    }
if __name__ == '__main__':
    # Local development entry point; production runs via an external ASGI server.
    import uvicorn
    uvicorn.run(app)
24,426 | 63b6a219dac81550e002909623b170396c8cb2b6 | #!/usr/bin/env python3
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import sys, path
import synth_common
# Build a synthetic trace exercising Android app-startup (launch) metrics.
trace = synth_common.create_trace()
trace.add_packet()
trace.add_process(1, 0, 'init')
trace.add_process(2, 1, 'system_server')
trace.add_process(3, 1, 'com.google.android.calendar', 10001)
trace.add_process(4, 3, 'com.google.android.calendar', 10001)
trace.add_package_list(
    ts=1, name='com.google.android.calendar', uid=10001, version_code=123)
trace.add_ftrace_packet(cpu=0)
# Intent without any corresponding end state, will be ignored
trace.add_atrace_begin(
    ts=100, tid=2, pid=2, buf='MetricsLogger:launchObserverNotifyIntentStarted')
trace.add_atrace_end(ts=101, tid=2, pid=2)
# Start intent for a successful launch of calendar
trace.add_atrace_begin(
    ts=102, tid=2, pid=2, buf='MetricsLogger:launchObserverNotifyIntentStarted')
trace.add_atrace_end(ts=103, tid=2, pid=2)
trace.add_atrace_async_begin(
    ts=110, tid=2, pid=2, buf='launching: com.google.android.calendar')
trace.add_sched(ts=110, prev_pid=0, next_pid=3)
# As the process already existed before intent started, this is a
# warm/hot start (we choose warm). Therefore, emit an activityStart
# slice.
trace.add_atrace_begin(ts=115, tid=3, pid=3, buf='activityStart')
trace.add_atrace_end(ts=117, tid=3, pid=3)
trace.add_atrace_begin(ts=117, tid=3, pid=3, buf='activityResume')
trace.add_atrace_end(ts=118, tid=3, pid=3)
# P1: 10ns running
trace.add_sched(ts=120, prev_pid=3, next_pid=0, prev_state='S')
# P1: 10ns sleep
trace.add_sched(ts=130, prev_pid=0, next_pid=3)
trace.add_sched(ts=130, prev_pid=3, next_pid=4)
# Create an unrelated task
trace.add_newtask(ts=155, tid=1, new_tid=5, new_comm='', flags=0)
# P2: 30ns running
trace.add_sched(ts=160, prev_pid=4, next_pid=0, prev_state='R')
# P2: 49ns runnable
trace.add_sched(ts=209, prev_pid=0, next_pid=4)
# P2: 1ns running
trace.add_sched(ts=210, prev_pid=4, next_pid=0)
trace.add_atrace_async_end(
    ts=210, tid=2, pid=2, buf='launching: com.google.android.calendar')
trace.add_atrace_begin(
    ts=211,
    tid=2,
    pid=2,
    buf='MetricsLogger:launchObserverNotifyActivityLaunchFinished')
trace.add_atrace_end(ts=212, tid=2, pid=2)
# Some time after, add a slice for fully drawn frame.
# NOTE(review): '\{' keeps a literal backslash in the string (unknown escape);
# confirm the consumer really expects backslashes here.
trace.add_atrace_begin(
    ts=300,
    tid=3,
    pid=3,
    buf='reportFullyDrawn() for \{com.google.android.calendar\}')
# NOTE(review): this end is on tid=2 although the matching begin above is on
# tid=3 -- confirm intentional.
trace.add_atrace_end(ts=305, tid=2, pid=2)
# Start intent for calendar, we failed to launch the activity.
trace.add_atrace_begin(
    ts=402, tid=2, pid=2, buf='MetricsLogger:launchObserverNotifyIntentStarted')
trace.add_atrace_end(ts=403, tid=2, pid=2)
trace.add_atrace_async_begin(
    ts=410, tid=2, pid=2, buf='launching: com.google.android.calendar')
trace.add_atrace_async_end(
    ts=510,
    tid=2,
    pid=2,
    buf='launching: com.google.android.apps.nexuslauncher')
trace.add_ftrace_packet(cpu=1)
trace.add_sched(ts=160, prev_pid=0, next_pid=1)
trace.add_sched(ts=200, prev_pid=1, next_pid=0)
# Emit the serialized protobuf on stdout.
sys.stdout.buffer.write(trace.trace.SerializeToString())
|
24,427 | 56861cffe85663feb127bf1bcaa4517906f02658 | #This script is intended to clean data on the hunt for the cyber threat Flitter Network
# The targets we want to look into should have upwards of 30 contacts
import csv
#Give option to enter value for threshold
threshold = raw_input("Enter threshold value for friend connections (default is 15): ")
threshold = threshold or '15'
#print(threshold)
#Links Table will help define how many important nodes we have
# aka. people with more than the threshold of connections
#read in Links_Table
with open('Links_Table.txt', 'r') as csvfile:
readCSV = csv.reader(csvfile, delimiter='\t')
for row in readCSV:
print(row)
print(row[0],row[1])
#count instances from COL 2 to COL 1 (other way around gets the wrong friendship model)
#keep list of users to be deleted
#remove rows with unimportant IDS from People_Cities and Flitter_Names
|
24,428 | 542d7e030cbf5d27e453961089dbafa834f1f71f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# author: zzxun <xtkml.g@gamil.com>
# date: 16/3/19
""" Description: cookie of login user
"""
from sqlalchemy import Column, Integer, String, Sequence, Text, DateTime
from sqlalchemy.sql.expression import text, update
from .base import Base
from ..common import encode_passwd
from datetime import datetime
class Cookie(Base):
    """Persisted login cookie (plus encoded password) for a user account."""

    __tablename__ = 'cookie'

    id = Column(Integer, Sequence('cookie_id_seq'), primary_key=True)
    username = Column(String(50), nullable=False, unique=True)
    password = Column(String(255), nullable=False, server_default='')
    cookie = Column(Text)
    created_at = Column(DateTime, nullable=False, server_default=text('NOW()'))
    updated_at = Column(DateTime, nullable=False, server_default=text('NOW()'),
                        onupdate=datetime.now)

    def __repr__(self):
        # Fix: the original read self.createdAt / self.updatedAt, which do not
        # exist (the mapped attributes are created_at / updated_at), so repr()
        # raised AttributeError; the format string was also unbalanced
        # ("(...)>" with no opening "<Cookie(").
        return "<Cookie(id='%d', user='%s', createdAt='%s', updatedAt='%s', cookie='%s')>" % \
               (self.id, self.username, self.created_at, self.updated_at, self.cookie)

    @classmethod
    def clean_cookies(cls, session, username):
        """ clean password and cookie for username
        :param session: db session
        :param username: Cookie.username
        """
        session.execute(update(Cookie).
                        where(Cookie.username == username).
                        values(password='', cookie='', updated_at=text('NOW()')))

    @classmethod
    def create_or_update(cls, session, username, password, cookies):
        """ add new or update exist
        :param username: email
        :param session: db session
        :param password: password origin (encoded before storage)
        :param cookies: zhihu cookies
        """
        cookie = session.query(Cookie). \
            filter(Cookie.username == username).one_or_none()
        password = encode_passwd(password)
        if cookie:
            cookie.password = password
            cookie.cookie = cookies
            cookie.updated_at = text('NOW()')
        else:
            cookie = Cookie(username=username, password=password, cookie=cookies)
            session.add(cookie)

    @classmethod
    def get_all_cookies(cls, session):
        """ show list of cookies by usernames
        :param session: db session
        :return: list of cookies
        """
        return session.query(Cookie).all()
|
24,429 | 174f888a25e13c8737e6ced22daa5682655424e7 | #! -*- coding:utf-8 -*-
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = '0' #use GPU with ID=0
import json
import numpy as np
from bert4keras.backend import keras, K
from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding, DataGenerator
from keras.models import Model
from tqdm import tqdm
# Basic hyper-parameters
maxlen = 128              # max sequence length in tokens
epochs = 10
batch_size = 32
learning_rate = 1e-5
crf_lr_multiplier = 100   # enlarge the CRF layer's learning rate when needed
# BERT checkpoint configuration
config_path = 'pretrained_model/chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'pretrained_model/chinese_L-12_H-768_A-12/bert_model.ckpt'
dict_path = 'pretrained_model/chinese_L-12_H-768_A-12/vocab.txt'
def load_data(filename):
    """Read one JSON record per line and normalize the label field.

    Records with no labels, or whose first label is 'correct', are rewritten
    to carry a single {'label': 'correct', 'entity': ''} marker; all other
    records keep their labels unchanged.
    """
    samples = []
    with open(filename, encoding='utf-8') as fh:
        for raw_line in fh:
            record = json.loads(raw_line)
            labels = record['label']
            if not labels or labels[0]['label'] == 'correct':
                normalized = [{"label": "correct", "entity": ""}]
            else:
                normalized = labels
            samples.append({'text': record['raw_text'], 'labels': normalized})
    return samples
# Load datasets
train_data = load_data('data/train.json')
print(len(train_data))
valid_data = load_data('data/test.json')
print(len(valid_data))
# Build the label <-> id maps from the training data
with open('data/train.json', encoding='utf-8') as f:
    id2label, label2id = {}, {}
    for line in f:
        l = json.loads(line)
        for label in l['label']:
            if label['label'] not in label2id:
                id2label[len(label2id)] = label['label']
                label2id[label['label']] = len(label2id)
# Each label gets a B- and an I- tag, plus one "O" tag.
num_labels = len(id2label) * 2 + 1
# print(label2id)
# print(id2label)
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
def search(pattern, sequence):
    """Locate *pattern* as a contiguous subsequence of *sequence*.

    Returns the start index of the first occurrence, or -1 if absent.
    """
    width = len(pattern)
    for start, _ in enumerate(sequence):
        if sequence[start:start + width] == pattern:
            return start
    return -1
class data_generator(DataGenerator):
    """Batch generator yielding ([token_ids, segment_ids], BIO label ids)."""
    def __iter__(self, random=False):
        batch_token_ids, batch_segment_ids, batch_labels = [], [], []
        for is_end, d in self.sample(random):
            token_ids, segment_ids = tokenizer.encode(d['text'], maxlen=maxlen)
            labels = [0] * len(token_ids)  # 0 = "O" (outside any entity)
            for labels_entity in d['labels']:
                if labels_entity['label'] != 'correct':
                    # Encode the entity alone, strip [CLS]/[SEP], and find it
                    # inside the sentence's token ids.
                    a_token_ids = tokenizer.encode(labels_entity['entity'])[0][1:-1]
                    start_index = search(a_token_ids, token_ids)
                    if start_index != -1:
                        # B- tag id = 2*label_id + 1; I- tag id = 2*label_id + 2.
                        labels[start_index] = label2id[labels_entity['label']] * 2 + 1
                        for i in range(1, len(a_token_ids)):
                            labels[start_index + i] = label2id[labels_entity['label']] * 2 + 2
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
            batch_labels.append(labels)
            if len(batch_token_ids) == self.batch_size or is_end:
                batch_token_ids = sequence_padding(batch_token_ids)
                batch_segment_ids = sequence_padding(batch_segment_ids)
                batch_labels = sequence_padding(batch_labels)
                yield [batch_token_ids, batch_segment_ids], batch_labels
                batch_token_ids, batch_segment_ids, batch_labels = [], [], []
# Load the pre-trained model
model = build_transformer_model(
    config_path=config_path,
    checkpoint_path=checkpoint_path,
    # model = "albert",  # enable when using an ALBERT checkpoint
)
from models.layers import CRFGraph
# Attach the CRF head on top of the transformer encoder.
output, CRF = CRFGraph(model.output, num_labels, crf_lr_multiplier)
model = Model(model.input, output)
# model.summary()
model.compile(
    loss=CRF.sparse_loss,
    optimizer=Adam(learning_rate),
    metrics=[CRF.sparse_accuracy]
)
def viterbi_decode(nodes, trans):
    """Viterbi algorithm: find the best-scoring label path.

    nodes.shape=[seq_len, num_labels] (per-position emission scores),
    trans.shape=[num_labels, num_labels] (transition scores).
    Returns the argmax label sequence as a 1-D array.
    """
    labels = np.arange(num_labels).reshape((1, -1))
    scores = nodes[0].reshape((-1, 1))
    scores[1:] -= np.inf  # the first label must be 0 ("O", e.g. the [CLS] slot)
    paths = labels
    for l in range(1, len(nodes)):
        # Score of every (previous label, current label) pair.
        M = scores + trans + nodes[l].reshape((1, -1))
        idxs = M.argmax(0)
        scores = M.max(0).reshape((-1, 1))
        paths = np.concatenate([paths[:, idxs], labels], 0)
    return paths[:, scores[:, 0].argmax()]
def extract_arguments(text):
    """Extract labeled entity spans from *text* with the CRF tagger.

    Returns a dict mapping each entity substring to its label name.
    """
    tokens = tokenizer.tokenize(text)
    while len(tokens) > 128:
        tokens.pop(-2)  # drop tokens just before [SEP] to fit the 128 limit
    mapping = tokenizer.rematch(text, tokens)
    token_ids = tokenizer.tokens_to_ids(tokens)
    segment_ids = [0] * len(token_ids)
    nodes = model.predict([[token_ids], [segment_ids]])[0]
    trans = K.eval(CRF.trans)
    labels = viterbi_decode(nodes, trans)
    arguments, starting = [], False
    for i, label in enumerate(labels):
        if label > 0:
            if label % 2 == 1:
                # Odd tag ids are B- tags: open a new span.
                starting = True
                arguments.append([[i], id2label[(label - 1) // 2]])
            elif starting:
                # Even tag ids are I- tags: extend the currently open span.
                arguments[-1][0].append(i)
            else:
                starting = False
        else:
            starting = False
    # Map token index spans back to character offsets in the original text.
    return {
        text[mapping[w[0]][0]:mapping[w[-1]][-1] + 1]: l
        for w, l in arguments
    }
def evaluate(data):
    """Compute micro-averaged F1 / precision / recall over *data*."""
    X, Y, Z = 1e-10, 1e-10, 1e-10  # epsilons guard against division by zero
    pbar = tqdm()
    for d in data:
        entity_pred = []
        entity_y = []
        pred_arguments = extract_arguments(d['text'])
        if len(pred_arguments) == 0:
            # No entities found -> the sample is predicted "correct".
            entity_pred.append(("correct", ""))
        for k, v in pred_arguments.items():
            entity_pred.append((v, k))
        for label_entity in d['labels']:
            entity_y.append((label_entity['label'], label_entity['entity']))
        R = set(entity_pred)  # predicted (label, entity) pairs
        T = set(entity_y)     # gold (label, entity) pairs
        X += len(R & T)
        Y += len(R)
        Z += len(T)
        f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
        pbar.update()
        pbar.set_description('f1: %.5f, precision: %.5f, recall: %.5f' %
                             (f1, precision, recall))
    pbar.close()
    return f1, precision, recall
class Evaluator(keras.callbacks.Callback):
    """Keras callback: score the validation set each epoch, checkpoint best F1."""

    def __init__(self):
        self.best_val_f1 = 0.

    def on_epoch_end(self, epoch, logs=None):
        if not os.path.exists('save'):
            os.makedirs('save')
        if epoch < 0:
            return
        f1, precision, recall = evaluate(valid_data)
        if f1 >= self.best_val_f1:
            self.best_val_f1 = f1
            model.save_weights('./save/best_model.weights')
        print(
            'f1: %.5f, precision: %.5f, recall: %.5f, best f1: %.5f\n' %
            (f1, precision, recall, self.best_val_f1)
        )
if __name__ == '__main__':
    # Train with the epoch-end evaluator checkpointing the best weights.
    train_generator = data_generator(train_data, batch_size)
    evaluator = Evaluator()
    model.fit_generator(
        train_generator.forfit(),
        steps_per_epoch=len(train_generator),
        epochs=epochs,
        callbacks=[evaluator]
    )
24,430 | 0b6af5d810fdefe7f27b7a1047bc9b5301622c14 | # Copied from https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/65938
# Full credit to Allen Qin for the basic implementation of the original Focal Loss
# paper: https://arxiv.org/abs/1708.02002
import torch
import torch.nn as nn
from torch.nn import functional as F
class FocalLoss(nn.Module):
    """Pixel-wise focal loss (Lin et al., arXiv:1708.02002).

    Down-weights easy negatives, useful under heavy background/foreground
    imbalance. Accepts raw logits (logits=True) or probabilities in [0, 1].
    """

    def __init__(self, alpha=1, gamma=2, logits=False, reduce=True):
        """
        Arguments:
            alpha: weighting hyper-parameter; see the paper
            gamma: focusing hyper-parameter; see the paper
            logits: True -> inputs are raw logits; False -> probabilities
            reduce: True -> return the mean; False -> element-wise losses
        """
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.logits = logits
        self.reduce = reduce

    def forward(self, inputs, targets):
        """Compute the focal loss for `inputs` against `targets`."""
        bce_fn = (F.binary_cross_entropy_with_logits if self.logits
                  else F.binary_cross_entropy)
        bce = bce_fn(inputs, targets, reduction='none')
        prob_true = torch.exp(-bce)  # -bce == log(p_t)
        focal = self.alpha * (1 - prob_true) ** self.gamma * bce
        return torch.mean(focal) if self.reduce else focal
class ClassWiseFocalLoss(FocalLoss):
    '''
    See FocalLoss.
    + Allows a different alpha and gamma per class:
      alpha: class-class imbalance; gamma: class-background imbalance
    '''
    def __init__(self, alpha=(1, 1, 1), gamma=(2, 2, 2), logits=True, reduce=False):
        '''
        See FocalLoss. Expects gamma and alpha to be of the same length.

        Fix: defaults are now tuples; the original used mutable list defaults,
        a classic shared-state hazard (callers may still pass lists).
        '''
        super().__init__(alpha, gamma, logits, reduce)

    def forward(self, inputs, targets):
        '''
        Expects targets and inputs shaped batches x classes x X x Y.

        Arguments:
            see FocalLoss
        return:
            loss (scalar mean if self.reduce, else element-wise tensor)
        '''
        if self.logits:
            BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
        else:
            BCE_loss = F.binary_cross_entropy(inputs, targets, reduction='none')
        pt = torch.exp(-BCE_loss)  # -BCE_loss = log(pt)
        F_loss = torch.zeros_like(pt)
        # Apply the per-class (alpha, gamma) pair along the class dimension.
        for i, (alpha, gamma) in enumerate(zip(self.alpha, self.gamma)):
            F_loss[:, i, :, :] = alpha * (1-pt[:, i, :, :])**gamma * BCE_loss[:, i, :, :]
        if self.reduce:
            return torch.mean(F_loss)
        else:
            return F_loss
|
24,431 | 8c79dc9ac90236fe92ec81fa25684759f2450cb1 | """
Broadcast Visualization
-----------------------
Figure A.1
A visualization of NumPy array broadcasting. Note that the extra memory
indicated by the dotted boxes is never allocated, but it can be convenient
to think about the operations as if it is.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
#
# Author: Red Liu (lli_njupt@163.com)
# Add more useful functions to draw vector and matrix
#
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
def setup_text_plots(fontsize=8, usetex=True):
    """
    This function adjusts matplotlib settings so that all figures in the
    textbook have a uniform format and look.
    """
    # NOTE(review): distutils is deprecated (PEP 632, removed in Python 3.12);
    # packaging.version.Version would be the modern replacement.
    from distutils.version import LooseVersion
    matplotlib.rc('legend', fontsize=fontsize, handlelength=3)
    matplotlib.rc('axes', titlesize=fontsize)
    matplotlib.rc('axes', labelsize=fontsize)
    matplotlib.rc('xtick', labelsize=fontsize)
    matplotlib.rc('ytick', labelsize=fontsize)
    # usetex=True renders text via LaTeX; requires LaTeX on the system.
    matplotlib.rc('text', usetex=usetex)
    matplotlib.rc('font', size=fontsize, family='serif',
                  style='normal', variant='normal',
                  stretch='normal', weight='normal')
    matplotlib.rc('patch', force_edgecolor=True)
    if LooseVersion(matplotlib.__version__) < LooseVersion("3.1"):
        matplotlib.rc('_internal', classic_mode=True)
    else:
        # New in mpl 3.1
        # NOTE(review): matplotlib.rc expects rc('group', key=value); this
        # positional form rc('scatter.edgecolors', 'b') looks wrong and likely
        # raises TypeError -- confirm against the matplotlib.rc API.
        matplotlib.rc('scatter.edgecolors', 'b')
        matplotlib.rc('grid', linestyle=':')
        matplotlib.rc('errorbar', capsize=3)
        matplotlib.rc('image', cmap='viridis')
        matplotlib.rc('axes', xmargin=0)
        matplotlib.rc('axes', ymargin=0)
        matplotlib.rc('xtick', direction='in')
        matplotlib.rc('ytick', direction='in')
        matplotlib.rc('xtick', top=True)
        matplotlib.rc('ytick', right=True)
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Draw a figure and axis with no boundary
fig = plt.figure(figsize=(5.2, 3), facecolor='w')
ax = plt.axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)
# draw first dimension
fig_xsize, fig_ysize = fig.get_size_inches()
ax.set_xlim(0, fig_xsize * 3)
ax.set_ylim(0, fig_ysize * 3)
plt.style.use('ggplot')
# FIXME. 8 is the font size, how to get the text font size?
def font_height(fontsize=8):
    """Approximate text height in data units for the given point size.

    Divides by the figure DPI, so this presumably assumes one data unit maps
    to roughly one inch on screen — TODO confirm against the axes limits.
    """
    return fontsize / fig.dpi
def font_width(fontsize=8):
    """Approximate character width, taken as half of font_height()."""
    return font_height(fontsize) / 2
def draw_cube(ax, xy, size, depth=0.3,
              edges=None, label=None, label_kwargs=None, **kwargs):
    """Draw and label one pseudo-3D cube on *ax*.

    Arguments:
        ax: matplotlib axes to draw on.
        xy: (x, y) of the front face's top-left corner.
        size: edge length of the square front face.
        depth: x/y offset faking the depth direction.
        edges: list of numbers between 1 and 12 specifying which of the 12
            cube edges to draw: 1-4 are the front face (top, right, bottom,
            left), 5-8 the connecting edges, 9-12 the back face.
            None draws all twelve.
        label: optional text centered on the front face.
        label_kwargs: extra keyword args for the ax.text label call.
        **kwargs: line-style keyword args forwarded to every ax.plot call.
    """
    if edges is None:
        edges = range(1, 13)
    x, y = xy
    y -= size # set left/up corner as the first (0,0) for one cube
    # first plot background edges (the back face, offset by depth)
    if 9 in edges:
        ax.plot([x + depth, x + depth + size],
                [y + depth + size, y + depth + size], **kwargs)
    if 10 in edges:
        ax.plot([x + depth + size, x + depth + size],
                [y + depth, y + depth + size], **kwargs)
    if 11 in edges:
        ax.plot([x + depth, x + depth + size],
                [y + depth, y + depth], **kwargs)
    if 12 in edges:
        ax.plot([x + depth, x + depth],
                [y + depth, y + depth + size], **kwargs)
    # second plot middle (front-to-back connecting) edges
    if 5 in edges:
        ax.plot([x, x + depth],
                [y + size, y + depth + size], **kwargs)
    if 6 in edges:
        ax.plot([x + size, x + size + depth],
                [y + size, y + depth + size], **kwargs)
    if 7 in edges:
        ax.plot([x + size, x + size + depth],
                [y, y + depth], **kwargs)
    if 8 in edges:
        ax.plot([x, x + depth],
                [y, y + depth], **kwargs)
    # last plot foreground edges (front face, drawn over everything else)
    if 1 in edges: # top edge
        ax.plot([x, x + size],
                [y + size, y + size], **kwargs)
    if 2 in edges: # right
        ax.plot([x + size, x + size],
                [y, y + size], **kwargs)
    if 3 in edges: # bottom
        ax.plot([x, x + size],
                [y, y], **kwargs)
    if 4 in edges: # left
        ax.plot([x, x],
                [y, y + size], **kwargs)
    if label:
        if label_kwargs is None:
            label_kwargs = {}
        # Center the label on the front face, nudged down by half a text height.
        ax.text(x + 0.5 * size, y + 0.5 * size - font_height() / 2,
                label, ha='center', va='center', **label_kwargs)
solid = dict(c='black', ls='-', lw=1, # solid border style and color
label_kwargs=dict(color='k')) # text color
dotted = dict(c='black', ls=':', lw=0.5, # virtual border style and color
label_kwargs=dict(color='gray')) # text color
depth = 0.3
#------------------------------------------------------------
# Draw top operation: vector plus scalar
'''
draw_cube(ax, (1, 10), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
draw_cube(ax, (2, 10), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
draw_cube(ax, (3, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
draw_cube(ax, (6, 10), 1, depth, [1, 2, 3, 4, 5, 6, 7, 9, 10], '5', **solid)
draw_cube(ax, (7, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '5', **dotted)
draw_cube(ax, (8, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '5', **dotted)
draw_cube(ax, (12, 10), 1, depth, [1, 2, 3, 4, 5, 6, 9], '5', **solid)
draw_cube(ax, (13, 10), 1, depth, [1, 2, 3, 6, 9], '6', **solid)
draw_cube(ax, (14, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10], '7', **solid)
ax.text(5, 10.5, '+', size=12, ha='center', va='center')
ax.text(10.5, 10.5, '=', size=12, ha='center', va='center')
'''
# xy is the start point with style (x,y)
def draw_vector(vector, xy, title="1D Vector", with_axis=True, color='gray'):
    """Draw a 1-D array as a horizontal row of labelled cubes.

    Arguments:
        vector: 1-D ndarray; each entry becomes one cube label.
        xy: (x, y) of the first cube's top-left corner.
        title: text centered above the row; '' suppresses it.
        with_axis: also draw an 'axis 0' arrow above the row.
        color: front-face fill color ('gray' or None means no fill).
    """
    if vector.ndim != 1:
        print("{} is not a vector".format(vector))
        return
    x,y = xy
    size = len(vector)
    # draw title at the center
    if len(title):
        axisy = y + 0.1
        if with_axis:
            axisy = y + 1  # leave room for the axis arrow below the title
        ax.text(x + size / 2, axisy, title,
                size=12, ha='center', va='bottom')
    # draw axes
    if with_axis:
        starx = x - 0.5
        endx = x + size + 0.5
        axisy = y + 0.3
        ax.annotate("", xy=(endx, axisy), xytext=(starx, axisy),
                    arrowprops=dict(arrowstyle="simple", color='black'))
        ax.text(endx - 1, axisy, r'axis 0',
                size=10, ha='center', va='bottom')
    if size == 1:
        draw_cube(ax, (x, y), 1, depth, [1, 2, 3, 4], str(vector[0]), **solid)
        draw_square_mask(ax, (x, y), color=color)
    else:
        # Interior cubes omit the right edge (2) so neighbours share it;
        # only the final cube closes the row.
        for i in range(size - 1):
            draw_cube(ax, (x + i, y), 1, depth, [1, 3, 4], str(vector[i]), **solid)
            draw_square_mask(ax, (x + i, y), color=color)
        draw_cube(ax, (x + i + 1, y), 1, depth, [1, 2, 3, 4], str(vector[i+1]), **solid)
        draw_square_mask(ax, (x + i + 1, y), color=color)
def draw_indices_vector(vector, xy, color='gray'):
if vector.ndim != 1:
print("{} is not a vector".format(vector))
return
dotted = dict(c='k', ls='--', lw=0.2, # virtual border style and color
label_kwargs=dict(color='k')) # text color
x,y = xy
size = len(vector)
if size == 1:
draw_cube(ax, (x, y), 1, depth, [1, 2, 3, 4], str(vector[0]), **dotted)
draw_square_mask(ax, (x, y), color=color)
else:
for i in range(size - 1):
draw_cube(ax, (x + i, y), 1, depth, [1, 3, 4], str(vector[i]), **dotted)
draw_square_mask(ax, (x + i, y), color=color)
draw_cube(ax, (x + i + 1, y), 1, depth, [1, 2, 3, 4], str(vector[i+1]), **dotted)
draw_square_mask(ax, (x + i + 1, y), color=color)
def draw_vector_head(xy, color='gray'):
x,y = xy
x -= 2
draw_cube(ax, (x, y), 1, depth, [1, 2, 3], '', **solid)
draw_cube(ax, (x + 1, y), 1, depth, [1, 3], '', **solid)
def draw_vector_tail(xy, color='gray'):
x,y = xy
draw_cube(ax, (x, y), 1, depth, [1, 2, 3], '', **solid)
draw_cube(ax, (x + 1, y), 1, depth, [1, 3], '', **solid)
def draw_vertical_vector(vector, xy, title="1D Vector", color='gray'):
if vector.ndim != 1:
print("{} is not a vector".format(vector))
return
x,y = xy
size = len(vector)
# draw title at the center
if len(title):
ax.text(x + 1 / 2, y + 1, title,
size=12, ha='center', va='bottom')
if size == 1:
draw_cube(ax, (x, y), 1, depth, [1, 2, 3, 4], str(vector[0]), **solid)
draw_square_mask(ax, (x, y), color=color)
else:
for i in range(size - 1):
draw_cube(ax, (x, y - i), 1, depth, [1, 2, 4], str(vector[i]), **solid)
draw_square_mask(ax, (x, y - i), color=color)
draw_cube(ax, (x, y - i - 1), 1, depth, [1, 2, 3, 4], str(vector[i+1]), **solid)
draw_square_mask(ax, (x, y - i - 1), color=color)
from matplotlib.patches import Polygon,Rectangle
def create_stype(color='gray', alpha=0.5):
    """Build a matplotlib patch style dict.

    Arguments:
        color: face color for a filled, borderless patch; None selects an
            unfilled patch with a thin black border instead.
        alpha: face transparency used when a color is given.
    return:
        dict of keyword arguments for a matplotlib patch constructor.
    """
    # Idiom fix: compare against None with `is`, not `==` — equality can
    # misbehave for color-like objects (e.g. numpy arrays) with custom __eq__.
    if color is None:
        return dict(edgecolor='k', lw=1, fill=False)
    return dict(edgecolor=None, lw=0, facecolor=color, alpha=alpha)
def draw_square_mask(ax, xy, size=1, color='gray', alpha=0.1):
    """Fill the front face of a cube with *color*.

    xy is the left-top corner of the face. 'gray' (the background tone)
    and None both mean "leave the face unfilled".
    """
    if color is None or color == 'gray':
        return
    face_style = create_stype(color, alpha=alpha)
    ax.add_patch(Rectangle(xy, width=size, height=-size, **face_style))
# top diamond and square
def draw_top_mask(ax, xy, depth=0.3, size=1, color='gray', alpha=0.2):
    """Shade the top parallelogram of a cube whose front top-left corner is *xy*."""
    x0, y0 = xy
    corners = [(x0, y0),
               (x0 + depth, y0 + depth),
               (x0 + size + depth, y0 + depth),
               (x0 + size, y0)]
    ax.add_patch(Polygon(corners, closed=True,
                         **create_stype(color=color, alpha=alpha)))
def draw_top_nmask(ax, xy, columns=1, color='gray'):
    """Shade the tops of *columns* adjacent cubes, walking right from *xy*."""
    start_x, start_y = xy
    for col in range(columns):
        draw_top_mask(ax, (start_x + col, start_y), color=color)
# dir mean direction 'h' or 'v'
def draw_square_nmask(ax, xy, cubes=1, color='gray', dir='h'):
    """Shade the front faces of *cubes* adjacent cubes starting at *xy*.

    dir='h' walks right along a row; any other value walks down a column.
    """
    x0, y0 = xy
    horizontal = (dir == 'h')
    for k in range(cubes):
        target = (x0 + k, y0) if horizontal else (x0, y0 - k)
        draw_square_mask(ax, target, color=color)
def draw_square_nmask_column(ax, xy, rows=1, color='gray'):
x, y = xy
draw_top_nmask(ax, xy, 1, color=color)
draw_square_nmask(ax, xy, cubes=rows, color=color, dir='v')
def draw_right_nmask(ax, xy, rows=1, color='gray'):
x, y = xy
for i in range(rows):
draw_right_mask(ax, (x, y - i), color=color)
def draw_square_nmask_with_right(ax, xy, columns=1, color='gray'):
x, y = xy
for i in range(columns):
draw_square_mask(ax, (x + i, y), color=color)
draw_right_mask(ax, (x+i, y), color=color)
def draw_square_nmask_with_top(ax, xy, columns=1, color='gray'):
x, y = xy
draw_top_nmask(ax, xy, columns=columns, color=color)
for i in range(columns):
draw_square_mask(ax, (x + i, y), color=color)
draw_right_mask(ax, (x+i, y), color=color)
# right diamond and square
def draw_right_mask(ax, xy, depth=0.3, size=1, color='gray', alpha=0.3):
# draw right diamond
x, y = xy
points = [(x+size,y), (x+size+depth, y+depth), (x+size+depth, y-size+depth),
(x+size, y-size)]
style = create_stype(color=color, alpha=alpha)
top = Polygon(points, closed=True, **style)
ax.add_patch(top)
def draw_top(vector, xy, color='gray'):
if vector.ndim != 1:
print("{} is not a vector".format(vector))
return
x,y = xy
size = len(vector)
draw_cube(ax, (x, y), 1, depth, [5, 6, 9], str(vector[0]), **solid)
if size > 1:
for i in range(1,size):
draw_cube(ax, (x + i, y), 1, depth, [6, 9], str(vector[i]), **solid)
# fill top surface color
if color is not None:
for i in range(size):
draw_top_mask(ax, (x + i, y), color=color, alpha=0.2)
def draw_right(xy, size, color='gray'):
x,y = xy
for i in range(size):
draw_cube(ax, (x, y - i), 1, depth, [7, 10], **solid)
if color is not None:
draw_right_mask(ax, (x, y-i), color=color, alpha=0.4)
# draw a column without bottom lines
def draw_vector_no_bottom(vector, xy, color='gray'):
if vector.ndim != 1:
print("{} is not a vector".format(vector))
return
x,y = xy
size = len(vector)
if size == 1:
draw_cube(ax, (x, y), 1, depth, [1, 2, 4], str(vector[0]), **solid)
draw_square_mask(ax, (x, y), color=color)
else:
for i in range(size - 1):
draw_cube(ax, (x + i, y), 1, depth, [1, 4], str(vector[i]), **solid)
draw_square_mask(ax, (x + i, y), color=color)
draw_cube(ax, (x + i + 1, y), 1, depth, [1, 2, 4], str(vector[i+1]), **solid)
draw_square_mask(ax, (x + i + 1, y), color=color)
# draw column comment : col0 col1 ...
def draw_column(cols, xy, size=1):
    """Write 'col0', 'col1', ... centered above each of *cols* matrix columns."""
    base_x, base_y = xy
    label_x = base_x + 0.5 * size + 1
    label_y = base_y + 0.5 * size - font_height() / 2
    for idx in range(cols):
        ax.text(label_x + idx, label_y, 'col%d' % idx,
                ha='center', va='center', color='k')
# draw row comment : row0 row1 ...
def draw_row(rows, xy, size=1):
    """Write 'row0', 'row1', ... centered beside each of *rows* matrix rows."""
    base_x, base_y = xy
    label_x = base_x + 0.5 * size
    label_y = base_y + 0.5 * size - font_height() / 2 - 1
    for idx in range(rows):
        ax.text(label_x, label_y - idx, 'row%d' % idx,
                ha='center', va='center', color='k')
def draw_axes(array, xy, with_row_col=True):
x, y = xy
if array.ndim == 1: # vector just draw axis 0
size = len(array)
endx = x + size + 0.5
ax.annotate("", xy=(endx, y), xytext=xy,
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(endx - 1, y, r'axis 1',
size=10, ha='center', va='bottom')
return
# handle matrix
heigh = array.shape[0]
width = array.shape[1]
if with_row_col:
width += 1
heigh += 1
x -= 1
y += 1
starx = x
endx = x + width + 0.5
axisy = y
ax.annotate("", xy=(endx + depth, axisy), xytext=(starx, axisy),
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(endx - 1, axisy, r'axis 1',
size=10, ha='center', va='bottom')
axisx = x
starty = y
endy = y - heigh - 0.5
ax.annotate("", xy=(axisx, endy), xytext=(axisx, starty),
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(axisx, endy - 0.4, r'axis 0',
size=10, ha='center', va='bottom')
def draw_matrix(matrix, xy, title="2D Matrix", with_row_col=True, with_axis=True, color='gray'):
if matrix.ndim != 2:
print("{} is not a matrix".format(matrix))
return
x, y = xy
width = matrix.shape[1]
heigh = matrix.shape[0]
if with_row_col:
width += 1
heigh += 1
# draw title at the center
if len(title):
if with_row_col:
startx = x - 1 + (width + 1)/ 2
else:
startx = x + width / 2
axisy = y + depth + 0.1
if with_axis:
axisy = y + 1.5
ax.text(startx, axisy, title,
size=12, ha='center', va='bottom')
# draw axes
if with_axis:
if with_row_col:
draw_axes(matrix, (x, y), with_row_col=with_row_col)
else:
draw_axes(matrix, (x - 0.3, y + 0.3), with_row_col=with_row_col)
if with_row_col:
draw_row(matrix.shape[0], (x - 1, y))
draw_column(matrix.shape[1], (x - 1 + 0.2, y))
rows = matrix.shape[0]
draw_top(matrix[0], (x, y), color=color)
if rows == 1:
draw_vector(matrix[0], xy, title='', with_axis=False, color=color)
else:
for i in range(rows - 1):
draw_vector_no_bottom(matrix[i], (x, y - i), color=color)
draw_vector(matrix[i+1], (x, y - i - 1), title='', with_axis=False, color=color)
draw_right((x + matrix.shape[1] - 1, y), rows, color=color)
def draw_sum_axis0(a=None, xy=(5.5,7)):
if a is None:
a = np.arange(16).reshape(4,4)
x, y = xy
draw_matrix(a, (x,y), title=r'$np.sum(a, axis=0)$')
sum = np.sum(a, axis=0)
draw_vector(sum, (x, y - a.shape[0] - 0.5), title='', with_axis=False)
columns = a.shape[1]
# draw arrows
for i in range(columns):
ax.annotate("", xy=(x + 0.5 + i, y - a.shape[0] - 0.5), xytext=(x + 0.5 + i, y),
arrowprops=dict(arrowstyle="simple", alpha=0.3, color='red'))
ax.set_xlim(0, 14)
ax.set_ylim(0, 10)
plt.show()
def draw_sum_axis1(a=None, xy=(5,7)):
if a is None:
a = np.arange(16).reshape(4,4)
x, y = xy
draw_matrix(a, (x, y), title=r'$np.sum(a, axis=1)$')
sum = np.sum(a, axis=1)
draw_vertical_vector(sum, (x + a.shape[1] + 0.5 + depth, y), title='')
rows = a.shape[0]
# draw arrows
for i in range(rows):
ax.annotate("", xy=(x + a.shape[1] + depth + 0.5, y - 0.5 - i), xytext=(x, y - 0.5 - i),
arrowprops=dict(arrowstyle="simple", alpha=0.3, color='red'))
ax.set_xlim(0, 14)
ax.set_ylim(0, 10)
plt.show()
def draw_top_right_mask(ax, xy, depth=0.3, size=1, color='gray'):
# draw right diamond
x, y = xy
points = [(x+size,y), (x+size+depth, y+depth), (x+size+depth, y-size+depth),
(x+size, y-size)]
style = create_stype(color)
top = Polygon(points, closed=True, **style)
ax.add_patch(top)
# draw top diamond
x, y = xy
points = [(x,y), (x+depth, y+depth), (x+size+depth, y+depth), (x+size, y)]
style = create_stype(color)
top = Polygon(points, closed=True, **style)
ax.add_patch(top)
draw_square_mask(ax, xy, size, color)
# draw imgs/numpy/narraytypes.png
def draw_vector_matrix_sample():
a = np.arange(16).reshape(4,4)
v = np.arange(4)
draw_vector(v, (1,7))
draw_matrix(a, (8,6.6), with_row_col=True)
ax.set_xlim(0, 14)
ax.set_ylim(0, 10)
plt.show()
def draw_hstack_sample():
v1 = np.array([0,1,2])
v2 = np.array([3,4]) * 10
startx = 1
starty = 8
draw_vector(v1, (startx, starty), title='v1', with_axis=False)
startx += len(v1) + 1
draw_vector(v2, (startx, starty), title='v2', with_axis=False)
startx += len(v2) + 1
draw_vector(np.hstack([v1,v2]), (startx, starty), title='np.hstack([v1,v2])', with_axis=False)
m1 = np.arange(6).reshape(2,3)
m2 = (np.arange(4).reshape(2,2) + 3) * 10
startx = 1
starty = 4
draw_matrix(m1, (startx, starty), title='m1', with_axis=False, with_row_col=False)
startx += m1.shape[1] + 1
draw_matrix(m2, (startx, starty), title='m2', with_axis=False, with_row_col=False)
startx += m2.shape[1] + 1
draw_matrix(np.hstack([m1,m2]), (startx, starty), title='np.hstack([m1,m2])',
with_axis=False, with_row_col=False)
ax.set_xlim(0, 14)
ax.set_ylim(0, 10)
plt.show()
def draw_vstack_sample():
v1 = np.array([0,1])
v2 = np.array([3,4]) * 10
startx = 1
starty = 8
draw_vector(v1, (startx, starty), title='v1', with_axis=False)
starty -= 2
draw_vector(v2, (startx, starty), title='v2', with_axis=False)
starty -= 3
draw_matrix(np.vstack([v1,v2]), (startx, starty), title='np.vstack([v1,v2])',
with_axis=False, with_row_col=False)
m1 = np.arange(4).reshape(2,2)
m2 = (np.arange(6).reshape(3,2) + 3) * 10
startx = 7
starty = 9
draw_matrix(m1, (startx, starty), title='m1', with_axis=False, with_row_col=False)
draw_matrix(m2, (startx + m1.shape[1] + 1.5, starty), title='m2', with_axis=False, with_row_col=False)
starty -= m2.shape[0] + 0.5
draw_matrix(np.vstack([m1,m2]), (startx, starty), title='np.vstack([m1,m2])',
with_axis=False, with_row_col=False)
ax.set_xlim(0, 14)
ax.set_ylim(0, 10)
plt.show()
def draw_3d_diamond(xy, rl=(2,2), color='gray'):
startx, starty = xy
step = 0.5 * np.sin(np.pi/4)
rows = rl[0]
columns = rl[1]
ax.arrow(startx, starty, columns, 0, width=0.05,
color=color, alpha=0.5, head_width=0, head_length=0)
for i in range(1,rows,1):
ax.arrow(startx - i * step, starty - i * step, columns, 0, width=0.01, ls='--',
color=color, alpha=0.5, head_width=0, head_length=0)
ax.arrow(startx - rows * step, starty - rows * step, columns, 0, width=0.05,
color=color, alpha=0.5, head_width=0, head_length=0)
dx = - 0.5 * rows * np.sin(np.pi/4)
dy = - 0.5 * rows * np.cos(np.pi/4)
ax.arrow(startx, starty, dx, dy, width=0.05,
color=color, alpha=0.5, head_width=0, head_length=0)
for i in range(1,columns,1):
ax.arrow(startx + i, starty, dx, dy, width=0.01, ls='--',
color=color, alpha=0.5, head_width=0, head_length=0)
ax.arrow(startx + columns, starty, dx, dy, width=0.05,
color=color, alpha=0.5, head_width=0, head_length=0)
def draw_3d_matrix(xy, array, color='gray'):
    """Draw one stacked lattice layer per entry along *array*'s first axis."""
    base_x, base_y = xy
    layers, rows, columns = array.shape
    for layer in range(layers):
        draw_3d_diamond((base_x, base_y + layer), (rows, columns), color=color)
# xy is the origin point; axis names can be 'x', 'y', 'z'; array must be 3D
def draw_3d_axes(xy, array, lens=[2,2,2], names=['0', '1', '2'], color='k'):
if array.ndim != 3:
print("only support 3D ndarray")
return
startx, starty = xy
'''
fig = plt.figure(figsize=(5, 3), facecolor='w')
ax = plt.axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)
plt.style.use('ggplot')
'''
height, rows, columns = array.shape
lens[0] = rows * 0.5 + 0.5
lens[1] = columns + 0.5
lens[2] = height - 1 + 0.5
dx = - lens[0] * np.sin(np.pi/4)
dy = - lens[0] * np.cos(np.pi/4)
# latex style
axis_font_size = 12
axis_color = color
for i in range(len(names)):
names[i] = '${' + names[i] + '}$'
ax.arrow(startx, starty, dx, dy, width=0.05, color=axis_color, clip_on=False,
head_width=0.15, head_length=0.15)
ax.text(startx + dx - 0.3, starty + dy - 0.3, names[0],
size=axis_font_size, ha='center', va='center')
# second dimension
dx = lens[1]
endx = startx + dx
endy = starty
ax.arrow(startx, starty, dx, 0, width=0.05, color=axis_color, clip_on=False,
head_width=0.15, head_length=0.15)
ax.text(endx + 0.5, endy, names[1],
size=axis_font_size, ha='center', va='center')
# third dimension
dy = lens[2]
endx = startx
endy = starty + dy
ax.arrow(startx, starty, 0, dy, width=0.05, color=axis_color, clip_on=False,
head_width=0.15, head_length=0.15)
ax.text(endx, endy + 0.5, names[2],
size=axis_font_size, ha='center', va='center')
draw_3d_matrix(xy, array, color=axis_color)
def draw_3axes_sample():
a = np.arange(8).reshape(2,2,2)
draw_3d_axes((2,4.5), a, names=['x','y','z'], color='gray')
ax.text(2.5, 2, "Cartesian", size=12, ha='center', va='center')
draw_3d_axes((7,4.5), a, names=['0','1','2'], color='gray')
ax.text(7.5, 2, "Octave/Matlab", size=12, ha='center', va='center')
draw_3d_axes((12,4.5), a, names=['1','2','0'], color='gray')
ax.text(12.5, 2, "Numpy", size=12, ha='center', va='center')
plt.show()
def draw_octave_3d_axis_sample():
a = np.array([0,2,1,3,4,6,5,7]).reshape(2,2,2)
draw_3d_axes((3.5,4.5), a, names=['0','1','2'], color='gray')
height, rows, columns = a.shape
startx, starty = 8.5, 4.5
endx, endy = startx, starty
for i in range(height):
draw_matrix(a[i], (endx,endy), title="", with_row_col=False,
with_axis=False, color='gray')
endx += columns
endy += rows
endx -= columns
endy -= rows
if height > 1:
ax.plot([startx + depth, endx], [starty + depth, endy],
ls='--', lw=1, color='gray')
ax.plot([startx + depth + columns, endx + columns], [starty + depth, endy],
ls='--', lw=1, color='gray')
ax.plot([startx + depth + columns, endx + columns], [starty + depth - rows, endy - rows],
ls='--', lw=1, color='gray')
# draw height axes
ax.annotate("", xy=(endx - 0.5, endy), xytext=(startx - 0.5, starty),
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(endx - 0.5, endy, r'2',
size=11, ha='center', va='bottom')
# draw horizontal
ax.annotate("", xy=(startx + columns, starty), xytext=(startx - 0.5, starty),
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(startx + columns - 0.4, starty, r'1',
size=11, ha='center', va='bottom')
# draw vertical
ax.annotate("", xy=(startx - 0.5, starty - rows), xytext=(startx - 0.5, starty),
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(startx - 0.5, starty - rows - 0.5, r'0',
size=11, ha='center', va='bottom')
draw_square_mask(ax, (endx, endy), size=1, color='red', alpha=0.3)
plt.show()
def draw_numpy_3d_axis_sample():
a = np.arange(8).reshape(2,2,2, order='F')
names=['1','2','0']
draw_3d_axes((3.5,4.5), a, names=names, color='gray')
height, rows, columns = a.shape
startx, starty = 8.5, 4.5
endx, endy = startx, starty
for i in range(height):
draw_matrix(a[i], (endx,endy), title="", with_row_col=False,
with_axis=False, color='gray')
endx += columns
endy += rows
endx -= columns
endy -= rows
if height > 1:
ax.plot([startx + depth, endx], [starty + depth, endy],
ls='--', lw=1, color='gray')
ax.plot([startx + depth + columns, endx + columns], [starty + depth, endy],
ls='--', lw=1, color='gray')
ax.plot([startx + depth + columns, endx + columns], [starty + depth - rows, endy - rows],
ls='--', lw=1, color='gray')
# draw height axes
ax.annotate("", xy=(endx - 0.5, endy), xytext=(startx - 0.5, starty),
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(endx - 0.5, endy, names[2],
size=11, ha='center', va='bottom')
# draw horizontal
ax.annotate("", xy=(startx + columns, starty), xytext=(startx - 0.5, starty),
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(startx + columns - 0.4, starty, names[1],
size=11, ha='center', va='bottom')
# draw vertical
ax.annotate("", xy=(startx - 0.5, starty - rows), xytext=(startx - 0.5, starty),
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(startx - 0.5, starty - rows - 0.5, names[0],
size=11, ha='center', va='bottom')
draw_square_mask(ax, (startx+1, starty), size=1, color='red', alpha=0.3)
plt.show()
def draw_tree_index_sample():
names=['1','2','0']
a = np.arange(8).reshape(2,2,2)
height, rows, columns = a.shape
startx,starty = 5, 7
endy = starty
half = rows * 1.0 / 2
# draw horizontal
ax.annotate("", xy=(startx + columns, starty), xytext=(startx - 0.5, starty),
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(startx + columns - 0.4, starty, names[1],
size=11, ha='center', va='bottom')
# draw vertical
ax.annotate("", xy=(startx - 0.5, starty - rows), xytext=(startx - 0.5, starty),
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(startx - 0.5, starty - rows - 0.5, names[0],
size=11, ha='center', va='bottom')
for i in range(height):
draw_matrix(a[i], (startx, endy), title='', with_row_col=False, with_axis=False)
ax.plot([startx, startx - 1],
[endy - half, endy - half], c='black', ls='-', lw=2)
endy -= rows + 1
endy += rows + 1
endy -= half
ax.plot([startx - 1, startx - 1],
[starty - half, endy], c='black', ls='-', lw=2)
# draw vertical
ax.annotate("", xy=(startx - 1, starty - half + 0.3), xytext=(startx - 1, starty),
arrowprops=dict(arrowstyle="simple", color='black'))
ax.text(startx - 1, starty, names[2],
size=11, ha='center', va='bottom')
def create_indices_array(rows, columns, order='C'):
    """Return a 1-D ndarray of "row,col" index strings for a rows x columns grid.

    order='C' enumerates row-major (row varies slowest); any other value
    enumerates column-major, matching numpy's 'C' / 'F' memory orders.
    """
    if order == 'C':
        labels = ['%d,%d' % (r, c) for r in range(rows) for c in range(columns)]
    else:
        labels = ['%d,%d' % (r, c) for c in range(columns) for r in range(rows)]
    return np.array(labels)
def create_3indices_array(height, rows, columns, order='C'):
    """Return a 1-D ndarray of "h,r,c" index strings for a 3-D grid.

    order='C' enumerates row-major (height varies slowest); any other value
    enumerates column-major, matching numpy's 'C' / 'F' memory orders.
    """
    if order == 'C':
        labels = ['%d,%d,%d' % (h, r, c)
                  for h in range(height)
                  for r in range(rows)
                  for c in range(columns)]
    else:
        labels = ['%d,%d,%d' % (h, r, c)
                  for c in range(columns)
                  for r in range(rows)
                  for h in range(height)]
    return np.array(labels)
def draw_row_first_sample():
a = np.arange(9).reshape(3,3)
startx,starty = 6, 7
rows, columns = a.shape
draw_matrix(a, (startx, starty), title='', with_row_col=True, with_axis=False, color=None)
draw_top_nmask(ax, (startx, starty), columns, color='red')
colors = ['red', 'green', 'blue']
for i in range(rows):
draw_square_nmask_with_right(ax, (startx, starty - i), columns, color=colors[i])
startx, starty = 3, 2.5
draw_vector_head((startx, starty), color=None)
draw_vector(a.ravel(), (startx, starty), title="", with_axis=False, color=None)
draw_vector_tail((startx + len(a.ravel()), starty), color=None)
for i in range(rows):
draw_square_nmask(ax, (startx + columns * i, starty), columns, color=colors[i])
indices = create_indices_array(3, 3, order='C')
draw_indices_vector(indices, (startx, starty - 1), color=None)
def draw_column_first_sample():
a = np.arange(9).reshape(3,3,order='F')
startx,starty = 6, 7
rows, columns = a.shape
draw_matrix(a, (startx, starty), title='', with_row_col=True, with_axis=False, color=None)
#draw_top_nmask(ax, (startx, starty), columns, color='red')
colors = ['red', 'green', 'blue']
for i in range(rows):
draw_square_nmask_column(ax, (startx + i, starty), rows, color=colors[i])
draw_right_nmask(ax, (startx + columns - 1, starty), rows, color=colors[-1])
startx, starty = 3, 2.5
draw_vector_head((startx, starty), color=None)
draw_vector(a.T.ravel(), (startx, starty), title="", with_axis=False, color=None)
draw_vector_tail((startx + len(a.T.ravel()), starty), color=None)
for i in range(rows):
draw_square_nmask(ax, (startx + columns * i, starty), columns, color=colors[i])
indices = create_indices_array(3, 3, order='F')
draw_indices_vector(indices, (startx, starty - 1), color=None)
def draw_indices_sample():
startx, starty = 2, 7.5
indices = create_indices_array(3, 3, order='C')
draw_indices_vector(indices, (startx, starty), color=None)
starty -= 1.5
indices = create_indices_array(3, 3, order='F')
draw_indices_vector(indices, (startx, starty), color=None)
starty -= 2
indices = create_3indices_array(2, 2, 3, order='C')
draw_indices_vector(indices, (startx, starty), color=None)
starty -= 1.5
indices = create_3indices_array(2, 2, 3, order='F')
draw_indices_vector(indices, (startx, starty), color=None)
if __name__ == "__main__":
draw_tree_index_sample()
plt.show()
|
24,432 | 8a3522525a3c6394ab204124decdc9c600e26e9f | n, u = map(int, input().split())
table = [i for i in range(n+1)] # 맨 앞 제외
print(table)
def find(table, nd):
    """Return the root representative of *nd*, compressing the path on the way."""
    if table[nd] != nd:
        table[nd] = find(table, table[nd])
    return table[nd]
def union(a, b, table):  # merge so that the smaller root wins
    """Merge the sets containing *a* and *b*.

    Bug fix: the original wrote table[a] / table[b], re-parenting only the
    query nodes instead of their roots — other members of the set stayed in
    the old tree. Linking the roots merges the whole sets.
    """
    r1 = find(table, a)
    r2 = find(table, b)
    if r1 > r2:
        table[r1] = r2
    else:
        table[r2] = r1
# Apply the u union operations read from stdin.
for _ in range(u):
    a, b = map(int, input().split())
    union(a, b, table)
# Print the node labels and, below them, each node's (possibly uncompressed) parent.
print([i for i in range(1, n+1)])
print(table[1:])
24,433 | 1378a4ada7ffb5017fdd1b43aa9da78199fcd689 | import unittest
from com.ea.pages import brand_in_city_page
from com.ea.common import tools, web_login
from com.ea.resource import globalparameter as gl
import time
import os
import sys
class MyTestCase(unittest.TestCase):
    u"""Report of cities where each brand is deployed, grouped by province."""
    # Failure screenshots are saved to a folder named after this source file.
    screenshot_path = os.path.join(gl.screenshot_path, os.path.splitext(os.path.basename(__file__))[0])
    # Query window and the row count expected for exactly that window.
    start_date = '2018-01-01'
    end_date = '2018-01-31'
    count = '294'
    @classmethod
    def setUpClass(cls):
        # One logged-in browser session shared by all tests in this class.
        cls.webdriver = tools.get_chrome_driver()
        web_login.login(cls.webdriver)
    def setUp(self):
        pass
    def tearDown(self):
        pass
    @classmethod
    def tearDownClass(cls):
        cls.webdriver.quit()
    def test_brand_in_city(self):
        u"""Report of cities where each brand is deployed, grouped by province."""
        # Current test-method name, used to label the failure screenshot.
        casename = sys._getframe().f_code.co_name
        try:
            brandincitypage = brand_in_city_page.BrandInCityPage(self.webdriver)
            url = brandincitypage.get_url()
            self.webdriver.get(url)
            brandincitypage.input_loan_date_start(self.start_date)
            brandincitypage.input_loan_date_end(self.end_date)
            brandincitypage.click_search_button()
            time.sleep(5)  # fixed wait for the search results to render
            brandincitypage.page_down()
            time.sleep(1)
            # The fixed date range is expected to yield exactly `count` rows.
            assert self.count == brandincitypage.get_count()
        except Exception as e:
            # Capture a screenshot before re-raising so failures are diagnosable.
            tools.screenshot(self.webdriver, self.screenshot_path, casename)
            raise e
if __name__ == '__main__':
unittest.main()
|
24,434 | 69d087dd50e31f51a73c0b9ed8c04e89d125a818 | # ===============================================================================
#
# Copyright (c) 2013-2017 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
#
# ===============================================================================
from sectools.common.utils.c_data import remove_duplicates
from sectools.common.utils.c_rule import CoreRuleBase
from sectools.features.isc.iot.cfgparser.auto_gen_xml_config import complex_metadata
from sectools.features.isc.iot.cfgparser.auto_gen_xml_config import complex_images_list
from sectools.features.isc.iot.cfgparser.auto_gen_xml_config import complex_general_properties
from . import defines
class IoTConfigRulesManager(CoreRuleBase):
    """
    This is the main SecImage config rules manager that runs the rules.

    self.configs maps each top-level config section name to a rule object
    exposing validate(); a None value means that section has no rules.
    """
    def __init__(self):
        self.configs = {}
        self.configs['images_list'] = _ImageList()
        self.configs['general_properties'] = _GeneralProperties()
        self.configs['metadata'] = None
        self.configs['parsegen'] = None
    def validate(self, data, data_dict):
        """Run every registered rule over the parsed config tree.

        Arguments:
            data: parsed config object whose sections live under data.root.
            data_dict: dict describing the config structure; its keys name
                the sections to walk.
        Raises:
            RuntimeError: one aggregated message when any rule fails.
        """
        retval = True
        error_str = ''
        # based on the dictionary structure, go through each section from root
        for name in data_dict:
            # check if there's a registered rule object for this config section
            if name in self.configs and self.configs[name] is not None:
                config = getattr(data.root, name)
                if name == 'images_list':
                    # images_list rules also need general_properties and
                    # metadata as extra context arguments.
                    ret, error = self.configs[name].validate(
                        config, getattr(data.root, 'general_properties'),
                        getattr(data.root, 'metadata'))
                else:
                    ret, error = self.configs[name].validate(config, config)
                # accumulate the return values
                if ret == False:
                    retval &= ret
                    error_str += '\n\n<%s>%s' % (name, error)
        if retval == False:
            raise RuntimeError(
                '\nIoT config validation failed with following error(s): %s' %
                error_str)
class _Signing(object):
    """Rule object for signing default attributes.

    Currently a placeholder: validation always succeeds.
    """
    def __init__(self):
        pass
    def validate(self, signing, *args):
        """Report success unconditionally (no signing rules enforced yet)."""
        return True, ''
class _GeneralProperties(object):
    """Rule object for the general_properties config section.

    Only the node type is checked today; validation always succeeds.
    """
    def __init__(self):
        pass
    def validate(self, general_properties, *args):
        """Type-check the node, then report success."""
        assert(isinstance(general_properties, complex_general_properties))
        return True, ''
class _ImageList(object):
    """
    Defines the rules for image parameters to be signed.

    validate() walks each <image> entry and resolves every overridable
    general property (an image-level override wins over the global default).
    """
    def __init__(self):
        # Flags for one-time warnings; not consulted within this method.
        self.mask_warning = True
        self.version_warning = True
    def validate(self, images, *args):
        """Validate the images_list section.

        Arguments:
            images: complex_images_list node from the parsed config.
            args[0]: complex_general_properties node (global defaults).
            args[1]: complex_metadata node.
        Returns:
            (ok, error_string); ok is False when any error was recorded.
        """
        assert(isinstance(images, complex_images_list))
        image_list = images.get_image()
        # One-element list so the add_error closure can mutate the flag.
        retval = [True]
        errors = []
        def add_error(sign_id, error):
            # Record one error and mark the overall validation as failed.
            retval[0] = False
            errors.append("\nsign_id={0}: ".format(sign_id) + error)
        # expect args[0] to be instance of signing
        assert(isinstance(args[0], complex_general_properties))
        assert(isinstance(args[1], complex_metadata))
        general_properties = args[0]
        # Not Overridable
        # NOTE(review): fetched but never used below — confirm whether a
        # serial-number rule was meant to be applied here.
        use_serial_number_in_signing =\
            general_properties.get_use_serial_number_in_signing()
        attributes = dict(
            hash_algorithm=general_properties.get_hash_algorithm(),
            hmac=general_properties.get_hmac(),
            rsa_padding=general_properties.get_rsa_padding(),
            num_root_certs=general_properties.get_num_root_certs(),
            secboot_version=general_properties.get_secboot_version(),
            dsa_type=general_properties.get_dsa_type(),
            key_size=general_properties.get_key_size(),
            exponent=general_properties.get_exponent())
        # Overridable defaults. These local names must stay "default_<attr>"
        # because the loop below resolves them via locals()["default_" + attr].
        default_debug = general_properties.get_debug()
        default_sw_id = general_properties.get_sw_id()
        default_oem_id = general_properties.get_oem_id()
        default_model_id = general_properties.get_model_id()
        default_msm_part = general_properties.get_msm_part()
        default_soc_hw_version = general_properties.get_soc_hw_version()
        default_in_use_soc_hw_version = general_properties.get_in_use_soc_hw_version()
        for image in image_list:
            sign_id = image.get_sign_id()
            overrides = image.get_general_properties_overrides()
            # Update all the overridable attributes
            for attr in defines.CONFIG_STRUCTURE["images_list"]["image"][0][
                    "general_properties_overrides"].keys():
                attr_override = getattr(overrides, "get_" + attr)()
                if attr_override is None:
                    # Fragile: every key under general_properties_overrides
                    # needs a matching default_* local defined above, or this
                    # lookup raises KeyError.
                    attributes[attr] = locals()["default_" + attr]
                else:
                    attributes[attr] = attr_override
        return retval[0], "".join(remove_duplicates(errors))
|
24,435 | 2e0002c3d6cfebdd4b450572b1f4e2c9c91b1ef4 | # coding=utf8
# 给定一个Excel表格中的列名称,返回其相应的列序号。
# 例如,
# A -> 1
# B -> 2
# C -> 3
# ...
# Z -> 26
# AA -> 27
# AB -> 28
# ...
# 示例 1:
# 输入: "A"
# 输出: 1
# 示例 2:
# 输入: "AB"
# 输出: 28
# 示例 3:
# 输入: "ZY"
# 输出: 701
class Solution(object):
    def titleToNumber(self, s):
        """Convert an Excel column title (e.g. 'AB') to its column number.

        Treats the title as a base-26 numeral where 'A' == 1 ... 'Z' == 26
        (companion of problem 168, which does the reverse conversion).

        :type s: str
        :rtype: int
        """
        result = 0
        for ch in s:
            result = result * 26 + (ord(ch) - ord('A') + 1)
        return result
# Smoke-check the converter against the worked examples above.
s = Solution()
for title in ('A', 'AB', 'ZY', 'ABC'):
    print(s.titleToNumber(title))
# 1 2 3
# 1*10**(3-1) 2*10**(2-1) 3*10**(1-1)
# ABC
# 1*26**(3-1)+2*26**(2-1)+3*26**(1-1) = 26**2 + 26*2+3 = 731
|
24,436 | 9d7a15f19aced4f55275786f3f5effb519e26d97 | import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from collections import Counter
# usando pandas para facilitar organizacao dos dados
# using pandas to make organising the data easier
df = pd.read_csv('buscas.csv') # our DataFrame, our 'table' of data
# Grab and split our columns into the features (X) and the labels (y)
X_df = df[['home','busca','logado']]
y_df = df ['comprou']
# The 'busca' column is a categorical variable, so we turn it into 3 columns (called dummies)
Xdummies_df = pd.get_dummies(X_df)
ydummies_df = y_df # not required, but done this way to keep the same pattern
# sklearn's modelo.fit uses two arrays, X and y. What we have are two dataframes,
# hence the conversion to arrays:
X = Xdummies_df.values
y = ydummies_df.values
# Split the data between training and testing
# Training = 90% of the list
porcentagem_treino = 0.9
tamanho_treino = int(porcentagem_treino * len(X)) # we want 90% of the data for training
tamanho_teste = int(len(X) - tamanho_treino) # the remaining data is used for testing
X_treino = X[:tamanho_treino]
y_treino = y[:tamanho_treino]
# Test = 10%
X_teste = X[-tamanho_teste:]
y_teste = y[-tamanho_teste:]
# Now we train on our data with sklearn's MultinomialNB model
modelo = MultinomialNB()
modelo.fit(X_treino, y_treino) # fit to the training data
# Try to predict the labels, then check how many predictions were right
resultado = modelo.predict(X_teste)
acertos = (resultado == y_teste)
total_acertos = sum(acertos)
total_elementos = len(X_teste)
taxa_acerto = 100.0 * total_acertos / total_elementos
# How do we know whether our ML algorithm is doing well?
# acertos_um = sum(y) # count every '1' case, as if the algorithm guessed everything as '1'
#acertos_zero = len(y) - acertos_um # do the same for '0'
# acertos_um = len(y[y==1])
# acertos_zero = len(y[y==0])
acertos_base = max(Counter(y_teste).values()) # Counter tallies each label; max gives the majority-class count
taxa_acerto_base = 100* acertos_base / len(y_teste) # majority-class baseline accuracy as a %
print ("Taxa de acerto algoritmo: %.2f%%" % taxa_acerto)
print("Taxa de acerto base: %.2f%%" % taxa_acerto_base)
print("Total de elementos no teste: %d" % total_elementos)
24,437 | 72e02e50db8bb066da3418ec0bf5b56af5fbbcf8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 15 08:05:03 2020
@author: mojdeh
"""
def int_to_roman(number):
    """Convert an integer in [0, 3999] to its Roman numeral string.

    Works decimal digit by decimal digit: the ones/tens/hundreds/thousands
    digit each maps to a pre-built numeral in the corresponding lookup row.
    Returns '' for 0.
    """
    roman_result = ""
    # Lookup rows: 0 = ones, 1 = tens, 2 = hundreds, 3 = thousands.
    # Fix: index 6 of the ones row was 'VII'; the numeral for 6 is 'VI'.
    roman_numeral = [
        ['', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX'],
        ['', 'X', 'XX', 'XXX', 'XL', 'L', 'LX', 'LXX', 'LXXX', 'XC'],
        ['', 'C', 'CC', 'CCC', 'CD', 'D', 'DC', 'DCC', 'DCCC', 'CM'],
        ['', 'M', 'MM', 'MMM']
    ]
    # i selects the lookup row for the current decimal digit
    i = 0
    # Peel off decimal digits with integer division. (The previous version
    # used float division in the loop condition, relying on float behavior
    # to terminate; 'number != 0' states the intent directly.)
    while number != 0:
        number, remainder = divmod(number, 10)
        # prepend this digit's numeral to the result built so far
        roman_result = roman_numeral[i][remainder] + roman_result
        i = i + 1
    return(roman_result)
def roman_to_int(roman_number):
result = 0
#defining each letter in Roman number as an int number in a dictionary
int_dict = {
'I':1,
'V':5,
'X':10,
'L':50,
'C':100,
'D':500,
'M':1000
}
#considering Roman number as a string
for i in range(0,len(roman_number)-1):
#because of substractive notation with 4,9,40,90,400,900 values, we should compare two letters wich are next te each other
if int_dict[roman_number[i]]>=int_dict[roman_number[i+1]]:
result=result + int_dict[roman_number[i]]
else:
result=result+int_dict[roman_number[i+1]]-int_dict[roman_number[i]]
return(result)
#call functions
print(roman_to_int('XXXIX'))
print(int_to_roman(roman_to_int('XXXIX')))
|
24,438 | ba628478a3698d7696db882f8412bb73b2be7bc0 | from collections import Counter
from itertools import accumulate
def part1(sequence):
    """Return the final frequency after applying every change once.

    The running total's last element is simply the sum, so there is no
    need to materialize the whole accumulate() list (which also raised
    IndexError on an empty sequence; sum returns 0 there).
    """
    return sum(sequence)
assert part1([+1, +1, +1]) == 3
assert part1([+1, +1, -2]) == 0
assert part1([-1, -2, -3]) == -6
def part2(sequence, start=0, c=None):
    """Return the first running frequency that is reached twice.

    Cycles through the change list (possibly many times) tracking every
    frequency seen so far, and returns the first value revisited.

    Bug fix: the previous version collected duplicates per pass with a
    Counter and then returned the duplicate occurring earliest in the
    accumulation list, which is not necessarily the first value *reached*
    twice — e.g. part2([5, 0, -5]) must yield 5 (revisited at step 2),
    but the old code returned 0. It also recursed once per pass, risking
    RecursionError on long-period inputs.

    :param sequence: list of frequency changes; must be non-empty
    :param start: initial frequency (counts as already seen)
    :param c: unused; kept for backward compatibility with the old API
    :raises ValueError: if sequence is empty (old code recursed forever)
    """
    if not sequence:
        raise ValueError("sequence must be non-empty")
    seen = {start}
    frequency = start
    while True:
        for change in sequence:
            frequency += change
            if frequency in seen:
                return frequency
            seen.add(frequency)
assert part2([1, -1]) == 0
assert part2([+3, +3, +4, -2, -4]) == 10
assert part2([-6, +3, +8, +5, -6]) == 5
assert part2([+7, +7, -2, -7, -4]) == 14
input_file = open('day01.in')
sequence = [int(line) for line in input_file]
print("part1:", part1(sequence))
print("part2:", part2(sequence))
|
24,439 | afd1fd00bfef305c0acdef10ba5fb77b9167a545 | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from rl_nexus.utils.ope_utils import choose_estimate_from_sequence
from rl_nexus.components.models.Simple_MLP.Simple_MLP import Simple_MLP
import pdb
dtype = torch.float
class Model_Core(nn.Sequential):
    """MLP feature extractor: a Linear + activation pair per hidden layer.

    The output of the last hidden layer is used as the feature vector by
    FQE's least-squares head.
    """

    def __init__(self, input_dim, hidden_layers, activation):
        """Build the sequential stack.

        :param input_dim: size of the observation vector
        :param hidden_layers: non-empty list of hidden layer widths
        :param activation: 'tanh' or 'relu'
        :raises ValueError: for an unsupported activation name (previously
            this fell through and raised a confusing NameError)
        """
        activations = {'tanh': nn.Tanh, 'relu': nn.ReLU}
        if activation not in activations:
            raise ValueError('unsupported activation: {}'.format(activation))
        act_cls = activations[activation]
        # Fresh activation module per layer (the old code reused one
        # instance; harmless for stateless activations, but fresh modules
        # are the conventional nn.Sequential layout).
        layers = [nn.Linear(input_dim, hidden_layers[0]), act_cls()]
        for i in range(len(hidden_layers) - 1):
            layers.append(nn.Linear(hidden_layers[i], hidden_layers[i + 1]))
            layers.append(act_cls())
        super(Model_Core, self).__init__(*layers)
class FQE():
    """Fitted Q Evaluation (FQE) for off-policy policy evaluation.

    Fits a Q-function to a fixed behavior dataset by iterating Bellman
    backups under the target policy's action probabilities, then estimates
    the target policy's value from initial-state Q-values with a
    discounted terminal-state correction.

    NOTE(review): several declared options are unimplemented and raise
    NotImplementedError: a live policy_net, norm='std' (the default!),
    and keep_terminal_states=False — confirm callers always pass
    supported values.
    """
    def __init__(self, dataset, obs_dim, act_dim, gamma, horizon,
                policy_net, hidden_layers, activation,
                norm = 'std', use_delayed_target = False,
                keep_terminal_states = True, debug = True):
        """Prepare tensors from *dataset* and build the Q-network.

        :param dataset: dict of arrays (obs, acts, rews, next_obs,
            init_obs, term_obs, target_prob_*) — assumed flattened to
            n_episode * horizon transition rows; TODO confirm layout.
        :param gamma: discount factor
        :param horizon: episode length, used in the terminal correction
        :param use_delayed_target: True -> DQN-style Q-network
            (Simple_MLP); False -> feature net whose last linear layer is
            solved by least squares each minibatch (train_non_linear).
        """
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.gamma = gamma
        self.horizon = horizon
        self.norm = norm
        self.policy_net = policy_net
        self.hidden_layers = hidden_layers
        self.use_delayed_target = use_delayed_target
        self.n_episode = dataset['init_obs'].shape[0]
        if keep_terminal_states:
            # Keep every transition row; no rows marked absorbing.
            self.included_idx = torch.arange(dataset['obs'].shape[0])
            self.end_idx = np.arange(self.horizon-1, dataset['obs'].shape[0], self.horizon)
            # self.absorbing_idx = np.where(dataset['info'][:,0] == True)[0]
            self.absorbing_idx = np.array([])
        else:
            # NOTE(review): unimplemented — included_idx/absorbing_idx are
            # never set on this path and the attribute reads below fail.
            pass
        self.n_samples = self.included_idx.shape[0]
        self.non_absorbing_mask = torch.ones(self.n_samples, dtype=torch.bool)
        self.non_absorbing_mask[self.absorbing_idx] = False
        self.data_acts = torch.tensor(dataset['acts'], dtype=torch.long)[self.included_idx]
        self.rews = torch.tensor(dataset['rews'], dtype=dtype)[self.included_idx]
        if self.policy_net is not None:
            raise NotImplementedError
        else:
            # Target-policy action probabilities precomputed in the dataset.
            self.pi_current = torch.tensor(dataset['target_prob_obs'],dtype=dtype)[self.included_idx]
            self.pi_next = torch.tensor(dataset['target_prob_next_obs'], dtype=dtype)[self.included_idx]
            self.pi_init = torch.tensor(dataset['target_prob_init_obs'], dtype=dtype)
            self.pi_term = torch.tensor(dataset['target_prob_term_obs'], dtype=dtype)
        if self.norm == 'std':
            raise NotImplementedError
        else:
            self.obs = torch.tensor(dataset['obs'], dtype = dtype)[self.included_idx]
            self.next_obs = torch.tensor(dataset['next_obs'], dtype=dtype)[self.included_idx]
            self.init_obs = torch.tensor(dataset['init_obs'], dtype=dtype)
            self.term_obs = torch.tensor(dataset['term_obs'], dtype=dtype)
        #* whiten the included observation data
        obs_mean = torch.mean(self.obs, dim=0, keepdims= True)
        obs_std = torch.std(self.obs, dim=0, keepdims= True)
        self.obs = (self.obs - obs_mean) / obs_std
        self.next_obs = (self.next_obs - obs_mean) / obs_std
        self.init_obs = (self.init_obs - obs_mean) / obs_std
        self.term_obs = (self.term_obs - obs_mean) / obs_std
        if use_delayed_target:
            self.q_net = Simple_MLP(input_dim = self.obs_dim, output_dim = self.act_dim, hidden_layers = hidden_layers,\
                activation= activation, output_transform = None)
        else:
            self.q_net = Model_Core(input_dim = self.obs_dim, hidden_layers = hidden_layers, activation= activation)
        self.debug = debug
    def train(self, num_iter = 1000, lr = 1.0e-3, batch_size = 500, tail_average=10, reg = 1e-3):
        """Dispatch to the configured training routine and return the
        policy-value estimate."""
        if self.use_delayed_target:
            value_est = self.train_delayed_target(num_iter, lr, batch_size, tail_average, reg)
        else:
            value_est = self.train_non_linear(num_iter, lr, batch_size, tail_average, reg)
        return value_est
    def train_delayed_target(self, num_iter = 1000, lr = 1.0e-3, batch_size = 500, tail_average=10,\
        reg = 1e-3, use_separate_target_net=False):
        """DQN-style FQE: minibatch Bellman regression with Huber loss.

        :param use_separate_target_net: if True, backups use
            self.q_net_target, refreshed each epoch. NOTE(review):
            q_net_target is never created in __init__ — this path raises
            AttributeError unless set up externally; confirm.
        :return: value estimate chosen from the per-10-iteration sequence
        """
        optimizer_q = optim.Adam(self.q_net.parameters(), lr = lr, betas = (0.9,0.999), eps=1e-8, weight_decay = reg)
        value_est_list = []
        if not batch_size:
            batch_size = self.n_samples #* use the whole batch if no batchsize declared
        for i in range(num_iter):
            # decayed_lr = lr / np.sqrt(i+1)
            # for param_group in optimizer_q.param_groups:
            #     param_group['lr'] = decayed_lr
            perm = torch.randperm(self.n_samples)
            num_batches = self.n_samples // batch_size
            for j in range(num_batches):
                idx = perm[j*batch_size:(j+1)*batch_size]
                obs = self.obs[idx]
                acts = self.data_acts[idx]
                next_obs = self.next_obs[idx]
                pi_next = self.pi_next[idx]
                rews = self.rews[idx]
                non_absorbing_mask = self.non_absorbing_mask[idx]
                # Q(s, a) for the actions actually taken in the dataset
                state_action_values = self.q_net(obs).gather(1, acts)
                # Expected Q under the target policy at the next state;
                # absorbing rows keep value 0.
                next_state_values = torch.zeros(batch_size,1, dtype=dtype)
                non_absorbing_next_states = next_obs[non_absorbing_mask]
                if use_separate_target_net:
                    next_state_values[non_absorbing_mask] = \
                        (self.q_net_target(non_absorbing_next_states)*pi_next[non_absorbing_mask]).sum(dim=1, keepdims = True).detach()
                else:
                    next_state_values[non_absorbing_mask] = \
                        (self.q_net(non_absorbing_next_states)*pi_next[non_absorbing_mask]).sum(dim=1, keepdims = True).detach()
                expected_state_action_values = (next_state_values * self.gamma) + rews
                # Huber loss
                loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)
                optimizer_q.zero_grad()
                loss.backward()
                # Gradient clipping to [-1, 1], as in the original DQN recipe
                for param in self.q_net.model.parameters():
                    param.grad.data.clamp_(-1,1)
                optimizer_q.step()
            if use_separate_target_net:
                self.q_net_target.load_state_dict(self.q_net.state_dict())
            if i % 10 == 0:
                # Value estimate: expected initial Q minus the discounted
                # terminal Q (corrects for bootstrapped value past horizon).
                q_s0 = self.q_net(self.init_obs)
                q_s0_pi = (q_s0 * self.pi_init).sum()
                q_sterm = self.q_net(self.term_obs)
                q_sterm_pi = (q_sterm * self.pi_term).sum()
                value_est = (q_s0_pi - self.gamma**self.horizon*q_sterm_pi) / self.n_episode
                value_est_list.append(value_est.detach().numpy())
            if i %10 == 0 and i>0 and self.debug:
                print('\n')
                print('iter {} Trailing estimate: '.format(i), np.mean(value_est_list[-10:]))
                print('loss {}'.format(loss.detach().numpy()))
        final_value_estimate = choose_estimate_from_sequence(value_est_list)
        return final_value_estimate
    def train_non_linear(self, num_iter = 1000, lr = 1.0e-3, batch_size = 500, tail_average=10, reg = 1e-3):
        """Feature-net FQE: learn nonlinear features with SGD while the
        final linear layer is solved in closed form (ridge regression on
        the Bellman system) every minibatch.

        :param reg: ridge regularizer for the least-squares solve
        :param tail_average: number of recorded estimates averaged into
            the returned value
        :return: mean of the last *tail_average* best-loss estimates
        """
        optimizer_q = optim.Adam(self.q_net.parameters(), lr = lr, betas = (0.0,0.99), eps=1e-8)
        value_est_list = []
        feature_dim = self.hidden_layers[-1]
        I = torch.eye(self.act_dim*feature_dim)
        min_loss = np.inf
        if not batch_size:
            batch_size = self.n_samples #* use the whole batch if no batchsize declared
        # pdb.set_trace()
        for i in range(num_iter):
            # 1/sqrt(t) learning-rate decay
            decayed_lr = lr / np.sqrt(i+1)
            for param_group in optimizer_q.param_groups:
                param_group['lr'] = decayed_lr
            perm = torch.randperm(self.n_samples)
            num_batches = self.n_samples // batch_size
            current_iter_loss = 0
            for j in range(num_batches):
                idx = perm[j*batch_size:(j+1)*batch_size]
                obs = self.obs[idx]
                acts = self.data_acts[idx]
                next_obs = self.next_obs[idx]
                pi_next = self.pi_next[idx]
                rews = self.rews[idx]
                non_absorbing_mask = self.non_absorbing_mask[idx]
                #* extract the non-linear features
                Z = self.q_net(obs)
                Z_prime = self.q_net(next_obs)
                #* solve for the last linear layer using least square regression
                # Phi: features block-placed by taken action; Phi_prime_pi:
                # next-state features weighted by target-policy probs.
                Phi = torch.zeros(batch_size, feature_dim*self.act_dim, dtype=dtype)
                Phi_prime_pi = torch.zeros(batch_size, feature_dim*self.act_dim, dtype=dtype)
                for a in range(self.act_dim):
                    act_idx = torch.where(acts == a)[0]
                    Phi[act_idx, a*feature_dim:(a+1)*feature_dim] = Z[act_idx]
                    Phi_prime_pi[:, a*feature_dim:(a+1)*feature_dim] = pi_next[:, a][:,None] * Z_prime
                regularized_inverse = torch.inverse( torch.mm(Phi.T, Phi-self.gamma*Phi_prime_pi) + reg*I)
                featurized_reward = torch.mm(Phi.T, rews)
                linear_coeff = torch.mm(regularized_inverse, featurized_reward)
                #* Now that we solve the linear layer, form the loss function
                linear_layer = linear_coeff.view(-1, feature_dim).permute(1,0)
                state_action_values = (Z @ linear_layer).gather(1, acts)
                next_state_values = torch.zeros(batch_size,1, dtype=dtype)
                next_state_values[non_absorbing_mask] = ((Z_prime @ linear_layer)*pi_next).sum(dim=1, keepdims = True)[non_absorbing_mask]
                expected_state_action_values = (next_state_values * self.gamma) + rews
                # Huber loss or MSE loss
                loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)
                # loss = F.mse_loss(state_action_values, expected_state_action_values)
                current_iter_loss += loss.clone().detach()
                #* differentiate and update
                optimizer_q.zero_grad()
                loss.backward()
                for param in self.q_net.parameters():
                    param.grad.data.clamp_(-1,1)
                optimizer_q.step()
            #* see if total loss is the best so far, if so, record the estimate
            if current_iter_loss < min_loss:
                min_loss = current_iter_loss
                Z_init = self.q_net(self.init_obs)
                Z_term = self.q_net(self.term_obs)
                q_s0 = (Z_init @ linear_layer)
                q_s0_pi = ( q_s0 * self.pi_init).sum()
                q_sterm = (Z_term @ linear_layer)
                q_sterm_pi = ( q_sterm * self.pi_term).sum()
                # Same initial-minus-discounted-terminal estimator as above
                value_est = (q_s0_pi - self.gamma**self.horizon * q_sterm_pi) / self.n_episode
                value_est_list.append(value_est.detach().numpy())
            max_grad = 0
            max_q_weight = 0
            if self.debug:
                for param in self.q_net.parameters():
                    # param.grad.data.clamp_(-1,1)
                    if param.grad is not None:
                        max_grad = max(max_grad, param.grad.data.max())
                        max_q_weight = max(max_q_weight, param.data.max())
                print('\n')
                print('iter {} Trailing estimate: '.format(i), np.mean(value_est_list[-tail_average:]))
                print('current estimate: ', value_est)
                print('current loss: ', current_iter_loss.detach().numpy())
                print('max linear weight:', linear_layer.max())
                print('max q gradient:', max_grad)
                print('max q weight:', max_q_weight)
            if i % 100 == 0 and self.debug:
                print('Current loss in iter {}: {:4f}'.format(i, current_iter_loss.numpy()))
        return np.mean(value_est_list[-tail_average:])
|
24,440 | c1dc8cd71e3876225cdb6d1a05ca5dac812d09d9 | import os
import shutil
import requests
from flask import Flask
from flask_restful import Api, Resource, reqparse
from pymongo import MongoClient
app = Flask(__name__)
api = Api(app)
client = MongoClient()  # NOTE(review): connects to a local MongoDB with defaults
db = client.datasets_db
# Bing API
# SECURITY: API key hardcoded in source — move to an environment variable.
subscription_key = 'b6f1f528a8d7468fa2baa1e3e29fbfba'
search_url = 'https://api.cognitive.microsoft.com/bing/v7.0/images/search'
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
TRAIN_TEST_RATIO = 0.8  # Ratio between the train and test splits
ACCEPTABLE_SIZE_DIFFERENCE = 1024  # Bytes
class Dataset(Resource):
    """REST resource for building, reading, and editing image datasets
    collected from Bing image search and tracked in MongoDB."""

    def put(self):
        """
        Search Bing for a dataset by name.
        Download the images and store their info in MongoDB.
        """
        # Parse the request
        parser = reqparse.RequestParser()
        parser.add_argument('dataset_name', required=True)
        parser.add_argument('dataset_size', required=True, type=int)
        params = parser.parse_args()
        dataset_name = params['dataset_name']
        dataset_size = params['dataset_size']
        # If the dataset folder already exists, delete it
        if os.path.exists(dataset_name):
            shutil.rmtree(dataset_name)
        # Create the dataset folder
        while True:
            try:
                os.mkdir(dataset_name)
                break
            except PermissionError: # Win 10 permission error ¯\_(ツ)_/¯
                pass
        # If the collection already exists in the DB, drop it
        if dataset_name in db.list_collection_names():
            db[dataset_name].drop()
        completed = 0 # Number of successfully downloaded images
        offset = 0 # Skip the first n search results
        # Until the required number of images has been collected
        while completed < dataset_size:
            # Query Bing
            params = {'q': dataset_name, 'count': dataset_size - completed, 'offset': offset}
            response = requests.get(search_url, headers=headers, params=params).json()
            search_results = response['value']
            # If the query did not return the required number of images
            if len(search_results) < dataset_size - completed:
                if os.path.exists(dataset_name):
                    shutil.rmtree(dataset_name) # Delete the dataset folder
                if dataset_name in db.list_collection_names():
                    db[dataset_name].drop() # Drop the dataset's DB collection
                return {'error': 'Can not find requested number of images!'}, 400
            offset = response['nextOffset'] # Advance the offset for the next query
            # Walk through the search results
            for img in search_results:
                # Try to fetch the image
                try:
                    response = requests.get(img['contentUrl'], stream=True, timeout=1)
                    response.raise_for_status() # raise HTTPError
                    path = dataset_name + '/' + str(completed) + '.' + img['encodingFormat']
                    # Download the image
                    with open(path, 'wb') as file:
                        response.raw.decode_content = True
                        shutil.copyfileobj(response.raw, file)
                    # Raise if the file size does not match what Bing reported
                    bing_size = int(img['contentSize'][:-2])
                    if abs(os.path.getsize(path) - bing_size) > ACCEPTABLE_SIZE_DIFFERENCE:
                        raise Exception('File size does not match!')
                    # Store in the DB
                    document = {'url': img['contentUrl'],
                                'path': path}
                    db[dataset_name].insert_one(document)
                    # Reached only if no exception occurred
                    completed += 1
                except Exception: # On any failure, move on to the next search result
                    pass
        # The required number of images has been collected
        return {'info': 'Dataset collected'}, 201

    def get(self):
        """
        View a stored dataset.
        Returns the train and test splits (80:20 ratio).
        """
        # Parse the request
        parser = reqparse.RequestParser()
        parser.add_argument('dataset_name', required=True)
        params = parser.parse_args()
        dataset_name = params['dataset_name']
        # If there is no such dataset in the DB
        if dataset_name not in db.list_collection_names():
            return {'error': 'Dataset not found!'}, 404
        dataset_size = db[dataset_name].count_documents({})
        train_size = round(dataset_size * TRAIN_TEST_RATIO)
        # Fetch the train and test splits without the _id field
        train = list(db[dataset_name].find({}, projection={'_id': False}, limit=train_size))
        test = list(db[dataset_name].find({}, projection={'_id': False}, skip=train_size))
        # Return train and test
        return {'train': train, 'test': test}, 200

    def delete(self):
        """
        Delete an image from a dataset by URL.
        """
        # Parse the request
        parser = reqparse.RequestParser()
        parser.add_argument('dataset_name', required=True)
        parser.add_argument('url', required=True)
        params = parser.parse_args()
        dataset_name = params['dataset_name']
        url = params['url']
        # If the dataset exists
        if dataset_name in db.list_collection_names():
            # If an image with this URL exists, delete it
            if db[dataset_name].find_one({'url': url}):
                path = db[dataset_name].find_one({'url': url})['path']
                os.remove(path)
                db[dataset_name].delete_one({'url': url})
                return {'info': 'Image deleted'}, 200
            else:
                return {'error': 'URL not found!'}, 404
        else:
            return {'error': 'Dataset not found!'}, 404
api.add_resource(Dataset, '/datasets/api')
if __name__ == '__main__':
app.run(debug=False)
|
24,441 | 1b55cf81143ff90e64e7ca999fe42a13c0700362 | #%%
import random
from numpy.random import shuffle
import numpy as np
import time
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
#%% These functions are used for data preprocessing
def to_pairs(doc):
    '''
    Split a loaded document into input/output pairs
    :param doc: iterable of wiktionary entries (dicts)
    :return: list of [word, X_SAMPA pronunciation] pairs
    '''
    return [[entry['word'], entry['X_SAMPA']] for entry in doc]
# Train, dev, test splits
def cv_splits(pairs, ndev, ntest, seed):
    '''
    Create cross-validation splits
    :param pairs: input data (shuffled in place)
    :param ndev: number of observations in dev
    :param ntest: number of observations in test
    :param seed: seed to use for the pseudo-random number generator
    :return: (train, dev, test) data as tuple

    Bug fix: `shuffle` here is numpy.random.shuffle (module-level import),
    but the old code called random.seed(seed), which seeds only the stdlib
    generator — the seed had no effect and splits were not reproducible.
    Seed numpy's generator instead.
    '''
    # Define train/dev/test boundaries
    n_train = len(pairs) - ntest - ndev
    n_dev = len(pairs) - ntest
    n_test = len(pairs)
    # Reproducible shuffle: seed the generator that shuffle() actually uses
    np.random.seed(seed)
    shuffle(pairs)
    # split into train/dev/test
    return pairs[:n_train], pairs[n_train:n_dev], pairs[n_dev: n_test]
# Filter out homophones from the dataset
def filter_homophone(pairs, dev_pronunciation_words, test_pronunciation_words):
    '''
    Filter homophones from the train data
    :param pairs: input data as (word, pronunciation) pairs
    :param dev_pronunciation_words: pronunciations that occur in the dev set
    :param test_pronunciation_words: pronunciations that occur in the test set
    :return: (train, dev, test) — dev/test hold pairs whose pronunciation
        appears in the respective word lists; train (a numpy array) holds
        everything else
    '''
    # Subset data
    dev = [pair for pair in pairs if pair[1] in dev_pronunciation_words]
    tst = [pair for pair in pairs if pair[1] in test_pronunciation_words]
    # Pronunciations to remove from the train data. Use a set for O(1)
    # membership tests; the old list concatenation made the filter below
    # quadratic in the dataset size.
    remove = set(dev_pronunciation_words) | set(test_pronunciation_words)
    # Subset
    trn = np.array([list(pair) for pair in pairs if pair[1] not in remove])
    # Return
    return ((trn, dev, tst))
|
24,442 | ad72d61ac406f9d8c7df53ff54207509cf7d09b0 | # Rotate
# https://code.google.com/codejam/contest/544101/dashboard#s=p0
def check_h(c, k, B):
    """Return True if any row of B contains k consecutive cells equal to c
    (otherwise falls through, returning None)."""
    for row in B:
        if any(row[i:i + k].count(c) == k for i in range(len(row) - k + 1)):
            return True
def check_d(c, k, B):
    """Return True if any anti-diagonal (/) of the n x n board B holds a
    run of k consecutive c's; otherwise return None.

    For each offset i, P walks one anti-diagonal (row + col == i + n - 1)
    and P2 its mirror on the other side of the main anti-diagonal; each is
    scanned with a sliding window of length k.
    """
    n = len(B)
    # Only offsets leaving at least k cells on the diagonal can win
    for i in range(n - k + 1):
        P = []
        P2 = []
        for j in range(n - 1 - i, -1, -1):
            P.append(B[j + i][n - j - 1])
            P2.append(B[n - j - 1 - i][j])
        for z in range(len(P)):
            if P[z:z + k].count(c) == k or P2[z:z + k].count(c) == k:
                return True
def check_d2(c, k, B):
    """Return True if any main-direction diagonal (\\) of the n x n board B
    holds a run of k consecutive c's; otherwise return None.

    For each offset i, P walks the diagonal above the main one
    (col - row == i) and P2 the one below (row - col == i); each is
    scanned with a sliding window of length k.
    """
    n = len(B)
    # Only offsets leaving at least k cells on the diagonal can win
    for i in range(n - k + 1):
        P = []
        P2 = []
        for j in range(n - i):
            P.append(B[j][j + i])
            P2.append(B[j + i][j])
        for z in range(len(P)):
            if P[z:z + k].count(c) == k or P2[z:z + k].count(c) == k:
                return True
def check_win(c, k, B):
    """Truthy if color c has k in a row horizontally, vertically (via the
    transposed board), or along either diagonal direction."""
    return (check_h(c, k, B) or check_h(c, k, zip(*B))
            or check_d(c, k, B) or check_d2(c, k, B))
def get_winner(r, b):
    """Map the (red_won, blue_won) flags to the judge's verdict string."""
    if r:
        return 'Both' if b else 'Red'
    return 'Blue' if b else 'Neither'
# One test case per group: board size n and run length k, then n board rows.
for tc in range(int(input())):
    n, k = map(int, input().split())
    B = [input().strip() for _ in range(n)]
    # Simulate the puzzle's rotation + gravity: each row is read
    # right-to-left, pieces ('R'/'B') slide to the front and '.' pads the
    # rest — presumably equivalent to rotating 90° and letting pieces
    # fall; TODO confirm against the problem statement.
    B2 = []
    for row in B:
        row2 = [c for c in row[::-1] if c != '.']
        row2 += ['.'] * (n - len(row2))
        B2.append(row2)
    r = check_win('R', k, B2)
    b = check_win('B', k, B2)
    print("Case #{}: {}".format(tc + 1, get_winner(r, b)))
|
24,443 | 72a8e239652896d81941458c3aaaa5ec264f3fc6 | #!/usr/bin/env python
'''
Returns a string with no vowels
'''
def anti_vowel(text):
    '''
    Returns a copy of *text* with all vowels (either case) removed
    '''
    return "".join(ch for ch in text if ch not in "aeiouAEIOU")
astring = 'Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry'\
's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to'\
'make a type specimen book. It has survived not only five centuries, but also the leap into electronic typese'\
'tting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets c'\
'ontaining Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker inclu'\
'ding versions of Lorem Ipsum'
print(anti_vowel(astring)) |
24,444 | 9b9f3411c36efcd7c019c3a76459b63e8c8e0696 | import pandas as pd
from moztelemetry import get_pings_properties
def windows_only(p):
    # Keep only pings reported from Windows clients.
    return p["environment/system/os/name"] == "Windows_NT"
def e10s_enabled_only(p):
    # Keep only pings where e10s (multi-process Firefox) is enabled.
    return p["environment/settings/e10sEnabled"]
def long_spinners_keyed_by_build_and_client(ping):
    """Key a ping by (build date, client id) and carry its spinner data.

    Returns ((buildId[:8], clientId), (long_ms_histogram, ms_histogram)).
    """
    key = (ping["application/buildId"][:8], ping["clientId"])
    histograms = (
        ping["payload/histograms/FX_TAB_SWITCH_SPINNER_VISIBLE_LONG_MS"],
        ping["payload/histograms/FX_TAB_SWITCH_SPINNER_VISIBLE_MS"],
    )
    return (key, histograms)
def add_tuple_series(x, y):
    """Element-wise sum of two (long_hist, short_hist) Series pairs.

    None entries are treated as empty histograms, and missing buckets are
    filled with 0.0 so clients with disjoint buckets still combine.

    Note: uses pd.Series(dtype=float) for the empty case — a bare
    pd.Series() is deprecated/object-dtyped in modern pandas.
    """
    def _series(s):
        # Normalize a missing histogram to an empty float Series.
        return pd.Series(dtype=float) if s is None else s

    long_sum = _series(x[0]).add(_series(y[0]), fill_value=0.0)
    short_sum = _series(x[1]).add(_series(y[1]), fill_value=0.0)
    return (long_sum, short_sum)
def bucket_by_long_severity_per_client(spinner_pair):
    """Classify one client's long-spinner histogram into a severity bucket.

    Input is ((buildId, clientId), (long_hist, short_hist)); returns
    (buildId, severity) where severity is a one-hot Series over the named
    buckets. A client with no histogram is "unaffected"; otherwise the
    highest bucket whose lower bound the histogram reaches is set. If a
    non-empty histogram matches no bucket (all counts zero), the Series
    stays all-zero, matching the original elif-chain behavior.
    """
    buildId = spinner_pair[0][0]
    hist = spinner_pair[1][0]
    named_index = [
        "unaffected",
        "0ms - 999ms",
        "1000ms - 2296ms",
        "2297ms - 5276ms",
        "5277ms - 12123ms",
        "12124ms - 27855ms",
        "27856ms - 63999ms",
        "64000ms+",
    ]
    severity = pd.Series([0] * len(named_index), index=named_index)
    if hist is None or hist.empty:
        severity[named_index[0]] = 1
    else:
        # Bucket lower bounds, highest first, paired with their slot index
        # (replaces the previous duplicated elif ladder).
        thresholds = [64000, 27856, 12124, 5277, 2297, 1000, 0]
        for slot, lower in zip(range(7, 0, -1), thresholds):
            if hist[hist.index >= lower].sum() > 0:
                severity[named_index[slot]] = 1
                break
    return (buildId, severity)
def bucket_by_short_severity_per_client(spinner_pair):
    """Classify one client's short-spinner histogram into a severity bucket.

    Clients who also saw a long (>= 1000 ms) spinner are bucketed as
    "not short"; clients with no data in either histogram are
    "unaffected". Otherwise the highest short bucket whose lower bound the
    histogram reaches is set. A non-empty histogram matching no bucket
    leaves the Series all-zero, matching the original elif-chain behavior.
    """
    buildId = spinner_pair[0][0]
    long_hist = spinner_pair[1][0]
    hist = spinner_pair[1][1]
    named_index = [
        "unaffected",
        "not short",
        "0ms - 49ms",
        "50ms - 99ms",
        "100ms - 199ms",
        "200ms - 399ms",
        "400ms - 799ms",
        "800ms+",
    ]
    severity = pd.Series([0] * len(named_index), index=named_index)
    if hist is None or hist.empty or long_hist is None or long_hist.empty:
        severity[named_index[0]] = 1
    elif long_hist[long_hist.index >= 1000].sum() > 0:
        severity[named_index[1]] = 1
    else:
        # Bucket lower bounds, highest first, paired with their slot index
        # (replaces the previous duplicated elif ladder).
        thresholds = [800, 400, 200, 100, 50, 0]
        for slot, lower in zip(range(7, 1, -1), thresholds):
            if hist[hist.index >= lower].sum() > 0:
                severity[named_index[slot]] = 1
                break
    return (buildId, severity)
def to_percentages(build_severities):
    # Normalize a (buildId, severity_counts) pair to fractions of clients.
    # NOTE(review): implicitly returns None when there are no clients; the
    # downstream .map() would then carry None entries into the sort —
    # confirm builds are guaranteed non-empty.
    severities = build_severities[1]
    total_clients = severities.sum()
    if total_clients > 0:
        return (build_severities[0], severities / total_clients)
def collect_aggregated_spinners(rdd, map_func):
    """Bucket each (build, client) pair with map_func, sum the per-build
    severity counts, convert them to percentages, and return the collected
    results sorted by build id.

    NOTE(review): to_percentages can yield None for empty builds, which
    would break both the sort key and downstream consumers — confirm
    upstream guarantees non-empty severity counts.
    """
    collected_percentages = (
        rdd.map(map_func)
        .reduceByKey(lambda x, y: x + y)
        .repartition(200)
        .map(to_percentages)
        .collect()
    )
    return sorted(collected_percentages, key=lambda result: result[0])
def get_short_and_long_spinners(pings):
    """Aggregate tab-switch spinner severities from a ping RDD.

    Filters to Windows clients with e10s enabled, sums each client's
    spinner histograms per (build, client), then buckets clients into
    long- and short-spinner severity distributions per build.

    :param pings: RDD of raw telemetry pings
    :return: {"long": [...], "short": [...]} — each a list of
        (buildId, percentage Series) sorted by build id
    """
    properties = [
        "clientId",
        "payload/histograms/FX_TAB_SWITCH_SPINNER_VISIBLE_LONG_MS",
        "payload/histograms/FX_TAB_SWITCH_SPINNER_VISIBLE_MS",
        "environment/system/os/name",
        "application/buildId",
        "environment/settings/e10sEnabled",
    ]
    ping_props = get_pings_properties(pings, properties)
    windows_pings_only = ping_props.filter(windows_only)
    e10s_enabled_on_windows_pings_only = windows_pings_only.filter(e10s_enabled_only)
    grouped_spinners = (
        e10s_enabled_on_windows_pings_only.repartition(200)
        .map(long_spinners_keyed_by_build_and_client)
        .reduceByKey(add_tuple_series)
    )
    final_result_long = collect_aggregated_spinners(
        grouped_spinners, bucket_by_long_severity_per_client
    )
    final_result_short = collect_aggregated_spinners(
        grouped_spinners, bucket_by_short_severity_per_client
    )
    # Sanity check on the first build: the short buckets (excluding
    # "unaffected"/"not short") should account for exactly the clients in
    # the long "0ms - 999ms" bucket.
    if round(final_result_short[0][1][2:].sum(), 3) == round(
        final_result_long[0][1][1], 3
    ):
        print("Short and long counts match")
    else:
        print("Error: Short and long counts do not match")
    return {"long": final_result_long, "short": final_result_short}
|
24,445 | a4088ac3e4bd721b3b0dd8964f3bd88e8aed5f0c | from functools import partial
from pathlib import Path
import ffmpeg
from choirless_lib import create_signed_url
def main(args):
    """Serverless action: extract an uploaded recording's audio to WAV.

    Reads the source object (named by the bucket notification or an
    explicit ``key``) from bucket1 via a presigned GET URL, runs it
    through ffmpeg (audio only, with a volumedetect filter pass), and
    PUTs the resulting .wav — same key, .wav suffix — to bucket2 via a
    presigned URL.

    :param args: action params; expects bucket1, bucket2, geo,
        endpoint/ENDPOINT, COS HMAC credentials under __bx_creds, and
        key or notification.object_name
    :return: {'status': 'ok'} on success (ffmpeg errors propagate)
    """
    notification = args.get('notification', {})
    key = args.get('key', notification.get('object_name', ''))
    src_bucket = args['bucket1']
    dst_bucket = args['bucket2']
    geo = args['geo']
    host = args.get('endpoint', args.get('ENDPOINT'))
    cos_hmac_keys = args['__bx_creds']['cloud-object-storage']['cos_hmac_keys']
    cos_api_key = cos_hmac_keys['access_key_id']
    cos_api_secret = cos_hmac_keys['secret_access_key']
    # Presigned-URL factories: read from the source bucket, write to the
    # destination bucket.
    get_input_url = partial(create_signed_url,
                            host,
                            'GET',
                            cos_api_key,
                            cos_api_secret,
                            geo,
                            src_bucket)
    get_output_url = partial(create_signed_url,
                             host,
                             'PUT',
                             cos_api_key,
                             cos_api_secret,
                             geo,
                             dst_bucket)
    # Output object: same key with a .wav suffix
    output_key = str(Path(key).with_suffix('.wav'))
    stream = ffmpeg.input(get_input_url(key),
                          seekable=0)
    audio = stream.audio
    # volumedetect logs volume stats to stderr; audio passes through
    audio = audio.filter('volumedetect')
    pipeline = ffmpeg.output(audio,
                             get_output_url(output_key),
                             format='wav',
                             method='PUT',
                             seekable=0,
                             )
    cmd = pipeline.compile()
    print("ffmpeg command to run: ", cmd)
    pipeline.run()
    return {'status': 'ok'}
|
24,446 | 88a525f78b6cd83fa835b1b778d895b2128efb1c | # -*- coding: utf-8 -*-
"""Test for :mod:`sirepo.template.sdds_util`
:copyright: Copyright (c) 2023 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
def test_sdds_lineplot():
    """Regression test: SDDSUtil.lineplot output matches the stored JSON
    fixture for each sample SDDS file."""
    from pykern.pkcollections import PKDict
    from pykern import pkunit, pkjson
    from pykern.pkunit import pkeq
    from sirepo.template.sdds_util import SDDSUtil

    def _format_plot(plot, sdds_units):
        # Label each plot with its column name; units are ignored here.
        return plot.col_name

    # reduced original elegant output file size using sddsprocess:
    # sddsprocess B1.output_file.sdds -delete=columns,GammaDeriv,DeltaGammaT1,DeltaGammaT2 -delete=parameters,* -sparse=10 csrcsbend.sdds
    files = ("csrcsbend.sdds",)
    with pkunit.save_chdir_work() as d:
        for f in files:
            actual = SDDSUtil(str(pkunit.data_dir().join(f))).lineplot(
                PKDict(
                    model=PKDict(
                        x="s",
                        y1="LinearDensity",
                        y2="LinearDensityDeriv",
                        y3="DeltaGamma",
                    ),
                    format_plot=_format_plot,
                )
            )
            # Dump the actual result, then compare against the checked-in
            # expectation from the data dir.
            out = f"{f}.json"
            pkjson.dump_pretty(actual, out)
            expect = pkjson.load_any(pkunit.data_dir().join(out))
            pkeq(expect, actual)
|
24,447 | 306297d7c18dd974ed85c092566063d57a16a5d6 | from django.urls import path
from members.views import (
register_page,
login_page,
logout_page,
dashboard_page,
profile_page,
home_page,
delete_photo_profile, upload_photo_profile
)
# Namespace for reversing these routes, e.g. reverse('members:login').
app_name = 'members'
urlpatterns = [
    path('register/', register_page, name='register'),
    path('login/', login_page, name='login'),
    path('logout/', logout_page, name='logout'),
    path('dashboard/', dashboard_page, name='dashboard'),
    # Captures the username as a string path segment.
    path('profile/<username>', profile_page, name='profile'),
    # NOTE(review): no trailing slash, unlike the other routes — confirm intentional.
    path('home', home_page, name='home'),
    path('dashboard/upload/', upload_photo_profile, name='upload_photo_profile'),
    path('dashboard/delete/', delete_photo_profile, name='delete_photo_profile'),
]
24,448 | 07ea40abf9df2068f06109ca82c32c06fd633d9a | # mysite/asgi.py
from channels.routing import get_default_application
import os
import django
# Settings module must be configured before django.setup() or any app import.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.base")
django.setup()
# ASGI entry point: channels builds the application from the project routing config.
application = get_default_application()
|
24,449 | 1481955633636a26fd758bde0187f8f9aea50417 | from flask import Flask
from flask_sslify import SSLify
from flask import request
from flask import jsonify
import misc
import requests
import json
app = Flask(__name__)
ssllify = SSLify(app)
URL = 'https://api.telegram.org/bot' + misc.token
def write_json(date, filename='answer.json'):
    """Serialize *date* to *filename* as pretty-printed JSON (non-ASCII kept as-is)."""
    serialized = json.dumps(date, indent=2, ensure_ascii=False)
    with open(filename, 'w') as handle:
        handle.write(serialized)
def send_message(chat_id, text='Hello!'):
    """POST a sendMessage call to the Telegram Bot API; return the parsed JSON reply."""
    payload = {'chat_id': chat_id, 'text': text}
    response = requests.post(URL + 'sendMessage', json=payload)
    return response.json()
# def get_update():
# url = URL + 'getUpdates'
# r = requests.get(url)
# #
# return r.json()
@app.route('/', methods=['POST', 'GET'])
def index():
    # Telegram webhook endpoint: POST carries bot updates; GET serves a probe page.
    if request.method == 'POST':
        r = request.get_json()
        # NOTE(review): updates without a 'message' key (edits, callbacks) raise KeyError.
        chat_id = r['message']['chat']['id']
        message = r['message']['text']
        # Reply only when the incoming text contains 'hello'.
        if 'hello' in message:
            send_message(chat_id, text='Hello Дорогуша :)')
        #write_json(r)
        # Echo the update back so Telegram sees a 200 with a JSON body.
        return jsonify(r)
    return "<H1>Test Bot</H1>"
# def main():
# r = get_update()
# chat_id = r['result'][-1]['message']['chat']['id']
# send_message(chat_id)
# pass
if __name__ == '__main__':
app.run()
|
24,450 | 393a3db50741c0e8b3686339ee0dd7bb8d5c026f | ###exercicio 56
# Interactive exercise: read 4 people, report average age, oldest man,
# and how many women are under 20.
mediai = 0        # running sum of ages (divided by 4 at the end)
homemvelho = ''   # name of the oldest man seen so far
ihomemvelho = 0   # age of the oldest man seen so far
fmenor = 0        # count of women younger than 20
for p in range (1,5):
    print ("----- {}° Pessoa -----".format(p))
    nomes = str(input('Qual seu nome: ')).strip()
    idade = int(input('Qual sua idade: '))
    sexo = str(input('Sexo [M/F]: ')).strip()
    mediai = mediai + idade
    # First man initializes the "oldest" tracker.
    # NOTE(review): `sexo in 'Mm'` is a substring test, so an empty answer also matches.
    if p == 1 and sexo in 'Mm':
        homemvelho = nomes
        ihomemvelho = idade
    if sexo in 'Mm' and idade > ihomemvelho:
        homemvelho = nomes
        ihomemvelho = idade
    if sexo in 'Ff' and idade < 20:
        fmenor += 1
print ('A media de idade do grupo é {}'.format(mediai/4))
print ('O homem mais velho tem {} anos e se chama {}'.format(ihomemvelho, homemvelho))
print ('No grupo tem {} mulheres com menos de 20 anos'.format(fmenor))
24,451 | 2458bd10820179534d5d1799a8f740ad985c965e | #!/bin/python3
import sys
# Sock Merchant: count how many pairs of matching values appear in the list.
n = int(input().strip())  # declared number of socks (input protocol only; not otherwise used)
c = [int(c_temp) for c_temp in input().strip().split(' ')]
# Each distinct colour contributes occurrences // 2 pairs.  This replaces the
# original O(n^2) count/remove mutation loop and leaves the input list intact.
count = sum(c.count(colour) // 2 for colour in set(c))
print(count)
|
24,452 | c5510cc4128f4e644e5482340dfc687e17af4d97 | import unittest
data = """uxcplgxnkwbdwhrp
suerykeptdsutidb
dmrtgdkaimrrwmej
ztxhjwllrckhakut
gdnzurjbbwmgayrg
gjdzbtrcxwprtery
fbuqqaatackrvemm
pcjhsshoveaodyko
lrpprussbesniilv
mmsebhtqqjiqrusd
vumllmrrdjgktmnb
ptsqjcfbmgwdywgi
mmppavyjgcfebgpl
zexyxksqrqyonhui
npulalteaztqqnrl
mscqpccetkktaknl
ydssjjlfejdxrztr
jdygsbqimbxljuue
ortsthjkmlonvgci
jfjhsbxeorhgmstc
vdrqdpojfuubjbbg
xxxddetvrlpzsfpq
zpjxvrmaorjpwegy
laxrlkntrukjcswz
pbqoungonelthcke
niexeyzvrtrlgfzw
zuetendekblknqng
lyazavyoweyuvfye
tegbldtkagfwlerf
xckozymymezzarpy
ehydpjavmncegzfn
jlnespnckgwmkkry
bfyetscttekoodio
bnokwopzvsozsbmj
qpqjhzdbuhrxsipy
vveroinquypehnnk
ykjtxscefztrmnen
vxlbxagsmsuuchod
punnnfyyufkpqilx
zibnnszmrmtissww
cxoaaphylmlyljjz
zpcmkcftuuesvsqw
wcqeqynmbbarahtz
kspontxsclmbkequ
jeomqzucrjxtypwl
ixynwoxupzybroij
ionndmdwpofvjnnq
tycxecjvaxyovrvu
uxdapggxzmbwrity
csskdqivjcdsnhpe
otflgdbzevmzkxzx
verykrivwbrmocta
ccbdeemfnmtputjw
suyuuthfhlysdmhr
aigzoaozaginuxcm
ycxfnrjnrcubbmzs
fgbqhrypnrpiizyy
taoxrnwdhsehywze
echfzdbnphlwjlew
jhmomnrbfaawicda
fywndkvhbzxxaihx
aftuyacfkdzzzpem
yytzxsvwztlcljvb
iblbjiotoabgnvld
kvpwzvwrsmvtdxcx
ardgckwkftcefunk
oqtivsqhcgrcmbbd
wkaieqxdoajyvaso
rkemicdsrtxsydvl
sobljmgiahyqbirc
pbhvtrxajxisuivj
ggqywcbfckburdrr
gmegczjawxtsywwq
kgjhlwyonwhojyvq
bpqlmxtarjthtjpn
pxfnnuyacdxyfclr
isdbibbtrqdfuopn
vucsgcviofwtdjcg
ywehopujowckggkg
mzogxlhldvxytsgl
mllyabngqmzfcubp
uwvmejelibobdbug
brebtoppnwawcmxa
fcftkhghbnznafie
sqiizvgijmddvxxz
qzvvjaonnxszeuar
abekxzbqttczywvy
bkldqqioyhrgzgjs
lilslxsibyunueff
ktxxltqgfrnscxnx
iwdqtlipxoubonrg
twncehkxkhouoctj
bdwlmbahtqtkduxz
smbzkuoikcyiulxq
bjmsdkqcmnidxjsr
icbrswapzdlzdanh
eyszxnhbjziiplgn
pdxhrkcbhzqditwb
nfulnpvtzimbzsze
glayzfymwffmlwhk
bejxesxdnwdlpeup
ukssntwuqvhmsgwj
hoccqxlxuuoomwyc
rapztrdfxrosxcig
cxowzhgmzerttdfq
yzhcurqhdxhmolak
kqgulndpxbwxesxi
yjkgcvtytkitvxiu
xnhfqhnnaceaqyue
qkuqreghngfndifr
xesxgeaucmhswnex
occbvembjeuthryi
dmefxmxqjncirdwj
ystmvxklmcdlsvin
pplykqlxmkdrmydq
cbbjkpbdvjhkxnuc
embhffzsciklnxrz
asrsxtvsdnuhcnco
xcbcrtcnzqedktpi
mglwujflcnixbkvn
mnurwhkzynhahbjp
cekjbablkjehixtj
kbkcmjhhipcjcwru
usifwcsfknoviasj
rsfgocseyeflqhku
prgcyqrickecxlhm
asbawplieizkavmq
sylnsirtrxgrcono
nzspjfovbtfkloya
qfxmsprfytvaxgtr
yckpentqodgzngnv
ycsfscegcexcnbwq
kbmltycafudieyuh
tpahmvkftilypxuf
qivqozjrmguypuxu
gdhbfradjuidunbk
vxqevjncsqqnhmkl
rpricegggcfeihst
xucvzpprwtdpzifq
egyjcyyrrdnyhxoo
kfbrzmbtrrwyeofp
qpjdsocrtwzpjdkd
reboldkprsgmmbit
vwkrzqvvhqkensuy
ydvmssepskzzvfdp
vqbigplejygdijuu
mzpgnahrhxgjriqm
uiejixjadpfsxqcv
tosatnvnfjkqiaha
yipuojpxfqnltclx
pcxwvgcghfpptjlf
shrudjvvapohziaj
jdckfjdtjsszdzhj
hgisfhcbdgvxuilk
gytnfjmrfujnmnpp
ohflkgffnxmpwrrs
jzxajbkwwjknasjh
xrcxfollmejrislv
djjlwykouhyfukob
rittommltkbtsequ
lpbvkxdcnlikwcxm
vkcrjmcifhwgfpdj
dkhjqwtggdrmcslq
swnohthfvjvoasvt
yrzoksmcnsagatii
duommjnueqmdxftp
inlvzlppdlgfmvmx
xibilzssabuqihtq
inkmwnvrkootrged
ldfianvyugqtemax
gbvwtiexcuvtngti
temjkvgnwxrhdidc
askbbywyyykerghp
onezejkuwmrqdkfr
kybekxtgartuurbq
ubzjotlasrewbbkl
stueymlsovqgmwkh
lhduseycrewwponi
yohdmucunrgemqcu
onnfbxcuhbuifbyc
odrjkigbrsojlqbt
imqkqqlkgmttpxtx
sxmlkspqoluidnxw
akaauujpxhnccleb
xvgpghhdtpgvefnk
jdxeqxzsbqtvgvcq
mdusenpygmerxnni
agihtqvgkmgcbtaw
dovxcywlyvspixad
uulgazeyvgtxqkfz
ndhmvrwuflhktzyo
hcaqkmrbvozaanvm
tvfozbqavqxdqwqv
rlkpycdzopitfbsv
dmyjtmjbtnvnedhs
fmwmqeigbzrxjvdu
twgookcelrjmczqi
grxosmxvzgymjdtz
zsstljhzugqybueo
jpeapxlytnycekbd
iasykpefrwxrlvxl
azohkkqybcnsddus
aoaekngakjsgsonx
awsqaoswqejanotc
sgdxmketnjmjxxcp
ylnyuloaukdrhwuy
ewoqjmakifbefdib
ytjfubnexoxuevbp
ewlreawvddptezdd
vmkonztwnfgssdog
ahbpuqygcwmudyxn
kmahpxfjximorkrh
otjbexwssgpnpccn
aewskyipyztvskkl
urqmlaiqyfqpizje
nrfrbedthzymfgfa
vndwwrjrwzoltfgi
iiewevdzbortcwwe
qiblninjkrkhzxgi
xmvaxqruyzesifuu
yewuzizdaucycsko
hmasezegrhycbucy
dwpjrmkhsmnecill
hnffpbodtxprlhss
avmrgrwahpsvzuhm
nksvvaswujiukzxk
zzzapwhtffilxphu
vwegwyjkbzsrtnol
qurpszehmkfqwaok
iknoqtovqowthpno
brlmpjviuiagymek
efxebhputzeulthq
mzkquarxlhlvvost
xsigcagzqbhwwgps
qufztljyzjxgahdp
dlfkavnhobssfxvx
hgdpcgqxjegnhjlr
fboomzcvvqudjfbi
wnjuuiivaxynqhrd
nhcgzmpujgwisguw
wjeiacxuymuhykgk
qmeebvxijcgdlzpf
nmmnxsehhgsgoich
ejluaraxythbqfkl
mdbsbwnaypvlatcj
nnfshfibmvfqrbka
dvckdmihzamgqpxr
foztgqrjbwyxvewk
okpryqcbvorcxhoh
fpiwsndulvtthctx
zrbiovlmzdmibsiq
setwafbnnzcftutg
nyvqghxhgkxfobdm
enpvqadzarauhajl
twblhpvkazpdmhmr
lbhlllsgswvhdesh
tdfwkgxnqjxcvsuo
lnvyjjbwycjbvrrb
jsxqdvmzaydbwekg
xirbcbvwlcptuvoa
hwnukxenilatlfsk
khwopjqkxprgopmd
sljzdoviweameskw
stkrdmxmpaijximn
fdilorryzhmeqwkc
mfchaaialgvoozra
gjxhoxeqgkbknmze
beowovcoqnginrno
mkgmsgwkwhizunxo
phnhfusyoylvjdou
csehdlcmwepcpzmq
pgojomirzntgzohj
fkffgyfsvwqhmboz
mrvduasiytbzfwdn
epzrmsifpmfaewng
ooqxnoyqrlozbbyf
ahcxfmgtedywrbnx
ibqktvqmgnirqjot
xarssauvofdiaefn
xradvurskwbfzrnw
nxklmulddqcmewad
twichytatzoggchg
qmgvroqwrjgcycyv
yvezgulgrtgvyjjm
jgmcklzjdmznmuqk
bytajdwwconasjzt
apjttucpycyghqhu
flfejjzihodwtyup
gmrtrwyewucyqotv
nlohdrlymbkoenyl
wxcmqwbrwgtmkyfe
njtzlceyevmisxfn
htbbidsfbbshmzlt
gxhjeypjwghnrbsf
cifcwnbtazronikv
ezvjijcjcyszwdjy
srffeyrvyetbecmc
xpjefrtatrlkbkzl
yhncvfqjcyhsxhbb
pqhcufzlcezhihpr
qtdsfvxfqmsnzisp
dfonzdicxxhzxkrx
mqqqzhxkyfpofzty
dodjadoqyxsuazxt
jjwkrlquazzjbvlm
ttosfloajukoytfb
llateudmzxrzbqph
criqihrysgesmpsx
npszvlittbcxxknj
qmzojrvraitrktil
cfyoozzpwxwkwoto
daxohtcgvtktggfw
vthkpkoxmiuotjaj
pkfkyobvzjeecnui
ojcjiqrfltbhcdze
scbivhpvjkjbauun
ysowvwtzmqpjfwyp
laeplxlunwkfeaou
jufhcikovykwjhsa
xrucychehzksoitr
pyaulaltjkktlfkq
oypfrblfdhwvqxcv
zybrgxixvhchgzcf
puoagefcmlxelvlp
xjnhfdrsbhszfsso
ocgvzryoydaoracw
bxpnqllmptkpeena
pziyeihxlxbbgdio
bvtrhtlbfzmglsfc
ggpuvtseebylsrfk
pukenexjqecnivfj
jswabfbzpnhhdbpn
enojrtwqpfziyqsv
rjtmxudgcudefuiz
iqmjxynvtvdacffc
uheywxlsusklitvl
kwhxduejafdpmqdc
rspgblenbqlmcltn
rczhurnrqqgjutox
dqhytibjzxkdblzl
hpbieadydiycvfys
pucztfoqvenxiuym
nqpfzgpblwijiprf
ltgseeblgajbvltk
mwxukbsnapewhfrc
dvxluiflicdtnxix
pexfbpgnqiqymxcq
dakudfjjwtpxuzxy
letlceyzlgmnrewu
ojktahbsdifdfhmd
anezoybbghjudbih
sawxtlvzysaqkbbf
ttnkctcevpjiwqua
edrwrdvbaoqraejd
wnbfilvuienjxlcr
wqhzwvyybyxhhtsm
jxbgvyaqczwdlxfo
wbypqfmbwrsvfmdv
izdxjyfpidehbets
vbxbggqseurknjor
egpmpoxickhvwdlz
ivfrzklvpwoemxsy
xkziseheibmrpdww
xnrmtoihaudozksa
efemdmbxdsaymlrw
yjdjeckmsrckaagx
vlftqxxcburxnohv
fwyquwgajaxebduj
dwpmqvcxqwwnfkkr
isduxxjfsluuvwga
avdtdppodpntojgf
vrcoekdnutbnlgqk
kbhboxjmgomizxkl
cgsfpjrmewexgzfy
usdtnhjxbvtnafvp
bjoddgxbuxzhnsqd
hoyqdzofddedevsb
rwiwbvqfjajotaoj
iabomphsuyfptoos
bubeonwbukprpvhy
xurgunofmluhisxm
puyojzdvhktawkua
dbvqhztzdsncrxkb
oaeclqzyshuuryvm
nmgwfssnflxvcupr
vjkiwbpunkahtsrw
romyflhrarxchmyo
yecssfmetezchwjc
qwtocacqdslhozkd
mesexvfbtypblmam
mtjucgtjesjppdtt
pvodhqqoeecjsvwi
vvlcwignechiqvxj
wiqmzmmjgjajwgov
kwneobiiaixhclev
lkdeglzrrxuomsyt
oqovuwcpwbghurva
lfsdcxsasmuarwwg
awkbafhswnfbhvck
sztxlnmyvqsiwljg
hozxgyxbcxjzedvs
oifkqgfqmflxvyzn
mfvnehsajlofepib
delgbyfhsyhmyrfa
uenimmwriihxoydv
vjqutpilsztquutn
kfebsaixycrodhvl
coifyqfwzlovrpaj
xiyvdxtkqhcqfsqr
hoidcbzsauirpkyt
fiumhfaazfkbaglq
fzwdormfbtkdjgfm
faxqrortjdeihjfv
ljhaszjklhkjvrfi
pzrxsffkuockoqyl
immbtokjmwyrktzn
lzgjhyiywwnuxpfx
vhkocmwzkfwjuzog
ghntjkszahmdzfbl
gbcthxesvqbmzggy
oyttamhpquflojkh
nbscpfjwzylkfbtv
wnumxzqbltvxtbzs
jfhobjxionolnouc
nrtxxmvqjhasigvm
hweodfomsnlgaxnj
lfgehftptlfyvvaj
ccoueqkocrdgwlvy
euhgvirhsaotuhgf
pdlsanvgitjvedhd
seokvlbhrfhswanv
pntdqaturewqczti
jkktayepxcifyurj
dhzzbiaisozqhown
wehtwakcmqwczpbu
zwvozvspqmuckkcd
efucjlrwxuhmjubr
lzodaxuyntrnxwvp
qdezfvpyowfpmtwd
mizijorwrkanesva
txmitbiqoiryxhpz
xhsqgobpouwnlvps
muixgprsknlqaele
disgutskxwplodra
bmztllsugzsqefrm
ymwznyowpaaefkhm
ebfifzloswvoagqh
pkldomvvklefcicw
ziqzbbfunmcgrbtq
iuekfpbkraiwqkic
jflgjidirjapcuqo
achsfbroyrnqnecg
udbhouhlgjjzapzr
arerrohyhhkmwhyo
txyjzkqexgvzdtow
ogzrjwibvzoucrpg
rfdftaesxdnghwhd
axdhwmpuxelmpabo
gtktemowbsvognac
wkfuclilhqjzxztk
qbwjouutzegaxhrz
opfziwqqbwhzzqhj
pvcvcsupfwsmeacs
xsbohvbguzsgpawn
sczoefukwywxriwj
oqkhcqfdeaifbqoc
vtsrholxbjkhwoln
yuvapljnwbssfbhi
dxdfwccqvyzeszyl
gdbmjtonbiugitmb
qunirtqbubxalmxr
zzxsirhdaippnopr
fibtndkqjfechbmq
gqgqyjvqmfiwiyio
ihwsfkwhtzuydlzw
eygyuffeyrbbhlit
zdlsaweqomzrhdyy
ptbgfzuvxiuuxyds
llxlfdquvovzuqva
wfrltggyztqtyljv
kwipfevnbralidbm
gbhqfbrvuseellbx
obkbuualrzrakknv
hlradjrwyjgfqugu
vtqlxbyiaiorzdsp
tedcbqoxsmbfjeyy
cxdppfvklbdayghy
gjnofexywmdtgeft
ldzeimbbjmgpgeax
egrwsmshbvbawvja
vadfrjvcrdlonrkg
mojorplakzfmzvtp
jyurlsoxhubferpo
ijwqogivvzpbegkm
cnmetoionfxlutzg
lawigelyhegqtyil
mqosapvnduocctcd
eqncubmywvxgpfld
vigfretuzppxkrfy
ncwynsziydoflllq
cbllqinsipfknabg
ndtbvdivzlnafziq
iqrrzgzntjquzlrs
damkuheynobqvusp
jxctymifsqilyoxa
ylritbpusymysmrf
paoqcuihyooaghfu
obhpkdaibwixeepl
igrmhawvctyfjfhd
ybekishyztlahopt
vkbniafnlfqhhsrq
kltdigxmbhazrywf
ufhcoyvvxqzeixpr
klcxdcoglwmeynjt
funpjuvfbzcgdhgs
akgyvyfzcpmepiuc
zhlkgvhmjhwrfmua
ibsowtbnrsnxexuz
vpufbqilksypwlrn
ngrintxhusvdkfib
ziuwswlbrxcxqslw
sucledgxruugrnic
zwnsfsyotmlpinew
oaekskxfcwwuzkor
qjmqwaktpzhwfldu
tmgfgqgpxaryktxo
qfaizepgauqxvffk
addkqofusrstpamf
shdnwnnderkemcts
gwfygbsugzptvena
fpziernelahopdsj
bkkrqbsjvyjtqfax
gxrljlqwxghbgjox
ipfwnqaskupkmevm
nnyoyhnqyfydqpno
lgzltbrrzeqqtydq
fgzxqurhtdfucheb
jvpthtudlsoivdwj
bmlhymalgvehvxys
fhklibetnvghlgnp
hfcyhptxzvblvlst
donanindroexgrha
oqawfmslbgjqimzx
jzgehjfjukizosep
bhlgamcjqijpvipb
jrcrdjrvsyxzidsk
ouwfwwjqezkofqck
wrvsbnkhyzayialf
knhivfqjxrxnafdl
hbxbgqsqwzijlngf
qlffukpfmnxpfiyq
evhxlouocemdkwgk
baxhdrmhaukpmatw
nwlyytsvreqaminp
ljsjjzmlsilvxgal
onunatwxfzwlmgpk
njgolfwndqnwdqde
ngdgcjzxupkzzbqi
ieawycvvmvftbikq
ccyvnexuvczvtrit
enndfwjpwjyasjvv
tcihprzwzftaioqu
bkztdkbrxfvfeddu
qkvhtltdrmryzdco
rurtxgibkeaibofs
mjxypgscrqiglzbp
unpkojewduprmymd
csqtkhjxpbzbnqog
mednhjgbwzlhmufi
sfrwfazygygzirwd
ijqeupbrhhpqxota
cmhpncanwudyysyh
wwcxbwzrplfzrwxd
jriomldifuobjpmq
radonyagpulnnyee
ryqjwxsspbbhnptd
yeoqpnsdhludlmzf
qsqlkeetyalenueh
qnnedenwsjdrcrzt
lejkuhsllxbhfcrx
anddbvllrrqefvke
wdtljquijaksvdsv
adslgvfuqqdkzvbc
whbccefjpcnjwhaq
kqrfuankaibohqsg
fyxisfwihvylgnfd
rwqdrddghyqudcif
syhzowthaaiiouaf
zjmrtgrnohxmtidu
deecwkfvjffxrzge
dztmvolqxkhdscxe
cdghcrgavygojhqn
pepqmdbjhnbugqeu
pnumdjpnddbxhieg
jzfhxeyahiagizfw
hdkwugrhcniueyor
gmgudeqlbmqynflu
toidiotdmfkxbzvm
pyymuoevoezlfkjb
etrbwuafvteqynlr
usvytbytsecnmqtd
dfmlizboawrhmvim
vrbtuxvzzefedlvs
vslcwudvasvxbnje
xdxyvoxaubtwjoif
mduhzhascirittdf
cqoqdhdxgvvvxamk
dshnfwhqjbhuznqr
zimthfxbdmkulkjg
luylgfmmwbptyzpj
iujpcgogshhotqrc
caqcyzqcumfljvsp
sprtitjlbfpygxya
fnconnrtnigkpykt
irmqaqzjexdtnaph
bbqrtoblmltvwome
ozjkzjfgnkhafbye
hwljjxpxziqbojlw
zahvyqyoqnqjlieb
dptshrgpbgusyqsc
uzlbnrwetkbkjnlm
yccaifzmvbvwxlcc
wilnbebdshcrrnuu
evxnoebteifbffuq
khbajekbyldddzfo
kjivdcafcyvnkojr
wtskbixasmakxxnv
uzmivodqzqupqkwx
rxexcbwhiywwwwnu
rowcapqaxjzcxwqi
fkeytjyipaxwcbqn
pyfbntonlrunkgvq
qiijveatlnplaifi
ltnhlialynlafknw
urrhfpxmpjwotvdn
xklumhfyehnqssys
civrvydypynjdoap
fvbmxnfogscbbnyd
oznavyflpzzucuvg
iyshrpypfbirahqo
qmzbfgelvpxvqecy
xkkxaufomsjbofmk
irlouftdmpitwvlq
csjoptbdorqxhnjg
bkryeshfsaqpdztm
guxbdqzfafsjoadl
tgrltexgrzatzwxf
cwsgsijqdanubxad
xafnexgturwrzyrg
apcrsqdbsbaxocxr
pspgxnzcevmvvejk
szephmeegvegugdt
ndjsoloeacasxjap
bdnfksliscnirjfu
ehglacmzpcgglpux
jwweijomqfcupvzw
yesblmmkqhbazmdu
sjsmalypmuslzgac
fkiqatyttlnuhdho
tlhnyuzdocvfdihq
ngehtjmycevnybga
obxodzcdgtrycgry
stkyrvdfbwovawmk
bdkhqcfrqaxhxloo
gpvumnuoiozipnrk
jbhanddinpqhxeol
hwkzkmbmsrvunzit
rfuomegkxbyamjpw
yzbljuksletipzwm
eafedkagwitzqigl
prenqvsbotqckgwy
spedpbwzphdrfxfz
cmsuqwemhwixkxet
xgdyeqbqfldvaccq
eooxgsrfsbdaolja
kyhqylxooewrhkho
mswieugqpoefmspt
uszoqundysdyeqlc
hkmjdggxefdyykbq
dtuhjnlaliodtlvh
oalbueqbhpxoxvvx
oowxtxsoqdwhzbya
lclajfsrpmtwvzkm
fxmjufpqtpyazeqo
ozlmreegxhfwwwmf
mqzrajxtxbaemrho
nfglecsyqduhakjr
nkxqtmasjjkpkqbp
jjfonbqimybvzeus
vjqkhkhjlmvpwkud
wxxhnvfhetsamzjr
pladhajujzttgmsw
dbycgxeymodsdlhm
qxszeuaahuoxjvwu
adultomodzrljxve
dmhgrbhvvpxyzwdn
slohrlwxerpahtyp
mngbocwyqrsrrxdb
facyrtflgowfvfui
hyvazpjucgghmmxh
twtrvjtncmewcxit
uejkrpvilgccfpfr
psqvolfagjfvqkum
nvzolslmiyavugpp
lpjfutvtwbddtqiu
fkjnfcdorlugmcha
eaplrvdckbcqqvhq
xrcydhkockycburw
iswmarpwcazimqxn
kicnnkjdppitjwrl
vwywaekzxtmeqrsu
dxlgesstmqaxtjta
pmeljgpkykcbujbb
vhpknqzhgnkyeosz
jprqitpjbxkqqzmz
fiprxgsqdfymyzdl
dzvfwvhfjqqsifga
aeakhfalplltmgui
frqrchzvenhozzsu
hsvikeyewfhsdbmy
puedjjhvxayiwgvg
zmsonnclfovjoewb
bnirelcaetdyaumi
szvudroxhcitatvf
sccfweuyadvrjpys
yiouqrnjzsdwyhwa
xyjhkqbnfmjjdefz
fjwgemkfvettucvg
aapqpwapzyjnusnr
dytxpkvgmapdamtc
hgocpfoxlheqpumw
twzuiewwxwadkegg
qdbosnhyqmyollqy
fclbrlkowkzzitod
sgxnrrpwhtkjdjth
xckvsnkvnvupmirv
nioicfeudrjzgoas
lcemtyohztpurwtf
oyjxhhbswvzekiqn
idkblbyjrohxybob
rthvloudwmktwlwh
oyzhmirzrnoytaty
ysdfhuyenpktwtks
wxfisawdtbpsmwli
vgmypwlezbmzeduk
rpepcfpelvhzzxzj
zxbovsmixfvmamnj
cpkabmaahbnlrhiz
jvomcbqeoqrmynjj
iqdeisnegnkrkdws
ilhemlrtxdsdnirr
fjimtscrwbfuwmpo
lmfiylebtzwtztmx
ddouhysvomrkcpgu
xtjwvzdhgnwwauwi
cntzuwcumbsebwyy
hieqvdlvnxkygeda
hushfszxskjdrjxi
xvdfzqblccfoxvyq
nldnrtieteunyxnb
vszpidfocenlhzqb
ofcuvtwhortxesoq
bwniqemqwxlejcfq
wkqiwdjnytjnomps
rbadoommlmrictte
nsmxhpothlulxivt
bvzbfcvenskqxejr
sdqeczmzpqqtqabq
bjveyzniaaliatkw
zxsqlntyjajjxytk
jkoxlerbtidsuepg
ewtlibdkeqwgxnqt
lmrshemwxrdwzrgc
nekcdyxmftlymfir
edaqvmulzkskzsfy
znmvqaupykjmyebx
ximtebuxwhqpzubd
rrlstppkknqyxlho
uyibwcitxixjfwcr
chrvoierkimesqmm
dltxmwhheldvxwqe
xfuthxjuuizanfjy
vtiwavmxwonpkpug
phchnujfnxewglht
owvmetdjcynohxtw
cbtujdrumixxatry
iirzildsfxipfipe
sqxcscqyofohotcy
sbubnekndkvovuqg
jzhsqqxqdrtibtcd
mscwasyvxkhlvwbn
bpafxtagbuxivbwz
uhvueesygaxrqffw
trrxlibhtmzuwkkl
yktkmkokmfslgkml
gfzzzdptaktytnqg
pgqmaiwzhplnbyhg
qjiptlkwfshunsfb
lewvlpescsyunxck
tywsfatykshogjas
qtrnwjjgxdektjgi
arypcritpwijczkn
jwxvngigbhfpiubf
upsjdctitlbqlnhf
lvpjlrpnmdjiscrq
jvzchdrsnkgpgsti
wuoesbwunpseyqzu
xuqspvoshgxmrnrb
icdawnmfnpnmyzof
hwcwtibgpvctznuo
bzdjrniddyamfloq
hffkxtzuazageruv
deixfxjvzbitalnc
zihsohukiqrgsnvw
nwoondfnlgowavkg
qnuulsywgnoillgn
koozejhfjyzuhviy
oetcoipohymhpump
cizwpfczfoodwuly
jghlinczhtaxifau
svjejifbidnvvdvy
rxmbsnaqhzcnbfcl
vveubmiecvdtrket
sbihpvrcnzjtgfep
iqbuljuxkwrlebvw
ptrhvxrpezqvmmvv
duwzugnhktpiybjw
lijafjnujfeflkva
coylvegferuuyfop
fowsjrgammrqkkof
pgmcruaioccmbrbz
osejwflxagwqtjoi
otqflckqgxzvtper
slwyntdcrncktoka
hzcdzsppcfkrblqg
jksdmmvtzkqaompg
galwwwgugetdohkg
zbghtjvuikmfjuef
dmqwcamjtlcofqib
zbczldlfdzemxeys
mdlqoklybhppdkwe
tuyajhkexrrrvnlb
ylfolaubymxmkowo
nnsyrfnoyrxswzxn
zkhunhhhigbsslfk
spbokzdfkbmflanz
zmzxvrwdhiegfely
imywhfczvmgahxwl
fnvabvxeiqvsarqq
yschramprctnputs
ubyjrgdzsvxzvouj
qnvdhpptympctfer
smipxcntyhjpowug
ouhjibgcmotegljy
zpflubaijjqqsptz
fgysnxrnfnxprdmf
pbpznrexzxomzfvj
thhzjresjpmnwtdv
sbmokolkhvbfqmua
sxxpdohxlezmqhhx
pevvsyqgoirixtqh
wdxrornmhqsbfznb
zjqziqbctxkshqcn
nbqcwpzfwfaahylk
bxbvkonpcxprxqjf
xplbpqcnwzwqxheb
prsakggmnjibrpoy
xoguxbpnrvyqarjl
ilrgryrmgwjvpzjy
efwrmokaoigjtrij
yhcncebopycjzuli
gwcmzbzaissohjgn
lggmemwbbjuijtcf
fkqedbfrluvkrwwl
jcbppekecevkwpuk
onvolrckkxeyzfjt
zzousprgrmllxboy
cajthmamvxuesujl
rmiozfsikufkntpg
lvekypkwjbpddkcv
dwaqzfnzcnabersa
pcdsskjopcqwhyis
uabepbrrnxfbpyvx
yxlgdomczciiunrk
ccerskfzctqxvrkz
edvmkntljlncwhax
xtcbwecdwygrvowo
axqgqjqkqwrgcqot
tyjrynolpzqwnjgj
thrtmlegdjsuofga
mpgoeqkzzqqugait
emuslxgoefdjyivl
klehpcehdznpssfb
xfgvugyrdxolixkc
acenyrbdwxywmwst
yqgperajsfsamgan
dbjxlnumrmhipquw
hsnhirmswcenewxm
qehqkbhmgucjjpwo
gprjdglsbtsfzqcw
wvqkyrkoratfmvfi
myhzlerupqbduqsl
couyazesiuhwwhht
scxzehubxhkfejrr
gqlitwfriqkmzqdd
pxtbmqelssoagxko
dzhklewjqzmrfzsw
yxgeypduywntnbji
kwzbgzhkzbgedlfh
vukmuyfstgmscuab
vcmaybfvdgwnasgt
qmybkqqdhjigzmum
cbnuicuncvczyalu
qdgpsdpdlgjasjqr
kdzxqqheurupejjo
mcatrxfchbqnxelm
badunwkeggdkcgco
ntaeanvcylpoqmxi
ghnyfytpzgvuokjn
ozepydixmjijdmts
qefcfwzdhwmcyfvp
ycyktmpaqgaxqsxt
edpizkxnsxeeebfl
uwciveajsxxwoqyr
rbvjkljpxtglqjsh
nbplrskduutrptfk
vewrbadvkseuloec
upaotnjxquomoflx
qfwxkinrousqywdd
mqzxvvskslbxvyjt
oxicszyiqifoyugx
bkitxwzjpabvhraj
ydrbyjecggynjpir
hezyteaublxxpamq
hxkuektnoovsehnd
cwtbbavnhlpiknza
qrwvkhbyasgfxwol
qryjbohkprfazczc
wjksnogpxracrbud
znmsxbhliqxhvesr
gkippedrjzmnnwkp
pklylwsnsyyxwcwg
osdpwbxoegwaiemr
kpslrrrljgtjiqka
vuqkloqucpyzfxgk
bvtdsisgvkuzghyl
qlcayluuyvlhdfyy
kbimqwnzanlygaya
nvoeanlcfhczijed
kqvcijcuobtdwvou
pmhdpcmxnprixitl
yueilssewzabzmij
zqxhafrvjyeyznyg
mhdounmxkvnnsekx
hnacyglnzicxjakg
iaxfdqibnrcjdlyl
iypoelspioegrwix
uiqouxzmlnjxnbqt
kslgjfmofraorvjo
bgvotsdqcdlpkynk
huwcgxhvrrbvmmth
vpqyfnkqqjacpffw
hpjgdfovgmrzvrcl
vbntbhbvdeszihzj
nrbyyuviwyildzuw
wckeoadqzsdnsbox
xgsobwuseofxsxox
anvhsxdshndembsd
iygmhbegrwqbqerg
ylrsnwtmdsrgsvlh
zvvejnrarsavahvc
yncxhmmdtxxeafby
kekgiglblctktnes
uoqgymsrlrwdruzc
saaoymtmnykusicw
bqvcworpqimwglcp
zbpgtheydoyzipjv
pkykzslwsjbhcvcj
jhwxxneyuuidrzvl
pafeyajcrlehmant
klszcvtmcdeyfsmj
ledsltggvrbvlefn
hubpbvxknepammep
gthxhaapfpgtilal
jtfhbozlometwztj
jrhshycyenurbpwb
fyaxbawrsievljqv
lgfcgbenlqxqcxsd
dhedabbwbdbpfmxp
mxzgwhaqobyvckcm
qboxojoykxvwexav
jcpzfjnmvguwjnum
ohpsxnspfwxkkuqe
nyekrqjlizztwjqp
thuynotacpxjzroj
wymbolrlwosnbxqx
iyaqihnqvewxdtjm
hdvdbtvfpdrejenu
gtjscincktlwwkkf
wtebigbaythklkbd"""
VOWELS = {"a", "e", "i", "o", "u"}

def NumVowels(string):
    """Return the number of (lowercase) vowel characters in *string*."""
    return sum(1 for ch in string if ch in VOWELS)
def HasRepeatLetter(string):
    """Return True when any character appears twice in a row (e.g. "dd")."""
    # zip of the string with itself shifted by one yields all adjacent pairs;
    # it is empty for strings shorter than two characters.
    return any(left == right for left, right in zip(string, string[1:]))
FORBIDDEN = {"ab", "cd", "pq", "xy"}

def HasForbiddenStrings(string):
    """Return True when *string* contains any forbidden two-letter substring.

    Fix: the previous version used the Python-2-only ``xrange`` sliding
    window.  Every FORBIDDEN entry is exactly two characters, so a plain
    substring test is equivalent and works on Python 2 and 3.
    """
    return any(bad in string for bad in FORBIDDEN)
def HasReappearingPair(string):
    """Return True when some two-letter pair occurs twice without overlapping.

    Fix: ports the Python-2-only ``xrange`` double loop; the inner scan is
    replaced by ``str.find`` starting at ``i + 2``, which preserves the
    no-overlap rule (the middle of "aaa" does not count) while avoiding the
    explicit O(n) inner loop in Python.
    """
    for i in range(len(string) - 1):
        # find() matches the full two-character pair, so a hit is always
        # a complete, non-overlapping second occurrence.
        if string.find(string[i:i + 2], i + 2) != -1:
            return True
    return False
def HasSandwich(string):
    """Return True when a letter repeats with exactly one letter between (e.g. "xyx").

    Fix: replaces the Python-2-only ``xrange`` index loop with a ``zip`` over
    the string and its two-character shift; behavior is unchanged.
    """
    return any(a == b for a, b in zip(string, string[2:]))
def IsNice(string):
    """Part-one niceness: at least 3 vowels, a doubled letter, no forbidden pairs."""
    if NumVowels(string) < 3:
        return False
    if not HasRepeatLetter(string):
        return False
    return not HasForbiddenStrings(string)
def IsNiceTwo(string):
    """Part-two niceness: a non-overlapping repeated pair plus an x?x sandwich."""
    # Same short-circuit order as the original conjunction.
    if not HasReappearingPair(string):
        return False
    return HasSandwich(string)
# Count the "nice" strings (part-two rules) in the puzzle input above.
i = 0
for line in data.split():
    if IsNiceTwo(line):
        i += 1
# Python 2 print statement; this module is Python-2-only as written.
print i
class MyTests(unittest.TestCase):
    """Unit tests for the day-5 predicates, using the puzzle's published examples."""
    def test_Examples(self):
        self.assertTrue(IsNice("ugknbfddgicrmopn"))
        self.assertTrue(IsNice("aaa"))
        self.assertFalse(IsNice("jchzalrnumimnmhp"))
        self.assertFalse(IsNice("haegwjzuvuyypxyu"))
        self.assertFalse(IsNice("dvszwmarrgswjxmb"))
    def test_NumVowels(self):
        self.assertEqual(NumVowels("ugknbfddgicrmopn"), 3)
        self.assertEqual(NumVowels("aaa"), 3)
    def test_HasRepeatLetter(self):
        self.assertTrue(HasRepeatLetter("ugknbfddgicrmopn"))
    def test_HasForbiddenStrings(self):
        self.assertFalse(HasForbiddenStrings("ugknbfddgicrmopn"))
        self.assertTrue(HasForbiddenStrings("haegwjzuvuyypxyu"))
        self.assertTrue(HasForbiddenStrings("ab"))
        self.assertTrue(HasForbiddenStrings("abx"))
        self.assertTrue(HasForbiddenStrings("xab"))
    def test_HasReappearingPair(self):
        # Overlapping repeats must not count: "aaa" is False, "aaaa" is True.
        self.assertTrue(HasReappearingPair("aaaaa"))
        self.assertTrue(HasReappearingPair("aaaa"))
        self.assertTrue(HasReappearingPair("aaxaa"))
        self.assertFalse(HasReappearingPair("aaa"))
    def test_HasSandwich(self):
        self.assertTrue(HasSandwich("aba"))
        self.assertTrue(HasSandwich("aaa"))
        self.assertTrue(HasSandwich("xaba"))
        self.assertTrue(HasSandwich("abax"))
    def test_IsNiceTwo(self):
        self.assertTrue(IsNiceTwo("qjhvhtzxzqqjkmpb"))
        self.assertTrue(IsNiceTwo("xxyxx"))
        self.assertFalse(IsNiceTwo("uurcxstgmygtbstg"))
        self.assertFalse(IsNiceTwo("ieodomkazucvgmuy"))
if __name__ == "__main__":
    unittest.main()
24,453 | cee99ec988bbbea7b9729dd9dc0c0eba495e9f9f | L1.sort()
last_val=L1[0]
Last_count=1
max_val=
vals=[3,5,3,5,5,5,6,2,6,5,2,8,4,6,6,8,1,4,7,2,6]
x=[1,3,1,2]
print mode_list(x) |
24,454 | 1c97dc484aee44548633812bf59e3b7620739201 | from measures.generic.ReceivedXXXMessages import ReceivedXXXMessages
class ReceivedBundleMessages(ReceivedXXXMessages):
    """Total number of received bundle messages"""
    def __init__(self, period, simulationTime):
        # Count only 'message.BundleMessage' instances over the given sampling period.
        ReceivedXXXMessages.__init__(self, 'message.BundleMessage', period, simulationTime)
|
24,455 | c715b44f6d33cc7533a6a16f77c19b74c0ea3f59 | import json
import typing as t
if t.TYPE_CHECKING:
T_SELF = t.TypeVar('T_SELF', bound='Grade')
EXTRA_CLAIMS = t.Mapping[str, t.Any]
class Grade(object):
    """Builder for an LTI Assignment & Grade Services score payload.

    Setters return ``self`` so calls can be chained; :meth:`get_value`
    serializes the accumulated fields to a JSON string, omitting fields
    that were never set (``None``).
    """

    _score_given = None  # type: t.Optional[float]
    _score_maximum = None  # type: t.Optional[float]
    _activity_progress = None  # type: t.Optional[str]
    _grading_progress = None  # type: t.Optional[str]
    _timestamp = None  # type: t.Optional[str]
    _user_id = None  # type: t.Optional[str]
    _extra_claims = None  # type: t.Optional[EXTRA_CLAIMS]

    def get_score_given(self):
        # type: () -> t.Optional[float]
        return self._score_given

    def set_score_given(self, value):
        # type: (T_SELF, float) -> T_SELF
        self._score_given = value
        return self

    def get_score_maximum(self):
        # type: () -> t.Optional[float]
        return self._score_maximum

    def set_score_maximum(self, value):
        # type: (T_SELF, float) -> T_SELF
        self._score_maximum = value
        return self

    def get_activity_progress(self):
        # type: () -> t.Optional[str]
        return self._activity_progress

    def set_activity_progress(self, value):
        # type: (T_SELF, str) -> T_SELF
        self._activity_progress = value
        return self

    def get_grading_progress(self):
        # type: () -> t.Optional[str]
        return self._grading_progress

    def set_grading_progress(self, value):
        # type: (T_SELF, str) -> T_SELF
        self._grading_progress = value
        return self

    def get_timestamp(self):
        # type: () -> t.Optional[str]
        return self._timestamp

    def set_timestamp(self, value):
        # type: (T_SELF, str) -> T_SELF
        self._timestamp = value
        return self

    def get_user_id(self):
        # type: () -> t.Optional[str]
        return self._user_id

    def set_user_id(self, value):
        # type: (T_SELF, str) -> T_SELF
        self._user_id = value
        return self

    def set_extra_claims(self, value):
        # type: (T_SELF, EXTRA_CLAIMS) -> T_SELF
        self._extra_claims = value
        return self

    def get_extra_claims(self):
        # type: () -> t.Optional[EXTRA_CLAIMS]
        return self._extra_claims

    def get_value(self):
        # type: () -> str
        """Return the score payload as a JSON string, omitting unset fields.

        Bug fix: the previous version used truthiness
        (``x if x else None``) on scoreGiven/scoreMaximum, which silently
        dropped a legitimate score of 0 / 0.0.  Only ``None`` (never set)
        now causes a field to be omitted.
        """
        data = {
            'scoreGiven': self._score_given,
            'scoreMaximum': self._score_maximum,
            'activityProgress': self._activity_progress,
            'gradingProgress': self._grading_progress,
            'timestamp': self._timestamp,
            'userId': self._user_id,
        }
        if self._extra_claims is not None:
            data.update(self._extra_claims)
        return json.dumps({k: v for k, v in data.items() if v is not None})
|
24,456 | de77d5cdbf6e50aead23d2f044ffd9492de18f71 | #!/usr/bin/env python
import pygame
from pygame.locals import QUIT, MOUSEBUTTONDOWN, MOUSEBUTTONUP, KEYDOWN, KEYUP, K_ESCAPE, K_F1, K_F2, K_F3
from unboundmethods import timestamp, within
class Controller:
def __init__(self, game):
self.game = game
def handle_event(self, event):
if event.type == QUIT: #If the little x in the window was clicked...
self.game.run = False
if event.type == MOUSEBUTTONDOWN:
self.mouse_click(event)
if event.type == KEYDOWN:
self.keyboard(event)
if event.type == KEYUP:
pass
def keyboard(self, event):
global screen_text
pressed_keys = pygame.key.get_pressed()
#'ESCAPE' key is for exiting the game
if pressed_keys[K_ESCAPE]:
self.game.run = False
#'K_F1' key to toggle music
if pressed_keys[K_F1]:
pygame.mixer.music.stop()
self.game.db['play_music'] = not world.db['play_music']
self.game.commit()
#'K_F2' key for screenshot. This saves it to timeString().png
if pressed_keys[K_F2]:
try:
filename = timestamp() + '.png'
pygame.image.save((screen),filename)
print "screenshot saved as " + filename
except EnvironmentError as e:
print "Error:", e
if pressed_keys[K_F3]:
#Play random sound
fx_manifest[random.choice(fx_manifest.keys())].play()
#Seperates mouse clicks into left and right and then call their seperate fncs
def mouse_click(self, event):
if (event.button == 1) and (event.type == MOUSEBUTTONDOWN):
self.mouse_left_click(event)
if event.button == 3 and (event.type == MOUSEBUTTONDOWN):
self.mouse_right_click(event)
#This function is called when a left mouse click is passed
def mouse_left_click(self, event):
self.game.screen_text_top = []
self.game.selected = None
point = (event.pos[0],event.pos[1])
collide_list = []
selected_info = None
for e in self.game.view.hit_box_list:
if e[0].collidepoint(point):
if within(e[0], point):
collide_list.append(e)
if collide_list:
for e in collide_list:
info = [e[1].coordinate_key, e[1].name, 'float:' + str(e[1].float_offset), 'layer: ' + str(e[1].layer), 'px,py: ', e[1].pixel_offsets]
self.game.screen_text_top.append(str(info))
if hasattr(e[1], 'controllable') and e[1].controllable:
self.game.selected = e[1]
def mouse_right_click(self, event):
if self.game.selected:
point = (event.pos[0],event.pos[1])
for e in self.game.view.hit_box_list:
if e[0].collidepoint(point):
if within(e[0], point):
if hasattr(e[1], 'pathable') and e[1].pathable:
self.game.path_target = e[1].parent_coordinate
|
24,457 | 9de51082dc3a5753b3a2972dad1e3cd4cf5f22aa |
print("***** read entire file contents *****")
f1 = open("MyData", "r")
print(f1.read())
print("***** read line by line *****")
f2 = open("MyData", "r")
line = f2.readline()
#print(line)
while line!="":
print(line)
line = f2.readline()
print("***** read line by line using readlines() method *****")
f3 = open("MyData", "r")
lines = f3.readlines()
print(lines)
for line in lines:
print(line)
|
24,458 | 186a7941fe6defc01afbf994b7aa4bdc6e163719 | print('==== SUPER PROGRESSAO ====')
# Arithmetic progression printer: shows terms in batches of 10, asking the
# user after each batch how many more terms to print (0 ends the program).
i = int(input('Digite um numero para iniciar a progressao: '))  # first term
r = int(input('Digite a razão: '))                              # common difference
a = 10   # total number of terms authorised so far
pa = i   # current term
# Loop until pa reaches the term just past the authorised count (i + r*a).
while pa != r*a+i:
    print('{}'.format(pa), end='->')
    pa += r
    # Batch boundary reached: ask for an extension; e == 0 leaves the while
    # condition true on the next check, ending the loop.
    if pa == r*a+i:
        e = int(input('\nQuantos numeros mais: '))
        a += e
        if e == 0:
            print('\033[1;31m FIM DO PROGRAMA')
24,459 | 92c26b973cb100a9caf9e9e24cde74f104652c3e | import numpy as np
import cv2
import poincare
import random
import copy
p = 5
q = 4
t1 = 1 - np.tan(np.pi / p) * np.tan(np.pi / q)
t2 = 1 + np.tan(np.pi / p) * np.tan(np.pi / q)
gonRadius = np.sqrt(t1 / t2)
#print gonRadius
pImg = poincare.poincareImg(radius = 400, pointRadius = 5)
pImg.drawEdge()
origin = poincare.interiorPoint(0.0000 + 0.0000j)
#p1 = poincare.interiorPoint(0.5 + 0.0j)
#p2 = poincare.interiorPoint(0.4 + 0.3j)
#p3 = poincare.interiorPoint(0.3 + 0.3j)
gonFundamental = poincare.polygon(origin, verticeCount = 5, radius = gonRadius)
#print gon.vertice1
#print gon.vertice2
#line3 = poincare.genCircle(p1, p2)
#print line3.center
#print line3.radius
#p4 = poincare.getInverse(p3, line3)
#p4 = poincare.getMirrorPoint(p3, p1, p2)
def getGon(p, q, r, label):
    """Build the tiling polygon reached by successive mirrorings named by *label*.

    *label* is a string of side digits applied right-to-left; "0" or "" yields
    the fundamental polygon of radius *r* at the origin.
    NOTE(review): *p* and *q* are accepted but unused — verticeCount is
    hard-coded to 5; confirm against the module-level p/q settings.
    """
    # print "getGon, label = {}".format(label)
    if label == "0" or label == "":
        return poincare.polygon(origin, verticeCount = 5, radius = r)
    else:
        # Recurse on the tail of the label, then mirror across the side
        # named by the leading digit.  (Note: 'iter' shadows the builtin.)
        iter = getGon(p, q, r, label[1:])
        # iter = copy.copy(iter)
        # print iter, iter.center
        return iter.getMirror(int(label[0]))
def getLabel(point):
    """Return the mirror-sequence label mapping *point* into the fundamental polygon.

    Repeatedly reflects the point inward with getLabelIter, prepending each
    side digit, until the point lies inside the fundamental polygon.
    """
    ret, iterPoint = getLabelIter(point)
    while iterPoint:
        iterRet, iterPoint = getLabelIter(iterPoint)
        ret = iterRet + ret
    # print ret
    return ret
def getLabelIter(point):
    """Reflect *point* across the first fundamental-polygon side it lies beyond.

    Returns (side-index-as-string, mirrored point), or ("", None) once the
    point is already inside the fundamental polygon.
    """
    # print "getLabelIter, z = {}".format(point.z)
    # pImg.drawPoint(point)
    # NOTE(review): iterates with gon.verticeCount but tests sides of
    # gonFundamental — presumably the counts match; confirm.
    for i in range(gon.verticeCount):
        p2 = gonFundamental.side(i + 1, point)
        if p2:
            # p2 = poincare.getMirrorPoint(p1, gonFundamental.getVertice(i), gonFundamental.getVertice(i+1))
            # pImg.drawPoint(p2)
            # print "getLabelIter, p2.z = {}".format(p2.z)
            # if p2.r < point.r:
            return str(i + 1), p2
    return "", None
gon = getGon(5, 4, gonRadius, "0")
#pImg.drawPolygon(gon, color = (255, 0, 0))
#gon2 = getGon(5, 4, gonRadius, "1230")
#pImg.drawPolygon(gon2, color = (255, 0, 0))
colors = {
"" : (255, 255, 255)
,"0" : (255, 255, 255)
,"1" : (255, 255, 0)
,"2" : (0, 255, 0)
,"3" : (0, 0, 255)
,"4" : (0, 255, 255)
,"5" : (255, 0, 255)
}
def colorFromLabel(label):
    """Map a mirror-sequence label to an HSV colour (hue varies; S = V = 255).

    Digits are weighted by successive multiples of the golden-ratio fraction
    so that similar labels are spread across the hue circle.
    """
    golden = (np.sqrt(5) + 1) / 2 % 1
    weight = golden
    total = 0
    for digit in label:
        total += int(digit) * weight
        weight = (weight + golden) % 1
    # OpenCV hue range is [0, 180).
    return (int(total % 1 * 180), 255, 255)
# Sample 499 random interior points (radius 1 - u**3, so most land near the
# rim), colour each by its tiling label, and draw the tile it belongs to.
for i in range(499):
    p1 = poincare.interiorPoint(np.exp(np.pi * 2j * random.random())* (1 - random.random() ** 3))
    label = getLabel(p1)
    p1.color = colorFromLabel(label)
    pImg.drawPoint(p1)
    # print p1, label
    gonTemp = getGon(5, 4, gonRadius, label)
    pImg.drawPolygon(gonTemp, color = (255, 0, 0))
#pImg.drawPolygon(getGon(5, 4, gonRadius, "1"), color = (0, 255, 0))
#for i in range(5):
#    print p1
#    p2 = gon.getVertice(i + 1)
#    pImg.drawPoint(p1)
#    c = poincare.genCircle(p1, p2)
#    pImg.drawCircle(c, color = (255, 255, 255))
#    gon2 = gon.getMirror(i)
#    pImg.drawPolygon(gon2)
#    pImg.drawPoint(gon2.center)
#    print gon2.vertices
#    for j in range(5):
#        p3 = gon2.getVertice(j)
#        print p3
#        pImg.drawPoint(p3)
#line1 = poincare.circle(somePoint = origin.z, someDirection = 1 * 1j, inverseRadius = 2.0)
#pImg.drawPoint(origin)
#pImg.drawPoint(p1)
#pImg.drawPoint(p2)
#pImg.drawPoint(p3)
#pImg.drawPoint(p4)
#line1 = p1.getPole()
#line2 = p2.getPole()
#pImg.drawCircle(line1, color = (255, 255, 255))
#pImg.drawCircle(line2, color = (255, 255, 255))
#pImg.drawCircle(line3, color = (255, 255, 255))
# Render: the buffer is treated as HSV, converted to RGB, shown and saved.
img = pImg.getImg()
img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
cv2.imshow('myImg', img)
cv2.imwrite("lastPolygon.jpg", img)
cv2.waitKey(0)
|
24,460 | eae76949f11371e493f025cc36ebf5b176d74ae3 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 21:14:00 2016
@author: joshuakaplan
"""# -*- coding: utf-8 -*-
"""
Created on Sat Apr 16 21:34:23 2016
@author: joshuakaplan
"""
import pyproj
import shapefile
from bokeh.models.glyphs import Patches
from bokeh.models import (
GMapPlot, Range1d, ColumnDataSource,HoverTool, PanTool, WheelZoomTool,
BoxSelectTool,ResetTool, PreviewSaveTool,GMapOptions)
from bokeh.plotting import show, output_file
import pandas
## Getting unemployment data into usable format
unemployment = pandas.read_csv('UnemploymentLAD.csv')
# Normalise column names: spaces then hyphens become underscores.
# NOTE: isinstance(..., unicode) makes this module Python 2 only.
cols = unemployment.columns
cols = cols.map(lambda x: x.replace(' ', '_') if isinstance(x, (str, unicode)) else x)
unemployment.columns = cols
cols = unemployment.columns
cols = cols.map(lambda x: x.replace('-', '_') if isinstance(x, (str, unicode)) else x)
unemployment.columns = cols
# Keep district name, 16-64 unemployment rate and year; rename for brevity,
# then restrict to 2011 observations.
test =unemployment.loc[:,['local_authority:_district_/_unitary_(prior_to_April_2015)','Unemployment_rate___aged_16_64','Date']]
cols = ['LAD','Unemployment','Date']
test.columns = cols
unemp2011 = test.loc[(test.Date==2011),['LAD','Unemployment','Date']]
## pulling lat/longs from shapefile
sf = shapefile.Reader("/Users/joshuakaplan/lad/england_lad_2011_gen.shp")
#http://gis.stackexchange.com/questions/168310/how-to-convert-projected-coordinates-to-geographic-coordinates-without-arcgis
#https://karlhennermann.wordpress.com/2015/02/16/how-to-make-lsoa-and-msoa-boundaries-from-uk-data-service-align-properly-in-arcgis/
shapes = sf.shapes()
records = sf.records()
#fields = sf.fields
def transform(epsg_in, epsg_out, x_in, y_in):
    """Reproject one coordinate pair from EPSG:epsg_in to EPSG:epsg_out."""
    # define source and destination coordinate systems based on the ESPG code
    # NOTE(review): the init='epsg:...' form is deprecated in pyproj >= 2;
    # prefer pyproj.Transformer.from_crs when the project upgrades.
    srcProj = pyproj.Proj(init='epsg:%i' % int(epsg_in), preserve_units=True)
    dstProj = pyproj.Proj(init='epsg:%i' % int(epsg_out), preserve_units=True)
    # perform transformation
    x_out,y_out = pyproj.transform(srcProj, dstProj, x_in, y_in)
    return x_out,y_out
data = dict([])
# Map shape index -> {"name", "lats", "longs"}, reprojecting every boundary
# vertex from British National Grid (EPSG:27700) to WGS84 (EPSG:4326).
for i in range(len(shapes)):
    temp = dict()
    lats = list()
    longs = list()
    for j in range(len(shapes[i].points)):
        x = shapes[i].points[j][0]
        y = shapes[i].points[j][1]
        # Reproject once per vertex. (The original called transform() twice
        # per point — once for the latitude, once for the longitude — doing
        # every reprojection twice for no benefit.)
        lon, lat = transform(epsg_in=27700, epsg_out=4326, x_in=x, y_in=y)
        lats.append(lat)
        longs.append(lon)
    name = records[i][1]
    temp['name'] = name
    temp['lats'] = lats
    temp['longs'] = longs
    data[i] = temp
# we should make the colors based off of unemployment rate, just to learn how to do it
colors = ["#F1EEF6", "#D4B9DA", "#C994C7", "#DF65B0", "#DD1C77", "#980043"]
lad_names =[lad["name"] for lad in data.values()]
lad_lats = [lad["lats"] for lad in data.values()]
lad_longs = [lad["longs"] for lad in data.values()]
lad_unemployment= unemp2011[unemp2011['LAD'].isin(lad_names)]
# Tile the 6-colour palette, then truncate to the number of DISTINCT names.
# NOTE(review): every ColumnDataSource column must have the same length;
# len(set(lad_names)) can differ from len(lad_names) — confirm.
col = colors*1200
source = ColumnDataSource(data=dict(
    y=lad_lats,
    x=lad_longs,
    color=col[:len(set(lad_names))],
    name=lad_names,
    unemployment=lad_unemployment.Unemployment
))
TOOLS="pan,wheel_zoom,box_zoom,reset,hover,save"
# Google-Maps-backed plot centred on London; each LAD is one patch glyph.
p = GMapPlot(title="LAD", plot_width=1200, plot_height=800, x_range = Range1d(), y_range = Range1d(), map_options = GMapOptions(lat=51.5074, lng=0.1278, zoom=10))
p.map_options.map_type = "terrain"
patch = Patches(xs="x", ys="y", fill_color="color", fill_alpha=0.7, line_color="black", line_width=0.5)
patches_glyph = p.add_glyph(source, patch)
p.add_tools(PanTool(), WheelZoomTool(), BoxSelectTool(), HoverTool(), ResetTool(), PreviewSaveTool())
hover = p.select_one(HoverTool)
hover.point_policy = "follow_mouse"
hover.tooltips = [
    ("Name", "@name"),
    ("(Lat, Long)", "($y, $x)"),
    ("Unemployment Rate 2011","@unemployment")
]
output_file("LADGMap.html", title="LAD GMap test", mode="cdn")
show(p)
|
24,461 | 6c4eae9799bf78b3639f1ef79ffa26743632952a | #!/usr/local/bin/python
# Convert a comma-separated Kyrgyz dictionary dump into wiki-markup blocks
# ("xxxx ... yyyy" framed entries) ready for pywikibot upload.
file_1 = '/home/chinara/Documents/ky_wiki/kyr_dict/dict_kw.txt'
file_2 = '/home/chinara/dev/pywikibot/source_2.txt'
new_lines = []
with open(file_1) as myfile:
    lines = myfile.readlines()
for i in range(len(lines)):
    line = lines[i].split(',')
    # Drop the first 3 and last 6 comma-fields; field 0 of the remainder
    # holds the headword (2 leading / 1 trailing characters sliced off).
    new_line = line[3:-6]
    keyword = new_line[0][2:-1]
    title = 'xxxx\n' + "'''" + keyword + "'''" + '\n'
    new_lines.append(title)
    # The rest of the fields, rejoined, become the Russian-gloss body.
    second_line = ''.join(new_line[1:len(new_line)]).split(' ')
    body = '=={{ky}}==\n' + '\n' + '=={{ru}}==\n' + '\n'.join(second_line[1:len(second_line)]) + '\n' + '\n' + 'yyyy\n'
    new_lines.append(body)
with open(file_2, 'w') as new_file:
    new_file.write(''.join(new_lines))
|
24,462 | 3b6c150325f7b53bb06c268cc684d026fd825a83 | import datetime as dt
import pendulum
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
local_tz = pendulum.timezone("America/Sao_Paulo")
# Default task arguments: timezone-aware start date, 3 retries with a
# 5-minute backoff.
args = {
    'owner': 'me',
    'start_date': dt.datetime(2020, 11, 10, tzinfo=local_tz),
    'retries': 3,
    'retry_delay': dt.timedelta(minutes=5)
}
# Minimal XCom demo: one task pushes the current timestamp, the next echoes
# it via a templated bash command.
with DAG('xcom_example', default_args=args, catchup=False, schedule_interval=None) as dag:
    def current_timestamp(**kwargs):
        # Push the wall-clock time into XCom under key 'current_timestamp'.
        current_timestamp = dt.datetime.strftime(dt.datetime.now(),"%Y-%m-%d %H:%M")
        kwargs['ti'].xcom_push(key='current_timestamp', value=current_timestamp)
    xcom_push = PythonOperator(task_id='xcom_push',
                               python_callable=current_timestamp,
                               provide_context=True)
    echo_xcom = BashOperator(task_id='echo_xcom',
                             bash_command='echo {{ task_instance.xcom_pull(key="current_timestamp", task_ids="xcom_push")}}')
xcom_push >> echo_xcom |
24,463 | df5415e0a26d6f6b8fe668ff45bf8fb1e06ec547 | import copy
# Universe {1, ..., N} whose power set is enumerated by get_subsets().
N = 5
S = list(range(1,N+1))
def get_subsets(index=0):
    """Return every subset of S[index:] as a list of lists.

    Recurses on the tail: each subset of the tail either omits or includes
    S[index]; the "without" group is listed first, matching element order.
    """
    if index == len(S):
        return [[]]
    without_current = get_subsets(index + 1)
    with_current = [subset + [S[index]] for subset in without_current]
    return without_current + with_current
print(S)
print(get_subsets())
|
24,464 | c27484e8edbbd35d32235f15f00cc30502486038 | # Example Feature Extraction from XML Files
# We count the number of specific system calls made by the programs, and use
# these as our features.
# This code requires that the unzipped training set is in a folder called "train".
# Wasay: I have extended the sample code. Comments within the file.
import os
from collections import Counter
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import numpy as np
from scipy import sparse
import csv
import pandas as pd
import util
# Data locations and the global syscall-statistics accumulators.
TRAIN_DIR = "../train"
TEST_DIR = "../test"
CALLS = {}          # call tag -> occurrence count (later reset to 0 by refine_calls)
call_set = set([])  # every distinct call tag ever seen
def add_to_set(tree):
    """Record every XML tag of *tree* into the global call_set."""
    call_set.update(element.tag for element in tree.iter())
# Wasay: Extract a set of unique sys calls in the entire training data set:
def get_unique_calls(start_index, end_index, direc="train"):
    """Scan files [start_index, end_index) of *direc*, accumulating every
    syscall tag into the global CALLS counter (and call_set via add_to_set).

    Files are walked in os.listdir order; classes/ids are collected but not
    returned — only the global side effects matter here.
    """
    X = None  # NOTE(review): never reassigned in this function — unused
    classes = []
    ids = []
    i = -1
    for datafile in os.listdir(direc):
        if datafile == '.DS_Store':
            continue
        i += 1
        if i < start_index:
            continue
        if i >= end_index:
            break
        # extract id and true class (if available) from filename
        id_str, clazz = datafile.split('.')[:2]
        ids.append(id_str)
        # add target class if this is training data
        try:
            classes.append(util.malware_classes.index(clazz))
        except ValueError:
            # we should only fail to find the label in our list of malware classes
            # if this is test data, which always has an "X" label
            assert clazz == "X"
            classes.append(-1)
        # parse file as an xml document
        tree = ET.parse(os.path.join(direc,datafile))
        add_to_set(tree)
        this_row = call_feats(tree)  # NOTE(review): result discarded — confirm intended
        # Accumulate all the unique calls in CALLS
        for el in tree.iter():
            call = el.tag
            if call not in CALLS:
                CALLS[call]=1
            else:
                CALLS[call]+=1
def refine_calls(top):
    """Keep only the *top* most frequent calls in CALLS, resetting counts to 0.

    NOTE(review): relies on Python 2 dict.keys()/values() returning lists in
    matching order; under Python 3, np.take over a dict view breaks.
    """
    if top >= len(CALLS):
        top = len(CALLS)
    # Indices of the `top` largest counts (argsort is ascending, so take the tail).
    a = np.argsort(CALLS.values())
    top_indices = [a[len(a)-top:len(a)]]
    top_calls = np.take(CALLS.keys(),top_indices)
    CALLS.clear()
    for tc in top_calls[0]:
        CALLS[tc]=0
def create_data_matrix(start_index, end_index, direc="train"):
    """Build (X, classes, ids) for files [start_index, end_index) of *direc*.

    X stacks one call_feats() row per file; classes holds the label index
    (or -1 for unlabeled "X" test files); ids holds the filename ids.
    """
    X = None
    classes = []
    ids = []
    i = -1
    for datafile in os.listdir(direc):
        if datafile == '.DS_Store':
            continue
        i += 1
        if i < start_index:
            continue
        if i >= end_index:
            break
        # extract id and true class (if available) from filename
        id_str, clazz = datafile.split('.')[:2]
        ids.append(id_str)
        # add target class if this is training data
        try:
            classes.append(util.malware_classes.index(clazz))
        except ValueError:
            # we should only fail to find the label in our list of malware classes
            # if this is test data, which always has an "X" label
            assert clazz == "X"
            classes.append(-1)
        # parse file as an xml document
        tree = ET.parse(os.path.join(direc,datafile))
        add_to_set(tree)
        this_row = call_feats(tree)
        if X is None:
            X = this_row
        else:
            X = np.vstack((X, this_row))
    return X, np.array(classes), ids
# Wasay: This is the function that you can augment to extract other features:
def call_feats(tree):
    """Return a feature vector of per-document call counts for *tree*.

    The vector is aligned with the iteration order of CALLS' keys; calls
    that never appear in *tree* get a count of 0.
    """
    # Wasay: I am using all unique calls instead of just a subset of them.
    # list() keeps this working on Python 3 dict views as well.
    good_calls = list(CALLS.keys())
    call_counter = {}
    for el in tree.iter():
        call = el.tag
        # Bug fix: the first occurrence used to be stored as 0, so every
        # count came out one too low; start at 1 like the CALLS accumulator
        # in get_unique_calls does.
        if call not in call_counter:
            call_counter[call] = 1
        else:
            call_counter[call] += 1
    call_feat_array = np.zeros(len(good_calls))
    for i, call in enumerate(good_calls):
        if call in call_counter:
            call_feat_array[i] = call_counter[call]
    return call_feat_array
def getFeatures(filename,start,end,_start,_end):
    """Load *filename* as CSV and return rows start..end (INCLUSIVE) and
    columns [_start, _end) by position.

    DataFrame.ix was removed in pandas 1.0. The old calls always used a
    default RangeIndex with integer bounds, where .ix sliced rows
    label-based (inclusive of *end*) and columns positionally, so
    iloc[start:end + 1, _start:_end] reproduces them exactly.
    """
    f = pd.read_csv(filename)
    return f.iloc[start:end + 1, _start:_end]
## Feature extraction
def main():
    """Driver: cross-validate on a train split and/or predict on the test set.

    NOTE: Python 2 module (print statements below).
    """
    #print np.array(getFeatures("../features/SCfeatures_train.csv",0,1499,120,121)).T[0]
    #exit()
    ##
    predict = True
    cross_validate = True
    # Wasay: When predict is true, we use the test data set and make actual
    ## predictions and write them down to result.csv. When predict is false,
    ### we divide the train data set into two halves and train on one half
    #### and cross validate on the other. We print the accuracy.
    #get_unique_calls(0, 5000, TRAIN_DIR)
    #refine_calls(200)
    #print len(CALLS)
    if cross_validate:
        # Labels come from the XML scan; features from the precomputed CSV.
        X_train, t_train, train_ids = create_data_matrix(0, 1500, TRAIN_DIR)
        X_valid, t_valid, valid_ids = create_data_matrix(1500, 5000, TRAIN_DIR)
        X_train = getFeatures("../features2/TEST_train.csv",0,1499,0,500)
        X_valid = getFeatures("../features2/TEST_train.csv",1500,5000,0,500)
        #t_train = np.array(getFeatures("../features/SCfeatures_train.csv",0,1499,120,121)).T[0]
        #t_valid = np.array(getFeatures("../features/SCfeatures_train.csv",1500,5000,120,121)).T[0]
        #print 'Data matrix (training set):'
        #print X_train.shape
        #print 'Classes (training set):'
        #print t_train.shape
        import models
        models.EXRT(X_train,t_train,X_valid,t_valid,False)
    if predict:
        X_train, t_train, train_ids = create_data_matrix(0, 5000, TRAIN_DIR)
        X_test, t_test, test_ids = create_data_matrix(0, 5000, TEST_DIR)
        X_train = getFeatures("../features2/TEST_train.csv",0,5000,0,500)
        X_test = getFeatures("../features2/TEST_test.csv",0,5000,0,500)
        print 'Data matrix (training set):'
        print X_train.shape
        print t_train.shape
        print X_test.shape
        #print test_ids.shape
        #print 'Classes (training set):'
        #print t_train.shape
        import models
        models.EXRT(X_train,t_train,X_test,test_ids,True)

if __name__ == "__main__":
    main()
|
24,465 | d2144681688e5bcdc3aae46e873161f683fdc222 | from django.test import TestCase
from rest_framework import status
from rest_framework.test import APITestCase
from .models import Meet
class MeetingTests(APITestCase):
    """API tests for the /list/ meetings endpoint."""

    def test_post(self):
        # Creating a meeting returns 201 and persists the record.
        url = "/list/"
        data = {"id":1,"doctor":"x","patient":"b","meeting_date":"2021-05-08"}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Meet.objects.get().doctor, 'x')

    def test_repeated(self):
        # Posting the same meeting twice is rejected with 400.
        url = "/list/"
        data = {"id":1,"doctor":"x","patient":"b","meeting_date":"2021-05-08"}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data = {"id":1,"doctor":"x","patient":"b","meeting_date":"2021-05-08"}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_get(self):
        # Listing meetings always succeeds.
        url = "/list/"
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
|
24,466 | 756f33027739614f4d3bab0f87d7989b1b01d813 | import os
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.loader import ItemLoader
from items import CraigslistScraperItem
class RealEstateSpider(scrapy.Spider):
    """Scrape Craigslist NY real-estate listings (title, price, date, link,
    neighborhood, coordinates) into results.csv."""
    name = "realestate_loader"
    start_urls = ["http://newyork.craigslist.org/d/real-estate/search/rea/"]
    # Remove any stale results file once, at class-definition time.
    try:
        os.remove("results.csv")
    except OSError:
        pass

    def __init__(self):
        # Coordinates most recently seen by parse_detail.
        # NOTE(review): scrapy requests are asynchronous, so these may hold
        # coordinates from a DIFFERENT ad when the loader reads them below —
        # pass them via cb_kwargs/meta instead of instance state.
        self.lat = ""
        self.lon = ""

    # implicit scrapy method
    # def start_requests(self):
    #     yield scrapy.Request("http://newyork.craigslist.org/d/real-estate/search/rea/", callback=self.parse)

    def parse(self, response):
        # One item per result row; the detail page is fetched separately to
        # pick up the geo coordinates.
        all_ads = response.xpath("//li[@class='result-row']")
        for ads in all_ads:
            ad_link = ads.xpath(".//a[@class='result-title hdrlnk']/@href").get()
            yield response.follow(url=ad_link, callback=self.parse_detail)
            loader = ItemLoader(item=CraigslistScraperItem(), selector=ads, response=response)
            loader.add_xpath("title", ".//a[@class='result-title hdrlnk']/text()")
            loader.add_xpath("price", ".//a//span[@class='result-price']/text()")
            loader.add_xpath("date", ".//time[@class='result-date']/text()")
            loader.add_xpath("ad_link", ".//a[@class='result-title hdrlnk']/@href")
            loader.add_xpath("neighborhood", ".//span[@class='result-hood']/text()")
            loader.add_value("lat", self.lat)
            loader.add_value("lon", self.lon)
            yield loader.load_item()
        # get next page ads
        next_page = response.xpath("//a[@class='button next']/@href").get()
        if next_page:
            yield response.follow(url=next_page, callback=self.parse)

    def parse_detail(self, response):
        """
        Get coordinates from ad page
        """
        # geo.position content is "<lat>;<lon>".
        self.lat = response.xpath("//meta[@name='geo.position']/@content").get().split(";")[1]
        self.lon = response.xpath("//meta[@name='geo.position']/@content").get().split(";")[0]
if __name__ == "__main__":
    # Run the spider standalone, exporting items to results.csv.
    process = CrawlerProcess(settings={
        "DOWNLOADER_CLIENT_TLS_METHOD": "TLSv1.2",
        "FEEDS": {
            "results.csv": {"format": "csv"},
        },
    })
    process.crawl(RealEstateSpider)
    process.start()
|
24,467 | f78c25fa545430c74a906e96417c702f729e8e6e | from django.urls import path, include
from .views import (
AmbassadorAPIView, ProductGenericAPIView, LinkAPIView, OrderAPIView, RegisterAPIView,
LoginAPIView, UserAPIView, LogoutAPIView, ProfileInfoAPIView, ProfilePasswordAPIView
)
# Routes: authentication, profile management, ambassadors, products
# (collection + detail by pk), per-user shared links, and orders.
urlpatterns = [
    path('register', RegisterAPIView.as_view()),
    path('login', LoginAPIView.as_view()),
    path('user', UserAPIView.as_view()),
    path('logout', LogoutAPIView.as_view()),
    path('users/info', ProfileInfoAPIView.as_view()),
    path('users/password', ProfilePasswordAPIView.as_view()),
    path('ambassadors', AmbassadorAPIView.as_view()),
    path('products', ProductGenericAPIView.as_view()),
    path('products/<str:pk>', ProductGenericAPIView.as_view()),
    path('users/<str:pk>/links', LinkAPIView.as_view()),
    path('orders', OrderAPIView.as_view()),
]
|
24,468 | cba5453671c61c1c10cbd712896c48262fa33c20 | import os
import sys
import numpy as np
import matplotlib
import platform
if platform.system() == "Windows":
import matplotlib.pyplot as plt
import matplotlib.patches as patches
else:
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def add_line(ax,
             x_beg,
             y_beg,
             x_end,
             slope,
             label=None,
             xl=None,
             yl=None,
             fontsize="x-large",
             scale="lin",
             c="#7f7f7f"):
    """Draw a dash-dot guide line in axes coordinates, with *slope* given in
    data units; optionally annotate it with *label* rotated along the line.

    scale="lin" assumes linear axes, anything else assumes log-log.
    """
    xmin, xmax = ax.get_xlim()
    ymin, ymax = ax.get_ylim()
    # Convert the data-space slope into axes-fraction space.
    if scale == "lin":
        slope_new = slope * (xmax - xmin) / (ymax - ymin)
    else:
        slope_new = slope * (np.log10(xmax / xmin) / np.log10(ymax / ymin))
    x = np.linspace(x_beg, x_end, 100)
    y = slope_new * (x - x_beg) + y_beg
    ax.plot(x, y, "-.", transform=ax.transAxes, color=c)
    if label is not None:
        width = ax.bbox.width
        height = ax.bbox.height
        # Rotate the text so it follows the on-screen angle of the line.
        deg = np.arctan(slope_new * height / width) * 180 / np.pi
        dx = x_end - x_beg
        if xl is None:
            xl = x_beg + dx * 0.3
        if yl is None:
            yl = y_beg + dx * 0.6 * slope_new
        ax.text(
            xl,
            yl,
            label,
            transform=ax.transAxes,
            rotation=deg,
            color=c,
            fontsize=fontsize)
def show_disk(file):
    """Scatter unit circles at the (x, y) positions stored in *file*.

    The file has two header lines; tab-separated columns 1 and 2 hold x, y.
    """
    with open(file) as f:
        lines = f.readlines()[2:]
    x = np.zeros(len(lines))
    y = np.zeros_like(x)
    for i, line in enumerate(lines):
        s = line.replace("\n", "").split("\t")
        x[i] = float(s[1])
        y[i] = float(s[2])
    ax = plt.subplot(111)
    # Anti-aliasing is expensive: drop it above 5000 points, and fall back
    # to coarse CirclePolygons above 10000.
    if x.size <= 10000:
        if x.size <= 5000:
            aa = True
        else:
            aa = False
        for i in range(x.size):
            ax.add_patch(patches.Circle((x[i], y[i]), 1, aa=aa))
    else:
        for i in range(x.size):
            ax.add_patch(
                patches.CirclePolygon(
                    (x[i], y[i]), 1, resolution=10, aa=False))
    ax.axis("equal")
    plt.show()
    plt.close()
def read(file):
    """Parse a tab-separated data file and return (x, y, theta) arrays.

    Each line is "<id>\t<x>\t<y>\t<theta>"; the first column is ignored.
    """
    xs, ys, angles = [], [], []
    with open(file) as handle:
        for line in handle:
            fields = line.replace("\n", "").split("\t")
            xs.append(float(fields[1]))
            ys.append(float(fields[2]))
            angles.append(float(fields[3]))
    return np.array(xs), np.array(ys), np.array(angles)
def show_rect(file, a, b, ax=None, fill=False, nmax=None):
    """Draw the a-by-b rectangles (rotated by theta) stored in *file*.

    When *ax* is omitted a new figure is created and shown; *nmax* limits
    how many rectangles are drawn; *fill* colours them along viridis.
    """
    x, y, theta = read(file)
    if ax is None:
        flag_show = True
        ax = plt.subplot(111)
    else:
        flag_show = False
    if nmax is None:
        size = x.size
    else:
        size = nmax
    if fill is False:
        for i in range(size):
            ax.add_patch(
                patches.Rectangle(
                    (x[i], y[i]), a, b, angle=theta[i], fill=fill))
    else:
        # One viridis colour per rectangle, in file order.
        c = plt.cm.viridis(np.linspace(0, 1, size))
        for i in range(size):
            ax.add_patch(
                patches.Rectangle(
                    (x[i], y[i]), a, b, angle=theta[i], color=c[i]))
    ax.axis("equal")
    # ax.axis("off")
    if flag_show:
        plt.show()
        plt.close()
def cal_fracal_dimension(file):
    """Write "fd_<file>" with (r, N(r)) pairs: the cumulative number of
    particles within radius r of the origin, over log-spaced radii.

    (The misspelled name "fracal" is kept because callers may reference it.)
    """
    x, y, theta = read(file)
    r = np.sqrt(x * x + y * y)
    rmax = r.max()
    print(rmax)
    # 50 log-spaced (base-2) radius bins from 1 to rmax, plus a leading 0
    # edge so the first bin captures everything inside r = 1.
    rbins = np.logspace(0, np.log2(rmax), 50, base=2)
    bins = np.zeros(rbins.size + 1)
    bins[0] = 0
    bins[1:] = rbins
    hist, bin_edge = np.histogram(r, bins=bins)
    # Cumulative count: mass[i] = number of particles with r <= rbins[i].
    mass = np.zeros_like(rbins)
    mass[0] = hist[0]
    for i in range(1, mass.size):
        mass[i] = mass[i - 1] + hist[i]
    # plt.loglog(rbins, mass, "-o")
    # plt.show()
    # plt.close()
    with open("fd_" + file, "w") as f:
        for i in range(mass.size):
            f.write("%f\t%f\n" % (rbins[i], mass[i]))
def plot_r_vs_N():
    """Plot the cumulative counts written by cal_fracal_dimension for several
    tilt angles on log-log axes, with a slope-1.71 reference line."""
    def read_file(file):
        # Parse "r\tN" pairs from one fd_<angle>.dat file.
        with open(file) as f:
            lines = f.readlines()
        r = np.zeros(len(lines))
        N = np.zeros_like(r)
        for i, line in enumerate(lines):
            s = line.replace("\n", "").split("\t")
            r[i] = float(s[0])
            N[i] = float(s[1])
        return r, N
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 6))
    for angle in [0, 2, 4, 6, 8, 10]:
        r, N = read_file("fd_%d.dat" % angle)
        ax.loglog(r, N, "o", label=r"$\theta=%d\degree$" % (angle))
    plt.legend(fontsize="x-large")
    plt.xlim(10)
    plt.ylim(1)
    add_line(ax, 0.5, 0.5, 0.9, 1.71, scale="log", label=r"$slope = 1.71$")
    plt.xlabel(r"$r$", fontsize="xx-large")
    plt.ylabel(r"$N$", fontsize="xx-large")
    plt.tight_layout()
    plt.show()
    plt.close()
if __name__ == "__main__":
    # if platform.system() == "Windows":
    #     os.chdir("data")
    #     file = "500_8_120.dat"
    # else:
    #     file = sys.argv[1]
    # fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7, 7))
    # show_rect(file, 14, 2, ax, fill=True, nmax=None)
    # plt.tight_layout()
    # if platform.system() == "Windows":
    #     plt.show()
    # else:
    #     plt.savefig(file.replace(".dat", ".png"))
    # plt.close()
    # cal_fracal_dimension(file)
    # plot_r_vs_N()
    os.chdir("data/CCW")
    N = 500
    angle = [2, 4, 6, 8, 10, 12]
    tag = ["a", "b", "c", "d", "e", "f"]
    # One panel per tilt angle, drawing the first N rectangles of each run.
    fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(11, 8))
    for i, ax in enumerate(axes.flat):
        file = "100000_%d_123.dat" % (angle[i])
        show_rect(file, 14, 2, ax, fill=True, nmax=N)
        ax.set_title(r"${\rm (%s)}\ \theta = %d\degree$" % (tag[i], angle[i]))
    plt.suptitle(r"$N=%d$" % (N), fontsize="xx-large")
    plt.tight_layout(rect=[0, 0, 1, 0.96])
    plt.show()
    # plt.savefig("%d.png" % (N))
    plt.close()
|
24,469 | b04031c23ce200a64e58abbe569ee3095e1f8e20 | r"""
A ``GET`` request to the manifest object will return the concatenation of the
objects from the manifest, much like DLO. When the URL is queried with
``multipart-manifest=get``, an extra metadata header ``X-Object-Meta-SLOmd5``
holding the md5sum of the SLO object's segments is added to the SLO manifest object.
===================== ==================================================
Header Value
===================== ==================================================
X-Object-Meta-SLOmd5 md5sum of whole segments, md5 of SLO object
"""
import json
import hashlib
from swift.common.http import is_success
from swift.common.utils import split_path, get_logger, \
closing_if_possible, get_valid_utf8_str
from swift.common.wsgi import WSGIContext, make_subrequest, \
make_pre_authed_request
from swift.common.exceptions import ListingIterError
from webob import Request
from webob.exc import HTTPServerError
class SLOHashMiddleware(WSGIContext):
    """WSGI middleware that, on GET ...?multipart-manifest=get, hashes every
    SLO segment and stores the digest as X-Object-Meta-SLOmd5."""

    def __init__(self, app, conf):
        self.app = app
        self.logger = get_logger(conf, log_route='slo_hash')

    def _post_slomd5_header(self, env, SLOmd5):
        # POST the computed digest back onto the manifest object as metadata;
        # failures are logged, not raised.
        headers = {}
        headers['X-Object-Meta-SLOmd5'] = SLOmd5
        post_req = make_pre_authed_request(env, method='POST',
                                           swift_source='SLOmd5',
                                           path=env['PATH_INFO'],
                                           headers=headers)
        post_resp = post_req.get_response(self.app)
        if not is_success(post_resp.status_int):
            self.logger.info('POST with SLOmd5 header failed: ' +
                             str(post_resp.body))

    def _get_manifest_read(self, resp_iter):
        # Drain the response iterator and parse it as a JSON segment list;
        # an unparsable body yields an empty list.
        with closing_if_possible(resp_iter):
            resp_body = ''.join(resp_iter)
        try:
            segments = json.loads(resp_body)
        except ValueError:
            segments = []
        return segments

    def _fetch_sub_slo_segments(self, req, version, acc, con, obj):
        """
        Fetch the submanifest, parse it, and return it.
        Raise exception on failures.
        """
        sub_req = make_subrequest(
            req.environ, path='/'.join(['', version, acc, con, obj]),
            method='GET',
            headers={'x-auth-token': req.headers.get('x-auth-token')},
            agent='%(orig)s SLO MultipartGET', swift_source='SLO')
        sub_resp = sub_req.get_response(self.app)
        if not sub_resp.is_success:
            closing_if_possible(sub_resp.app_iter)
            raise ListingIterError(
                'while fetching %s, GET of submanifest %s '
                'failed with status %d' % (req.path, sub_req.path,
                                           sub_resp.status_int))
        try:
            with closing_if_possible(sub_resp.app_iter):
                return ''.join(sub_resp.app_iter)
        except ValueError as err:
            raise ListingIterError(
                'while fetching %s, String-decoding of submanifest %s '
                'failed with %s' % (req.path, sub_req.path, err))

    def __call__(self, env, start_response):
        req = Request(env)
        # Only GET requests are inspected; everything else passes through.
        if env['REQUEST_METHOD'] not in ['GET']:
            return self.app(env, start_response)
        version, account, container, obj = split_path(
            env['PATH_INFO'], 1, 4, True)
        try:
            resp = self._app_call(env)
        except Exception:
            resp = HTTPServerError(request=req, body="error")
            return resp(env, start_response)
        status = int(self._response_status.split()[0])
        # NOTE(review): status 300 itself passes this success check —
        # confirm whether the bound was meant to be >= 300.
        if status < 200 or status > 300:
            start_response(self._response_status, self._response_headers,
                           self._response_exc_info)
            return resp
        SLOmd5 = ''
        if req.params.get('multipart-manifest') == 'get':
            if req.params.get('format') == 'raw':
                # NOTE(review): convert_segment_listing is not defined on
                # this class — this branch raises AttributeError.
                resp = self.convert_segment_listing(
                    self._response_headers, resp)
            else:
                # Hash every non-inline segment's bytes in manifest order.
                h = hashlib.md5()
                segments = self._get_manifest_read(resp)
                for seg_dict in segments:
                    if 'data' in seg_dict:
                        continue
                    sub_path = get_valid_utf8_str(seg_dict['name'])
                    sub_cont, sub_obj = split_path(sub_path, 2, 2, True)
                    h.update(self._fetch_sub_slo_segments(req, version,
                                                          account, sub_cont, sub_obj))
                SLOmd5 = h.hexdigest()
                self._post_slomd5_header(env, SLOmd5)
        # Re-issue the request downstream so the client still receives a
        # body (the captured response was consumed for hashing above).
        return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Paste-deploy entry point: return the slo_hash middleware factory,
    with local settings overriding the global ones."""
    merged = dict(global_conf)
    merged.update(local_conf)
    def slo_hash(app):
        return SLOHashMiddleware(app, merged)
    return slo_hash
|
24,470 | 310102999fd743730f8ca592147052597ffb9fd8 | from six.moves import range
from mod_pywebsocket import common
from mod_pywebsocket import stream
from mod_pywebsocket import util
def web_socket_do_extra_handshake(request):
    """Accept every handshake; this test handler needs no extra validation."""
    pass
def web_socket_transfer_data(request):
    """Send three binary frames: ASCII text, an empty payload, and all 256
    distinct byte values."""
    messages_to_send = [b'Hello, world!', b'', all_distinct_bytes()]
    for message in messages_to_send:
        # FIXME: Should use better API to send binary messages when pywebsocket
        # supports it.
        header = stream.create_header(common.OPCODE_BINARY,
                                      len(message), 1, 0, 0, 0, 0)
        request.connection.write(header + message)
def all_distinct_bytes():
    """Return a 256-byte string containing every byte value 0..255 in order."""
    return b''.join(util.pack_byte(value) for value in range(256))
|
24,471 | 4ba858de9ac75fe13fd28a8feb293941f8e7d4fc | from scrapy import Selector
from scrapy import Spider
from sinaBlogSpider.spiders.comment import get_comment_info
class Test(Spider):
    """One-off spider exercising get_comment_info on a single blog post."""
    name = 'test'
    allowed_domains = ['blog.sina.com.cn']
    # start_urls = ['http://www.sanqin.com/']
    start_urls = ['http://blog.sina.com.cn/s/comment_918c2cc40102x4q8_1.html?comment_v=articlenew']

    def parse(self, response):
        # sel = Selector(response)
        # Parse the comments of the fixed article id; nothing is yielded.
        get_comment_info(response, '918c2cc40102x4q8')
# return item |
24,472 | ad6f8248aca7022af939965bde2311e3a732f14b | '''
Load Data from a list into the Table
Using the multiple insert pattern, in this exercise, you will load the data from values_list into the table.
Instructions
100 XP
Import insert from sqlalchemy.
Build an insert statement for the census table.
Execute the statement stmt along with values_list. You will need to pass them both as arguments to connection.execute().
Print the rowcount attribute of results.
Take Hint (-30 XP)
'''
# Import insert
from sqlalchemy import insert
# Build insert statement: stmt
# NOTE(review): course-exercise snippet — census, connection and values_list
# are supplied by the exercise environment, not defined in this file.
stmt = census.insert()
# Use values_list to insert data: results
results = connection.execute(stmt, values_list)
# Print rowcount
print(results.rowcount)
|
24,473 | 63487a929a933f7ce7a4fcb60078d7b6c6c57954 | import torch
import torch.autograd as autograd
import torch.nn as nn
import numpy as np
torch.manual_seed(1)
class CBOW(nn.Module):
    """
    Continuous bag of words.

    Looks up an embedded vector for every context word, averages them, and
    maps the average through a linear layer to vocabulary logits:
        logits = mean(v[context]) * W + b
    """
    def __init__(self, embedding_size, corpus):
        super(CBOW, self).__init__()
        vocabulary = np.unique(np.array(corpus))
        vocabulary_size = vocabulary.shape[0]
        # word lookup table. Every row is an index of the vocabulary containing an embedded vector.
        self.v_embedding = nn.Embedding(vocabulary_size, embedding_size)
        # Output layer projecting the averaged embedding onto the vocabulary.
        self.linear = nn.Linear(embedding_size, vocabulary_size)
        self.vocabulary_index = dict(zip(vocabulary, range(len(vocabulary))))

    def forward(self, x):
        """x: iterable of context-word lists; returns (batch, vocab) logits."""
        idx = []
        for input_words in x:
            idx.append([self.vocabulary_index[w] for w in input_words])
        idx = torch.LongTensor(idx)
        linear_in = self.v_embedding(autograd.Variable(idx)).mean(dim=1)
        return self.linear(linear_in)

    def det_row(self, words):
        """Map target words to a LongTensor of vocabulary indices."""
        return autograd.Variable(
            torch.LongTensor([self.vocabulary_index[w] for w in words]))

    def train_model(self, batch_size, X, Y, epochs=100):
        """Train with SGD + cross-entropy on context batches X, targets Y."""
        iterations = X.shape[0] // batch_size
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(self.parameters(), lr=0.1)
        for epoch in range(epochs):
            c = 0
            for i in range(iterations):
                x = X[c: c + batch_size]
                y = self.det_row(Y[c: c + batch_size])
                c += batch_size
                y_pred = self.forward(x)
                optimizer.zero_grad()
                loss = criterion(y_pred, y)
                loss.backward()
                optimizer.step()
            # Bug fixes: the original printed on every epoch NOT divisible
            # by 15 (inverted periodic-logging condition), and indexed a
            # 0-dim tensor with loss.data[0], which raises on modern PyTorch.
            if epoch % 15 == 0:
                print(loss.item())
if __name__ == '__main__':
    CONTEXT_SIZE = 2  # 2 words to the left, 2 to the right
    raw_text = """We are about to study the idea of a computational process. Computational processes are abstract
    beings that inhabit computers. As they evolve, processes manipulate other abstract
    things called data. The evolution of a process is directed by a pattern of rules
    called a program. People create programs to direct processes. In effect,
    we conjure the spirits of the computer with our spells.""".lower().split()
    word_to_ix = {word: i for i, word in enumerate(set(raw_text))}  # NOTE(review): unused below
    X = []
    Y = []
    # Build (context, target) pairs with a +/- 2-word window.
    for i in range(2, len(raw_text) - 2):
        context = [raw_text[i - 2], raw_text[i - 1], raw_text[i + 1], raw_text[i + 2]]
        target = raw_text[i]
        X.append(context)
        Y.append(target)
    X = np.array(X)
    Y = np.array(Y)
    model = CBOW(embedding_size=10,
                 corpus=raw_text)
    model.train_model(batch_size=10,
                      X=X,
                      Y=Y,
                      epochs=500)
|
24,474 | a5128367371ef5cecd6cff140d4cea171d8c54d0 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 24 14:00:33 2019
@author: ROBERTO MARIO
"""
# Lookup table from plot code to variety name; code '301' maps to bare soil.
varieties={
    '101':'Calmant',
    '102':'Nautica',
    '103':'Mast',
    '104':'Argosy',
    '105':'Dresden',
    '106':'Compass',
    '107':'Apex',
    '108':'Knight Rider',
    '109':'Red Rider',
    '110':'Majesty',
    '111':'Sheek',
    '201':'Mast',
    '202':'Argosy',
    '203':'Red Rider',
    '204':'Apex',
    '205':'Nautica',
    '206':'Dresden',
    '207':'Knight Rider',
    '208':'Calmant',
    '209':'Majesty',
    '210':'Compass',
    '211':'Sheek',
    '301':'Soil'}
print(varieties.get(str(101))) |
24,475 | 7dc66d17bda49893f2c0e137c2269b3721cdb321 | '''
Script for FSK modulation
Frequency modulation based on digital or binary information
'''
import numpy as np
import matplotlib.pyplot as plt
import sounddevice as sd
from scipy.io import wavfile
def stringToBits(string: str) -> np.array:
    """Encode *string* as a flat uint8 array of bits, MSB first per char."""
    codes = np.fromiter((ord(ch) for ch in string), dtype="uint8",
                        count=len(string))
    return np.unpackbits(codes)
def bitsToString(bits: np.array) -> str:
    """Decode a bit array (as produced by stringToBits) back into a string."""
    byte_values = np.packbits(bits)
    return "".join(chr(value) for value in byte_values)
def randomBitArray(n):
    """Return *n* uniformly random bits (0/1) as a numpy integer array."""
    return np.random.randint(0, 2, size=n)
def bitsToWave(bits, Fs=44100, baud=10):
    """Expand a bit sequence into a bipolar sample train.

    Each bit is held for Fs/baud samples; 1 -> +1.0 and 0 -> -1.0.
    """
    samples_per_bit = int(Fs / baud)
    held = np.repeat(bits, samples_per_bit)
    return 2.0 * held - 1.0
def FSKMod(bitwave, Fs=44100, f0=1400.0, df=500.0):
    """Frequency-shift-key *bitwave*: instantaneous frequency f0 + df*bit.

    Returns [t, signal] where t spans 0 .. len(bitwave)/Fs seconds.
    """
    duration = len(bitwave) / Fs
    t = np.linspace(0.0, duration, len(bitwave))
    modulated = np.sin(2.0 * np.pi * (f0 + df * bitwave) * t)
    return [t, modulated]
def playSound(t, Fs=44100):
    """Play samples *t* through the default audio device (non-blocking)."""
    sd.play(t, Fs)
def sincronizeMessage(s: str, INIT_STREAM='2wLQTcNgiXyP<{', END_STREAM='}>ggIVZMbi09VM') -> str:
    """Frame *s* between fixed start/end marker sequences so the receiver
    can locate the payload inside the demodulated stream."""
    framed = "{}{}{}".format(INIT_STREAM, s, END_STREAM)
    return framed
if __name__ == '__main__':
    # Frame a payload, modulate it at 25 baud and play it out loud.
    bits = stringToBits(sincronizeMessage("LJlqRK0sLItJH3dzgpoYb0g79fXs7u2dr67lxY2GYhTiiwyH7y"))
    mb = bitsToWave(bits,baud=25)
    t, x = FSKMod(mb)
    playSound(x)
    # Wait for the sound to finish before continuing the script
    sd.wait()
    # wavfile.write('hello.wav', 44100, np.int32((2**31 - 1) * x))
    #
    # ## Plotting Data ##
    # ax_bit = plt.subplot(211)
    # ax_bit.plot(t, mb)
    # ax_bit.set_ylabel('Mensagem binária')
    # ax_bit.grid(True)
    #
    # ax_mod = plt.subplot(212)
    # ax_mod.plot(t, x)
    # ax_mod.set_ylabel('Mensagem modulada')
    # ax_mod.set_xlabel('Tempo (s)')
    # ax_mod.grid(True)
    #
    # plt.show()
|
24,476 | 3b073b69f077fed72e93204fccb30d35177efbae | from django.db import models
from datetime import datetime
class Manager(models.Model):
    """A staff manager who supervises Workers."""
    name = models.CharField(max_length =200)
    photo = models.ImageField(blank = True, upload_to='photos/%Y/%m/%d/')
    designation = models.CharField(max_length=200)
    description = models.TextField(blank = True)
    phone = models.CharField(max_length=20)
    email = models.EmailField(max_length=50)
    is_available = models.BooleanField(default=True)
    # NOTE(review): datetime.now is naive; django.utils.timezone.now is the
    # usual default in timezone-aware projects — confirm the USE_TZ setting.
    hire_date = models.DateTimeField(default = datetime.now)

    def __str__(self):
        return f"{self.name} is a {self.designation}"
class Worker(models.Model):
    """A service worker assigned to a Manager."""
    # (value, human-readable description) choices for the work field.
    designation = [
        ('Plumber',('Efficient in Plumbing work')),
        ('electrician',('Efficient in handling electircal work')),
        ('House_maid',('For household activities')),
    ]
    name = models.CharField(max_length=200)
    work = models.CharField(max_length=200,choices=designation,default='House_maid')
    manager = models.ForeignKey(Manager, on_delete=models.DO_NOTHING)
    address = models.TextField()
    phone = models.CharField(max_length=20)
    # NOTE(review): field name is misspelled ('is_availabe'); renaming
    # requires a schema migration, so it is left unchanged here.
    is_availabe = models.BooleanField(default=True)
    hire_date = models.DateTimeField(default = datetime.now)
    photo = models.ImageField(blank=True)

    def __str__(self):
        return f"{self.name} is a {self.work}"
|
24,477 | fa121bfbd13ff98d55aa018feb3f5f00f72e3522 | '''What is the greatest product of four adjacent numbers in the same
direction (up, down, left, right, or diagonally) in the 20×20 grid?'''
from functools import reduce
matriz = []
def multiplicar(arr):
    """Return the product of every element of *arr* (1 for an empty list)."""
    product = 1
    for value in arr:
        product *= value
    return product
def maxFilas():
    """Largest product of 4 horizontally adjacent values in the global grid."""
    best = 0
    for fila_actual in matriz:
        for inicio in range(len(fila_actual) - 3):
            best = max(best, multiplicar(fila_actual[inicio:inicio + 4]))
    return best
def maxColumnas():
    '''
    Maximum product of 4 vertically consecutive values in the global matriz.
    :return: the best column-window product found (0 if no window fits)
    '''
    mejor = 0
    for fila in range(len(matriz) - 3):
        for columna in range(len(matriz[fila])):
            ventana = [matriz[fila + paso][columna] for paso in range(4)]
            mejor = max(mejor, multiplicar(ventana))
    return mejor
def maxDiagonal():
    '''
    Maximum product of 4 diagonally consecutive values in the global matriz,
    scanning both the down-right and the down-left directions.
    :return: the best diagonal-window product found (0 if no window fits)
    '''
    mejor = 0
    # down-right diagonals
    for fila in range(len(matriz) - 3):
        for columna in range(len(matriz[fila]) - 3):
            ventana = [matriz[fila + paso][columna + paso] for paso in range(4)]
            mejor = max(mejor, multiplicar(ventana))
    # down-left diagonals: need at least 3 columns of room to the left
    for fila in range(len(matriz) - 3):
        for columna in range(3, len(matriz[fila])):
            ventana = [matriz[fila + paso][columna - paso] for paso in range(4)]
            mejor = max(mejor, multiplicar(ventana))
    return mejor
# Read the 20x20 grid (one row of whitespace-separated integers per line)
# and print the greatest product of four adjacent numbers in any direction.
for _ in range(20):
    matriz.append(list(map(int, input().split())))
print(max((maxFilas(),maxColumnas(), maxDiagonal())))
24,478 | a7df0742283f34fca20374cfb2daff0364f05a32 | import sys
import json
import os.path
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtGui import QIcon
import PyQt5
from shutil import copyfile, copytree
class PopUp(PyQt5.QtWidgets.QDialog):
    """Modal dialog that collects the connection details of a new database
    entry and appends it to config.json."""
    def __init__(self):
        super().__init__()
        # NOTE(review): the file object returned by open() is never closed.
        self.config_data = json.load(open('config.json'))
        self.initUI()
    def initUI(self):
        """Size, center and populate the dialog, then show it."""
        self.setWindowTitle("Add new database")
        self.center()
        self.resize(500, 150)
        self.show_dialog()
        self.show()
    def center(self):
        """Move the dialog to the center of the available screen area."""
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
    def update_json_database(self):
        """Store the entered database under its name in config.json (new
        databases start with empty workload/UDF lists) and close the dialog."""
        self.config_data["databases"][self.textbox_0.text()] = {
            "URL": self.textbox_1.text(),
            "Workloads": [],
            "UserDefinedFunctions": [],
            "UserName": self.textbox_2.text(),
            "Passwd": self.textbox_3.text()
        }
        # NOTE(review): credentials are persisted in plaintext JSON.
        json.dump(self.config_data, open('config.json', 'w'))
        self.close()
    def show_dialog(self):
        """Build the four labelled text fields plus the Finished button and
        lay them out on a grid."""
        self.label_0 = QLabel("Database Name:")
        self.textbox_0 = QLineEdit()
        self.label_1 = QLabel("Server URL:")
        self.textbox_1 = QLineEdit()
        self.label_2 = QLabel("User Name:")
        self.textbox_2 = QLineEdit()
        self.label_3 = QLabel("Password:")
        self.textbox_3 = QLineEdit()
        self.button_1 = QPushButton('Finished')
        self.button_1.clicked.connect(self.update_json_database)
        '''
        self.textbox.move(20, 20)
        self.textbox.resize(280, 40)
        '''
        self.grid_layout = QGridLayout()
        self.grid_layout.addWidget(self.label_0, 0, 0)
        self.grid_layout.addWidget(self.textbox_0, 0, 2)
        self.grid_layout.addWidget(self.label_1, 1, 0)
        self.grid_layout.addWidget(self.textbox_1, 1, 2)
        self.grid_layout.addWidget(self.label_2, 2, 0)
        self.grid_layout.addWidget(self.textbox_2, 2, 2)
        self.grid_layout.addWidget(self.label_3, 3, 0)
        self.grid_layout.addWidget(self.textbox_3, 3, 2)
        self.grid_layout.addWidget(self.button_1, 4, 1)
        self.setLayout(self.grid_layout)
class GUI(QMainWindow):
    """Main window of OpenIS: four selector rows (database, workload, user
    defined function, algorithm) backed by config.json, plus buttons to
    register new entries and one to write the current selection into the
    Java driver source via replace_file()."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        """Load config.json, then size, center and populate the window."""
        self.setWindowTitle("OpenIS")
        # NOTE(review): the file object returned by open() is never closed.
        self.config_data = json.load(open('config.json'))
        self.center()
        self.resize(500, 400)
        self.add_grid_layout()
        self.show()
    def center(self):
        """Move the window to the center of the available screen area."""
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
    def add_grid_layout(self):
        """Create all labels, combo boxes and buttons and arrange them on a
        grid that becomes the central widget."""
        self.w = None
        self.label_1 = QLabel("select database")
        self.label_2 = QLabel("select workload")
        self.label_3 = QLabel("select user defined function")
        self.label_4 = QLabel("select algorithms")
        self.button_1 = QPushButton('add new database')
        self.button_1.clicked.connect(self.add_database)
        self.button_2 = QPushButton('add new workload')
        self.button_2.clicked.connect(self.add_workload)
        self.button_3 = QPushButton('add new user defined function')
        self.button_3.clicked.connect(self.add_user_function)
        self.button_4 = QPushButton('add new algorithms')
        self.button_4.clicked.connect(self.add_algorithms)
        self.comboBox_1 = QComboBox(self)
        self.generate_combo_box(self.config_data["databases"], self.comboBox_1)
        # Changing the selected database repopulates the workload/UDF combos.
        self.comboBox_1.currentTextChanged.connect(self.combo_update)
        self.comboBox_2 = QComboBox(self)
        if self.comboBox_1.currentText() != "":
            print(self.comboBox_1.currentText())
            self.generate_combo_box(self.config_data["databases"][self.comboBox_1.currentText()]["Workloads"], self.comboBox_2)
        self.comboBox_3 = QComboBox(self)
        if self.comboBox_1.currentText() != "":
            self.generate_combo_box(self.config_data["databases"][self.comboBox_1.currentText()]["UserDefinedFunctions"], self.comboBox_3)
        self.comboBox_4 = QComboBox(self)
        self.generate_combo_box(self.config_data["algorithms"], self.comboBox_4)
        self.button_run = QPushButton('Set Parameters')
        self.button_run.clicked.connect(self.run_program)
        grid_layout = QGridLayout()
        grid_layout.addWidget(self.label_1, 1, 0)
        grid_layout.addWidget(self.comboBox_1, 1, 1)
        grid_layout.addWidget(self.button_1, 1, 3)
        grid_layout.addWidget(self.label_2, 2, 0)
        grid_layout.addWidget(self.comboBox_2, 2, 1)
        grid_layout.addWidget(self.button_2, 2, 3)
        grid_layout.addWidget(self.label_3, 3, 0)
        grid_layout.addWidget(self.comboBox_3, 3, 1)
        grid_layout.addWidget(self.button_3, 3, 3)
        grid_layout.addWidget(self.label_4, 4, 0)
        grid_layout.addWidget(self.comboBox_4, 4, 1)
        grid_layout.addWidget(self.button_4, 4, 3)
        grid_layout.addWidget(self.button_run, 5, 1)
        layout_widget = QWidget()
        layout_widget.setLayout(grid_layout)
        self.setCentralWidget(layout_widget)
    def combo_update(self):
        """Repopulate the workload and UDF combos for the selected database
        (no-op while the database combo is empty)."""
        if self.comboBox_1.currentText() == '':
            return
        self.generate_combo_box(self.config_data["databases"][self.comboBox_1.currentText()]["Workloads"], self.comboBox_2)
        self.generate_combo_box(self.config_data["databases"][self.comboBox_1.currentText()]["UserDefinedFunctions"], self.comboBox_3)
    @staticmethod
    def generate_combo_box(data, combo):
        """Replace *combo*'s items with the keys/elements of *data*."""
        combo.clear()
        for key in data:
            combo.addItem(key)
    def update_first_3_combo(self):
        """Refresh the database combo and, through it, the dependent combos."""
        self.generate_combo_box(self.config_data["databases"], self.comboBox_1)
        self.combo_update()
    def add_database(self):
        """Run the PopUp dialog, then reload config.json and the combos."""
        self.w = PopUp()
        self.w.exec_()
        self.config_data = json.load(open('config.json'))
        self.update_first_3_combo()
    def add_workload(self):
        """Copy a user-chosen workload directory into the data tree of the
        selected database and register it in config.json."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        dir_name = QFileDialog.getExistingDirectory(self, "select workload directory")
        if dir_name == "":
            return
        # Last path component; handles both '/' and '\' separators.
        workload_name = dir_name.split('/')[-1].split('\\')[-1]
        new_dir_name = './IndexSelectionTools/data/{}/{}'.format(self.comboBox_1.currentText(), workload_name)
        if not os.path.isdir(new_dir_name):
            copytree(dir_name, './IndexSelectionTools/data/{}/{}'.format(self.comboBox_1.currentText(), workload_name))
        if workload_name not in self.config_data["databases"][self.comboBox_1.currentText()]["Workloads"]:
            self.config_data["databases"][self.comboBox_1.currentText()]["Workloads"].append(workload_name)
            json.dump(self.config_data, open('config.json', 'w'))
        self.generate_combo_box(self.config_data["databases"][self.comboBox_1.currentText()]["Workloads"], self.comboBox_2)
    def add_user_function(self):
        """Copy a user-chosen .java file into the userfunctions source tree
        and register its class name for the selected database."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        file_name, _ = QFileDialog.getOpenFileName(self, "select user function file", "", "All Files (*);;Java Files (*.java)", options=options)
        if file_name == "":
            return
        class_name = file_name.split('/')[-1].split('\\')[-1].split('.')[0]
        new_file_name = './IndexSelectionTools/src/eecs584/project/userfunctions/{}.java'.format(class_name)
        if not os.path.isfile(new_file_name):
            copyfile(file_name, new_file_name)
        if class_name not in self.config_data["databases"][self.comboBox_1.currentText()]["UserDefinedFunctions"]:
            self.config_data["databases"][self.comboBox_1.currentText()]["UserDefinedFunctions"].append(class_name)
            json.dump(self.config_data, open('config.json', 'w'))
        self.generate_combo_box(self.config_data["databases"][self.comboBox_1.currentText()]["UserDefinedFunctions"], self.comboBox_3)
    def add_algorithms(self):
        """Copy a user-chosen .java file into the algorithms source tree and
        register its class name globally in config.json."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        file_name, _ = QFileDialog.getOpenFileName(self, "select algorithm file", "", "All Files (*);;Java Files (*.java)", options=options)
        if file_name == "":
            return
        class_name = file_name.split('/')[-1].split('\\')[-1].split('.')[0]
        new_file_name = './IndexSelectionTools/src/eecs584/project/algorithms/{}.java'.format(class_name)
        if not os.path.isfile(new_file_name):
            copyfile(file_name, new_file_name)
        if class_name not in self.config_data["algorithms"]:
            self.config_data["algorithms"].append(class_name)
            json.dump(self.config_data, open('config.json', 'w'))
        self.generate_combo_box(self.config_data["algorithms"], self.comboBox_4)
    def run_program(self):
        """Write the four current selections into the Java driver source."""
        replace_file(self.comboBox_3.currentText(), self.comboBox_4.currentText(), self.comboBox_1.currentText(), self.comboBox_2.currentText())
def replace_file(user_class_name, alg_class_name, database, workload):
    """Rewrite the six generated lines that follow the special marker comment
    in Driver.java so the Java driver uses the selected user function,
    algorithm, database and workload.

    @param user_class_name: class name of the user defined function
    @param alg_class_name: class name of the selection algorithm
    @param database: database key chosen in the GUI
    @param workload: workload name chosen in the GUI
    """
    driver_path = "./IndexSelectionTools/src/eecs584/project/indexselection/Driver.java"
    # Context managers guarantee the handles are closed even if the template
    # is shorter than expected and the indexing below raises (the previous
    # version leaked the handle in that case).
    with open(driver_path, "r") as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if line.strip() == "//SPACIALMAKR, DO NOT CHANGE":
            lines[i + 1] = "\t\tBasicTemplate userDefinedClass = new {}();\n".format(user_class_name)
            lines[i + 2] = "\t\tBasicAlgTemplate alg = new {}();\n".format(alg_class_name)
            lines[i + 3] = "\t\tdatabase = \"{}\";\n".format(database)
            lines[i + 4] = "\t\tworkload = \"{}\";\n".format(workload)
            lines[i + 5] = "\t\tString userClassName = \"{}\";\n".format(user_class_name)
            lines[i + 6] = "\t\tString algName = \"{}\";\n".format(alg_class_name)
            break
    with open(driver_path, "w") as f:
        f.writelines(lines)
if __name__ == "__main__":
    # Start the Qt event loop; exec_() blocks until the window closes and
    # its status code becomes the process exit code.
    app = QApplication(sys.argv)
    gui = GUI()
    gui.show()
    sys.exit(app.exec_())
|
24,479 | e7ed38d27d2d2357a0cdd2097cd2a8decd83db70 | from django.db import models
class Product(models.Model):
    """A catalog product with pricing and categorization."""
    name = models.CharField(max_length = 50)
    description = models.CharField(max_length=255)
    # weight is optional both in forms (blank) and in the database (null)
    weight = models.IntegerField(blank=True, null=True)
    # NOTE(review): integer money fields — presumably whole currency units;
    # confirm DecimalField is not needed for fractional amounts.
    prices = models.IntegerField()
    cost = models.IntegerField()
    category = models.CharField(max_length=50)
# Create your models here.
|
24,480 | bec3d082db27f7d56c165b24bde8070b6665bf28 | from builtins import str
from past.builtins import str
def hash_json( hash_obj, value ):
    """
    Update *hash_obj* with a canonical rendering of a parsed JSON value.
    This hashes the object tree produced by parsing JSON, not JSON text.
    Mapping-like values are hashed entry by entry in lexicographical key
    order; iterables are hashed in their inherent order, so iterables with
    non-deterministic ordering (e.g. sets) give non-deterministic results.

    :param hash_obj: one of the Hash objects in hashlib, or any object with
           an update(s) method accepting a single byte string.
    :type value: int|str|float|Iterable[type(obj)]|Hashable[str,type(obj)]
    :param value: the value to be hashed

    >>> import hashlib
    >>> def actual(x): h = hashlib.md5(); hash_json(h,x); return h.hexdigest()
    >>> def expect(s): h = hashlib.md5(); h.update(s.encode('utf-8')); return h.hexdigest()
    >>> actual(True) == expect('true')
    True
    >>> actual([0,1]) == expect('[0,1]')
    True
    >>> actual({'0':0,'1':1}) == expect('{0:0,1:1}')
    True
    >>> actual(object())
    Traceback (most recent call last):
    ...
    ValueError: Type "object" is not supported.
    """
    # Mapping-like values come first, as before: anything exposing items()
    # is treated as a JSON object.
    try:
        entries = iter( list( value.items( ) ) )
    except AttributeError:
        pass
    else:
        _hash_hashable( hash_obj, entries )
        return
    # Strings must be handled before generic iteration since they iterate.
    if isinstance( value, str ):
        _hash_string( hash_obj, value )
        return
    try:
        element_iter = iter( value )
    except TypeError:
        pass
    else:
        _hash_iterable( hash_obj, element_iter )
        return
    # bool is a subclass of int, so it has to be tested first.
    if isinstance( value, bool ):
        _hash_bool( hash_obj, value )
    elif isinstance( value, (int, float) ):
        _hash_number( hash_obj, value )
    else:
        raise ValueError( 'Type "%s" is not supported.' % type( value ).__name__ )
def _hash_number( hash_obj, n ):
    # Numbers are hashed as the UTF-8 bytes of their decimal text form.
    text = str( n )
    hash_obj.update( text.encode('utf-8') )
def _hash_bool( hash_obj, b ):
    # JSON spells booleans in lowercase.
    token = 'true' if b else 'false'
    hash_obj.update( token.encode('utf-8') )
def _hash_string( hash_obj, s ):
    # A JSON string hashes as the UTF-8 bytes of its double-quoted form.
    hash_obj.update( ('"' + s + '"').encode('utf-8') )
def _hash_iterable( hash_obj, items ):
    # JSON array syntax: '[' elem (',' elem)* ']'.
    hash_obj.update( '['.encode('utf-8') )
    for position, element in enumerate( items ):
        if position:
            hash_obj.update( ','.encode('utf-8') )
        hash_json( hash_obj, element )
    hash_obj.update( ']'.encode('utf-8') )
def _hash_hashable( hash_obj, items ):
    # JSON object syntax: '{' entry (',' entry)* '}'. Entries are hashed in
    # sorted key order so dict iteration order does not matter.
    hash_obj.update( '{'.encode('utf-8') )
    for position, entry in enumerate( sorted( items ) ):
        if position:
            hash_obj.update( ','.encode('utf-8') )
        _hash_hashable_item( hash_obj, entry )
    hash_obj.update( '}'.encode('utf-8') )
def _hash_hashable_item( hash_obj, k_v ):
    key, val = k_v
    # JSON objects only allow string keys; reject anything else up front.
    if not isinstance( key, str ):
        raise ValueError( 'Dictionary keys must be strings, not type "%s".' % type( key ).__name__ )
    hash_obj.update( key.encode('utf-8') )
    hash_obj.update( ':'.encode('utf-8') )
    hash_json( hash_obj, val )
|
24,481 | cf66013f2f999c17b5b6165624ee45902d23cd92 |
def powerset(iterable):
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    elements = list(iterable)
    # All subsets, grouped by size, from the empty set up to the full set.
    subsets_by_size = (combinations(elements, size) for size in range(len(elements) + 1))
    return from_iterable(subsets_by_size)
def combinations(iterable, r):
    """Yield all r-length tuples of elements of *iterable* in positional
    order, mirroring itertools.combinations.

    combinations('ABCD', 2) --> AB AC AD BC BD CD
    combinations(range(4), 3) --> 012 013 023 123
    """
    pool = tuple(iterable)
    n = len(pool)
    if r > n:
        return
    # Must be a real list: the indices are mutated in place below, and on
    # Python 3 range() returns an immutable range object (on Python 2 this
    # list() call is a no-op copy).
    indices = list(range(r))
    yield tuple(pool[i] for i in indices)
    while True:
        # Find the rightmost index that can still be advanced.
        for i in reversed(range(r)):
            if indices[i] != i + n - r:
                break
        else:
            return  # every index is at its maximum: enumeration complete
        indices[i] += 1
        # Reset all indices to the right to their minimal ascending values.
        for j in range(i+1, r):
            indices[j] = indices[j-1] + 1
        yield tuple(pool[i] for i in indices)
def from_iterable(iterables):
    # chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
    for inner in iterables:
        for element in inner:
            yield element
def process(s):
    """Find two distinct subsets of the input numbers with equal sums.

    *s* is a line of the form "N a1 a2 ... aN". Returns a string holding the
    two colliding subsets (one per line, elements space-separated) or None
    when every subset sum seen is unique.
    """
    # list(...) keeps this working on Python 3, where map() returns a lazy
    # iterator that cannot be sliced; on Python 2 it is a no-op copy.
    all_nums = list(map(int, s.split()))
    num_set = all_nums[1:]
    ps = powerset(num_set)
    # subset sum -> first subset that produced it
    d = {}
    for subset in ps:  # renamed: the original loop variable shadowed *s*
        the_sum = sum(subset)
        if the_sum in d:
            return "\n" + ' '.join([str(e) for e in subset]) + "\n" + ' '.join([str(e) for e in d[the_sum]])
        d[the_sum] = subset
    return
# NOTE(review): this driver is Python 2 only (raw_input, xrange and the
# print statement); process()/powerset() themselves are version-agnostic.
#print process('20 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20')
#print process('20 120 266 858 1243 1657 1771 2328 2490 2665 2894 3117 4210 4454 4943 5690 6170 7048 7125 9512 9600')
number_of_cases = int(raw_input())
for case_number in xrange(1, number_of_cases+1):
    s = raw_input()
    result = process(s)
    print "Case #%d: %s" % (case_number, result)
    # NOTE(review): dead statement — xrange rebinds case_number each pass.
    case_number += 1
24,482 | 9a9f8a37905fa85e3ae04191f2727a27f171685d | import requests
import json
# Load RapidAPI credentials and endpoint fragments; the context manager
# guarantees the config handle is closed (previously it was leaked).
with open('x-rapidapi-key.json') as f:
    data = json.load(f)

# Tournament whose match list is downloaded.
tournament_id = '1143'

url = data['api-url-endpoint'] + data['api-matches-by-tournament-id-endpoint'] + tournament_id

headers = {
    'x-rapidapi-key': data['x-rapidapi-key'],
    'x-rapidapi-host': data['x-rapidapi-host']
}

response = requests.request("GET", url, headers=headers)

# Persist the raw JSON payload only on success; otherwise report the status.
if response.status_code == 200:
    with open(f'matches-{tournament_id}.json', 'wb') as outf:
        outf.write(response.content)
else:
    print(f'Error ocurred with status code: {response.status_code}')
24,483 | cef32ca7166842ee3f2d9e84b6fe2ef775a6fbbe | #to work save it on board as main.py
import network, ntptime
from machine import RTC
# Index of each field within the 8-tuple accepted/returned by
# machine.RTC.datetime(): (year, month, day, weekday, hour, minute,
# second, millisecond).
DATETIME_ELEMENTS = {
    "year": 0,
    "month": 1,
    "day": 2,
    "day_of_week": 3,
    "hour": 4,
    "minute": 5,
    "second": 6,
    "millisecond": 7,
}
def connect_to_wifi(wlan, ssid, password):
    """Join *ssid* with *password* and block until *wlan* reports connected.

    No-op if the interface is already connected."""
    if wlan.isconnected():
        return
    print("Connecting ....")
    wlan.connect(ssid, password)
    # Busy-wait until the association completes.
    while not wlan.isconnected():
        pass
def set_datetime_element(rtc, datetime_element, value):
    """Overwrite one named field of the RTC's current datetime tuple."""
    snapshot = list(rtc.datetime())
    snapshot[DATETIME_ELEMENTS[datetime_element]] = value
    rtc.datetime(snapshot)
# Bring up the station interface and join the hard-coded home network.
# NOTE(review): Wi-Fi credentials are committed in plaintext here.
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
connect_to_wifi(wlan, "kilimanjarro", "kasiapaweldaciamelamerc")
rtc = RTC()
# Sync the RTC from NTP, then manually pin month/day/hour fields.
ntptime.settime()
print(rtc.datetime())
set_datetime_element(rtc, "month", 6)
set_datetime_element(rtc, "day", 13)
set_datetime_element(rtc, "hour", 13)
print(rtc.datetime())
24,484 | fdda18de2d71bf2216d4632893b191eebca90fec | __author__ = 'Copyright (c) 2013 Alan Yorinks All rights reserved.'
"""
Copyright (c) 2013-15 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import threading
import time
class PyMataCommandHandler(threading.Thread):
"""
This class handles all data interchanges with Firmata
The receive loop runs in its own thread.
Messages to be sent to Firmata are queued through a deque to allow for priority
messages to take precedence. The deque is checked within the receive loop for any
outgoing messages.
There is no blocking in either communications direction.
There is blocking when accessing the data tables through the _data_lock
"""
# the following defines are from Firmata.h
# message command bytes (128-255/ 0x80- 0xFF)
# from this client to firmata
MSG_CMD_MIN = 0x80 # minimum value for a message from firmata
REPORT_ANALOG = 0xC0 # enable analog input by pin #
REPORT_DIGITAL = 0xD0 # enable digital input by port pair
SET_PIN_MODE = 0xF4 # set a pin to INPUT/OUTPUT/PWM/etc
START_SYSEX = 0xF0 # start a MIDI Sysex message
END_SYSEX = 0xF7 # end a MIDI Sysex message
SYSTEM_RESET = 0xFF # reset from MIDI
# messages from firmata
DIGITAL_MESSAGE = 0x90 # send or receive data for a digital pin
ANALOG_MESSAGE = 0xE0 # send or receive data for a PWM configured pin
REPORT_VERSION = 0xF9 # report protocol version
# user defined SYSEX commands
ENCODER_CONFIG = 0x20 # create and enable encoder object
TONE_PLAY = 0x5F # play a tone at a specified frequency and duration
SONAR_CONFIG = 0x60 # configure pins to control a Ping type sonar distance device
ENCODER_DATA = 0x21 # current encoder position data
SONAR_DATA = 0x61 # distance data returned
SERVO_CONFIG = 0x70 # set servo pin and max and min angles
STRING_DATA = 0x71 # a string message with 14-bits per char
STEPPER_DATA = 0x72 # Stepper motor command
I2C_REQUEST = 0x76 # send an I2C read/write request
I2C_REPLY = 0x77 # a reply to an I2C read request
I2C_CONFIG = 0x78 # config I2C settings such as delay times and power pins
REPORT_FIRMWARE = 0x79 # report name and version of the firmware
SAMPLING_INTERVAL = 0x7A # modify the sampling interval
EXTENDED_ANALOG = 0x6F # analog write (PWM, Servo, etc) to any pin
PIN_STATE_QUERY = 0x6D # ask for a pin's current mode and value
PIN_STATE_RESPONSE = 0x6E # reply with pin's current mode and value
CAPABILITY_QUERY = 0x6B # ask for supported modes and resolution of all pins
CAPABILITY_RESPONSE = 0x6C # reply with supported modes and resolution
ANALOG_MAPPING_QUERY = 0x69 # ask for mapping of analog to pin numbers
ANALOG_MAPPING_RESPONSE = 0x6A # reply with analog mapping data
# reserved values
SYSEX_NON_REALTIME = 0x7E # MIDI Reserved for non-realtime messages
SYSEX_REALTIME = 0x7F # MIDI Reserved for realtime messages
# The response tables hold response information for all pins
# Each table is a table of entries for each pin, which consists of the pin mode, its last value from firmata
# and a callback function that the user attached to the pin
# This is a table that stores analog pin modes and data
# each entry represents ia mode (INPUT or OUTPUT), and its last current value
analog_response_table = []
# This is a table that stores digital pin modes and data
# each entry represents its mode (INPUT or OUTPUT, PWM, SERVO, ENCODER), and its last current value
digital_response_table = []
# The analog and digital latch tables will store "latched" data for input pins.
# If a pin is armed, the latest value will be stored and maintained until
# the data is read, and the data is cleared from the latch and the latch rearmed.
# The table consists of a list of lists sized by the number of pins for the board. It is ordered by pin number
# and each list entry contains a latch state, a value and a date stamp when latched.
# An armed state = 0 and a latched state = 1
# analog_latch_table entry = [latched_state, threshold_type, threshold_value, latched_data, time_stamp]
# digital_latch_table_entry = [latched_state, threshold_type, latched_data, time_stamp]
analog_latch_table = []
digital_latch_table = []
# index into latch tables
LATCH_STATE = 0
LATCHED_THRESHOLD_TYPE = 1
ANALOG_LATCH_DATA_TARGET = 2
ANALOG_LATCHED_DATA = 3
ANALOG_TIME_STAMP = 4
ANALOG_LATCH_CALLBACK = 5
DIGITAL_LATCHED_DATA = 2
DIGITAL_TIME_STAMP = 3
DIGITAL_LATCH_CALLBACK = 4
# latch states
LATCH_IGNORE = 0 # this pin will be ignored for latching
LATCH_ARMED = 1 # When the next pin value change is received for this pin, if it matches the latch criteria
# the data will be latched
LATCH_LATCHED = 2 # data has been latched. Read the data to re-arm the latch
# latch threshold types
DIGITAL_LATCH_LOW = 0 # for digital pins
DIGITAL_LATCH_HIGH = 1 # for digital pins
ANALOG_LATCH_GT = 2 # greater than for analog
ANALOG_LATCH_LT = 3 # less than for analog
ANALOG_LATCH_GTE = 4 # greater than or equal to for analog
ANALOG_LATCH_LTE = 5 # less than or equal to for analog
# These values are indexes into the response table entries
RESPONSE_TABLE_MODE = 0
RESPONSE_TABLE_PIN_DATA_VALUE = 1
RESPONSE_TABLE_CALLBACK = 2
# These values are the index into the data passed by _arduino and used to reassemble integer values
MSB = 2
LSB = 1
# This is a map that allows the look up of command handler methods using a command as the key.
# This is populated in the run method after the python interpreter sees all of the command handler method
# defines (python does not have forward referencing)
# The "key" is the command, and the value contains is a list containing the method name and the number of
# parameter bytes that the method will require to process the message (in some cases the value is unused)
command_dispatch = {}
# this deque is used by the methods that assemble messages to be sent to Firmata. The deque is filled outside of
# of the message processing loop and emptied within the loop.
command_deque = None
# firmata version information - saved as a list - [major, minor]
firmata_version = []
# firmata firmware version information saved as a list [major, minor, file_name]
firmata_firmware = []
# a lock to protect the data tables when they are being accessed
data_lock = None
# total number of pins for the discovered board
total_pins_discovered = 0
# total number of analog pins for the discovered board
number_of_analog_pins_discovered = 0
# The i2c_map will contain keys of i2c device addresses, and an associated list.
# The associated list will contain 2 elements:
# 1. A callback reference. This reference will be set to None if no callback was registered.
# 2. Data returned from a an i2c read request.
i2c_map = {}
# the active_sonar_map maps the sonar trigger pin number (the key) to the current data value returned
# if a callback was specified, it is stored in the map as well.
# an entry in the map consists of:
# pin: [callback,[current_data_returned]]
active_sonar_map = {}
# the stepper library version number.
stepper_library_version = 0
    def __init__(self, pymata):
        """
        constructor for CommandHandler class
        @param pymata: A reference to the pymata instance.
        """
        # reference pointer to pymata
        self.pymata = pymata
        # this list contains the results of the last pin query
        self.last_pin_query_results = []
        # this stores the results of a capability request
        self.capability_query_results = []
        # this stores the results of an analog mapping query
        self.analog_mapping_query_results = []
        self.total_pins_discovered = 0
        self.number_of_analog_pins_discovered = 0
        # NOTE(review): the response/latch tables are declared as mutable
        # class attributes, so they are shared by all instances; a second
        # PyMataCommandHandler would append to the same tables.
        threading.Thread.__init__(self)
        # daemon thread so the receive loop cannot keep the process alive
        self.daemon = True
        self.stop_event = threading.Event()
    def stop(self):
        """Set the stop event; observable through is_stopped()."""
        self.stop_event.set()
    def is_stopped(self):
        """Return True once stop() has been called."""
        return self.stop_event.is_set()
    def auto_discover_board(self, verbose):
        """
        This method will allow up to 30 seconds for discovery (communicating with) an Arduino board
        and then will determine a pin configuration table for the board.
        @param verbose: if True, print discovery progress/summary to stdout
        @return: True if board is successfully discovered or False upon timeout
        """
        # get current time
        start_time = time.time()
        # wait for up to 30 seconds for a successful capability query to occur
        # (analog_mapping_query_results is filled in asynchronously by the
        # receive loop when the board answers)
        while len(self.analog_mapping_query_results) == 0:
            if time.time() - start_time > 30:
                return False
            # keep sending out a capability query until there is a response
            self.send_sysex(self.ANALOG_MAPPING_QUERY, None)
            time.sleep(.1)
        if verbose:
            print("Board initialized in %d seconds" % (time.time() - start_time))
        for pin in self.analog_mapping_query_results:
            self.total_pins_discovered += 1
            # non analog pins will be marked as IGNORE
            if pin != self.pymata.IGNORE:
                self.number_of_analog_pins_discovered += 1
        if verbose:
            print('Total Number of Pins Detected = %d' % self.total_pins_discovered)
            print('Total Number of Analog Pins Detected = %d' % self.number_of_analog_pins_discovered)
        # response table initialization
        # for each pin set the mode to input and the last read data value to zero
        for pin in range(0, self.total_pins_discovered):
            response_entry = [self.pymata.INPUT, 0, None]
            self.digital_response_table.append(response_entry)
        for pin in range(0, self.number_of_analog_pins_discovered):
            response_entry = [self.pymata.INPUT, 0, None]
            self.analog_response_table.append(response_entry)
        # set up latching tables
        for pin in range(0, self.total_pins_discovered):
            digital_latch_table_entry = [0, 0, 0, 0, None]
            self.digital_latch_table.append(digital_latch_table_entry)
        for pin in range(0, self.number_of_analog_pins_discovered):
            analog_latch_table_entry = [0, 0, 0, 0, 0, None]
            self.analog_latch_table.append(analog_latch_table_entry)
        return True
    def report_version(self, data):
        """
        This method processes the report version message, sent asynchronously by Firmata when it starts up
        or after refresh_report_version() is called
        Use the api method api_get_version to retrieve this information
        @param data: Message data from Firmata ([major, minor])
        @return: No return value.
        """
        self.firmata_version.append(data[0])  # add major
        self.firmata_version.append(data[1])  # add minor
    def set_analog_latch(self, pin, threshold_type, threshold_value, cb):
        """
        This method "arms" a pin to allow data latching for the pin.
        @param pin: Analog pin number (value following an 'A' designator, i.e. A5 = 5
        @param threshold_type: ANALOG_LATCH_GT | ANALOG_LATCH_LT | ANALOG_LATCH_GTE | ANALOG_LATCH_LTE
        @param threshold_value: numerical value
        @param cb: User provided callback function
        """
        with self.pymata.data_lock:
            # entry layout: [latch_state, threshold_type, threshold_target,
            # latched_data, time_stamp, callback] — see the class-level
            # ANALOG_* index constants.
            self.analog_latch_table[pin] = [self.LATCH_ARMED, threshold_type, threshold_value, 0, 0, cb]
    def set_digital_latch(self, pin, threshold_type, cb):
        """
        This method "arms" a pin to allow data latching for the pin.
        @param pin: digital pin number
        @param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW
        @param cb: User provided callback function
        """
        with self.pymata.data_lock:
            # entry layout: [latch_state, threshold_type, latched_data,
            # time_stamp, callback] — see the class-level DIGITAL_* indexes.
            self.digital_latch_table[pin] = [self.LATCH_ARMED, threshold_type, 0, 0, cb]
    def get_analog_latch_data(self, pin):
        """
        This method reads the analog latch table for the specified pin and
        returns the current entry. Reading a latched entry clears and
        re-arms it (cleared to the all-zero/None state).
        @param pin: pin number
        @return: [pin, latch_state, latched_data, time_stamp, callback]
        """
        with self.pymata.data_lock:
            pin_data = self.analog_latch_table[pin]
            current_latch_data = [pin,
                                  pin_data[self.LATCH_STATE],
                                  pin_data[self.ANALOG_LATCHED_DATA],
                                  pin_data[self.ANALOG_TIME_STAMP],
                                  pin_data[self.ANALOG_LATCH_CALLBACK]]
            # if this is latched data, clear the latch table entry for this pin
            if pin_data[self.LATCH_STATE] == self.LATCH_LATCHED:
                self.analog_latch_table[pin] = [0, 0, 0, 0, 0, None]
        return current_latch_data
    def get_digital_latch_data(self, pin):
        """
        This method reads the digital latch table for the specified pin and
        returns the current entry. Reading a latched entry clears and
        re-arms it (cleared to the all-zero/None state).
        @param pin: pin number
        @return: [pin, latch_state, latched_data, time_stamp, callback]
        """
        with self.pymata.data_lock:
            pin_data = self.digital_latch_table[pin]
            current_latch_data = [pin,
                                  pin_data[self.LATCH_STATE],
                                  pin_data[self.DIGITAL_LATCHED_DATA],
                                  pin_data[self.DIGITAL_TIME_STAMP],
                                  pin_data[self.DIGITAL_LATCH_CALLBACK]]
            # reading latched data re-arms the latch for this pin
            if pin_data[self.LATCH_STATE] == self.LATCH_LATCHED:
                self.digital_latch_table[pin] = [0, 0, 0, 0, None]
        return current_latch_data
def report_firmware(self, data):
"""
This method processes the report firmware message, sent asynchronously by Firmata when it starts up
or after refresh_report_firmware() is called
Use the api method api_get_firmware_version to retrieve this information
@param data: Message data from Firmata
@return: No return value.
"""
self.firmata_firmware.append(data[0]) # add major
self.firmata_firmware.append(data[1]) # add minor
# extract the file name string from the message
# file name is in bytes 2 to the end
name_data = data[2:]
# constructed file name
file_name = []
# the file name is passed in with each character as 2 bytes, the high order byte is equal to 0
# so skip over these zero bytes
for i in name_data[::2]:
file_name.append(chr(i))
# add filename to tuple
self.firmata_firmware.append("".join(file_name))
def analog_message(self, data):
"""
This method handles the incoming analog data message.
It stores the data value for the pin in the analog response table.
If a callback function was associated with this pin, the callback function is invoked.
This method also checks to see if latching was requested for the pin. If the latch criteria was met,
the latching table is updated. If a latching callback function was provided by the user, a latching
notification callback message is sent to the user in place of updating the latching table.
@param data: Message data from Firmata
@return: No return value.
"""
with self.pymata.data_lock:
# hold on to the previous value
previous_value = \
self.analog_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE]
self.analog_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] \
= (data[self.MSB] << 7) + data[self.LSB]
pin = data[0]
pin_response_data_data = self.analog_response_table[pin]
value = pin_response_data_data[self.RESPONSE_TABLE_PIN_DATA_VALUE]
# check to see if there is a callback function attached to this pin
callback = self.analog_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_CALLBACK]
# send the pin mode, pin number, and current data value
if callback is not None:
if value != previous_value:
# has the value changed since the last report
callback([self.pymata.ANALOG, pin, value])
# check if data is to be latched
# get the analog latching table entry for this pin
latching_entry = self.analog_latch_table[pin]
if latching_entry[self.LATCH_STATE] == self.LATCH_ARMED:
# Has the latching criteria been met
if latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_GT:
if value > latching_entry[self.ANALOG_LATCH_DATA_TARGET]:
if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:
self.analog_latch_table[pin] = [0, 0, 0, 0, 0, None]
latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,
pin, value, time.time()])
else:
updated_latch_entry = latching_entry
updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
updated_latch_entry[self.ANALOG_LATCHED_DATA] = value
# time stamp it
updated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()
self.analog_latch_table[pin] = updated_latch_entry
else:
pass # haven't hit target
elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_GTE:
if value >= latching_entry[self.ANALOG_LATCH_DATA_TARGET]:
if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:
self.analog_latch_table[pin] = [0, 0, 0, 0, 0, None]
latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,
pin, value, time.time()])
else:
updated_latch_entry = latching_entry
updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
updated_latch_entry[self.ANALOG_LATCHED_DATA] = value
# time stamp it
updated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()
self.analog_latch_table[pin] = updated_latch_entry
else:
pass # haven't hit target:
elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_LT:
if value < latching_entry[self.ANALOG_LATCH_DATA_TARGET]:
if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:
latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,
pin, value, time.time()])
self.analog_latch_table[pin] = [0, 0, 0, 0, 0, None]
else:
updated_latch_entry = latching_entry
updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
updated_latch_entry[self.ANALOG_LATCHED_DATA] = value
# time stamp it
updated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()
self.analog_latch_table[pin] = updated_latch_entry
else:
pass # haven't hit target:
elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_LTE:
if value <= latching_entry[self.ANALOG_LATCH_DATA_TARGET]:
if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:
latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,
pin, value, time.time()])
self.analog_latch_table[pin] = [0, 0, 0, 0, 0, None]
else:
updated_latch_entry = latching_entry
updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
updated_latch_entry[self.ANALOG_LATCHED_DATA] = value
# time stamp it
updated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()
self.analog_latch_table[pin] = updated_latch_entry
else:
pass # haven't hit target:
else:
pass
    def digital_message(self, data):
        """
        Process an incoming digital port message.

        One message reports all 8 bits of one digital port. Each bit is
        stored in the digital response table; a per-pin callback fires when
        the bit changed, and armed digital latch entries are either latched
        in place or, when a latch callback exists, cleared and reported.
        @param data: Message data from Firmata
        @return: No return value.
        """
        port = data[0]
        # reassemble the 8 port bits from the two 7-bit payload bytes
        port_data = (data[self.MSB] << 7) + data[self.LSB]
        # set all the pins for this reporting port
        # get the first pin number for this report
        pin = port * 8
        for pin in range(pin, min(pin + 8, self.total_pins_discovered)):
            # shift through all the bit positions and set the digital response table
            with self.pymata.data_lock:
                # look at the previously stored value for this pin
                prev_data = self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]
                # get the current value (lowest remaining bit of port_data)
                self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE] = port_data & 0x01
                # if the values differ and a callback is enabled for the pin, notify it
                if prev_data != port_data & 0x01:
                    callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK]
                    if callback:
                        callback([self.pymata.DIGITAL, pin,
                                  self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]])
                # determine if the latch data table needs to be updated for each pin
                latching_entry = self.digital_latch_table[pin]
                if latching_entry[self.LATCH_STATE] == self.LATCH_ARMED:
                    if latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.DIGITAL_LATCH_LOW:
                        if (port_data & 0x01) == 0:
                            if latching_entry[self.DIGITAL_LATCH_CALLBACK] is not None:
                                # clear the entry, then notify the latch callback
                                self.digital_latch_table[pin] = [0, 0, 0, 0, None]
                                latching_entry[self.DIGITAL_LATCH_CALLBACK](
                                    [self.pymata.OUTPUT | self.pymata.LATCH_MODE,
                                     pin, 0, time.time()])
                            else:
                                # no callback: latch in place (updated_latch_entry
                                # aliases latching_entry, so the table entry is
                                # mutated directly)
                                updated_latch_entry = latching_entry
                                updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
                                updated_latch_entry[self.DIGITAL_LATCHED_DATA] = self.DIGITAL_LATCH_LOW
                                # time stamp it
                                updated_latch_entry[self.DIGITAL_TIME_STAMP] = time.time()
                        else:
                            pass
                    elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.DIGITAL_LATCH_HIGH:
                        if port_data & 0x01:
                            if latching_entry[self.DIGITAL_LATCH_CALLBACK] is not None:
                                self.digital_latch_table[pin] = [0, 0, 0, 0, None]
                                latching_entry[self.DIGITAL_LATCH_CALLBACK](
                                    [self.pymata.OUTPUT | self.pymata.LATCH_MODE,
                                     pin, 1, time.time()])
                            else:
                                updated_latch_entry = latching_entry
                                updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED
                                updated_latch_entry[self.DIGITAL_LATCHED_DATA] = self.DIGITAL_LATCH_HIGH
                                # time stamp it
                                updated_latch_entry[self.DIGITAL_TIME_STAMP] = time.time()
                        else:
                            pass
                    else:
                        pass
            # get the next data bit
            port_data >>= 1
    def encoder_data(self, data):
        """
        Process an incoming encoder data message and store the signed count
        in the digital response table; an attached callback is invoked when
        the value changed.
        @param data: Message data from Firmata
        @return: No return value.
        """
        # NOTE(review): the previous value is read outside the data lock;
        # presumably benign because this thread is the only writer - confirm.
        prev_val = self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE]
        val = int((data[self.MSB] << 7) + data[self.LSB])
        # set value so that it shows positive and negative values
        # (14-bit wrap: values above 8192 represent negatives)
        if val > 8192:
            val -= 16384
        pin = data[0]
        with self.pymata.data_lock:
            self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] = val
            if prev_val != val:
                callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK]
                if callback is not None:
                    callback([self.pymata.ENCODER, pin,
                              self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]])
    def sonar_data(self, data):
        """
        Process an incoming sonar (distance) data message and store the value
        both in the active sonar map and in the digital response table; a
        registered callback is invoked when the reading changed.
        @param data: Message data from Firmata
        @return: No return value.
        """
        # reassemble the distance value from the two 7-bit payload bytes
        val = int((data[self.MSB] << 7) + data[self.LSB])
        pin_number = data[0]
        with self.pymata.data_lock:
            # entry layout: [callback, last_value]
            sonar_pin_entry = self.active_sonar_map[pin_number]
            # also write it into the digital response table
            self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] = val
            # send data through callback if there is a callback function for the pin
            if sonar_pin_entry[0] is not None:
                # only notify when the value changed since the last reading
                if sonar_pin_entry[1] != val:
                    self.active_sonar_map[pin_number][0]([self.pymata.SONAR, pin_number, val])
            # update the data in the table with latest value
            sonar_pin_entry[1] = val
            self.active_sonar_map[pin_number] = sonar_pin_entry
def get_analog_response_table(self):
"""
This method returns the entire analog response table to the caller
@return: The analog response table.
"""
with self.pymata.data_lock:
data = self.analog_response_table
return data
def get_digital_response_table(self):
"""
This method returns the entire digital response table to the caller
@return: The digital response table.
"""
with self.pymata.data_lock:
data = self.digital_response_table
return data
def send_sysex(self, sysex_command, sysex_data=None):
"""
This method will send a Sysex command to Firmata with any accompanying data
@param sysex_command: sysex command
@param sysex_data: data for command
@return : No return value.
"""
if not sysex_data:
sysex_data = []
# convert the message command and data to characters
sysex_message = chr(self.START_SYSEX)
sysex_message += chr(sysex_command)
if len(sysex_data):
for d in sysex_data:
sysex_message += chr(d)
sysex_message += chr(self.END_SYSEX)
for data in sysex_message:
self.pymata.transport.write(data)
def send_command(self, command):
"""
This method is used to transmit a non-sysex command.
@param command: Command to send to firmata includes command + data formatted by caller
@return : No return value.
"""
send_message = ""
for i in command:
send_message += chr(i)
#send_message += bytes(i)
for data in send_message:
self.pymata.transport.write(data)
def system_reset(self):
"""
Send the reset command to the Arduino.
It resets the response tables to their initial values
@return: No return value
"""
data = chr(self.SYSTEM_RESET)
self.pymata.transport.write(data)
# response table re-initialization
# for each pin set the mode to input and the last read data value to zero
with self.pymata.data_lock:
# remove all old entries from existing tables
for _ in range(len(self.digital_response_table)):
self.digital_response_table.pop()
for _ in range(len(self.analog_response_table)):
self.analog_response_table.pop()
# reinitialize tables
for pin in range(0, self.total_pins_discovered):
response_entry = [self.pymata.INPUT, 0]
self.digital_response_table.append(response_entry)
for pin in range(0, self.number_of_analog_pins_discovered):
response_entry = [self.pymata.INPUT, 0]
self.analog_response_table.append(response_entry)
#noinspection PyMethodMayBeStatic
# keeps pycharm happy
def _string_data(self, data):
"""
This method handles the incoming string data message from Firmata.
The string is printed to the console
@param data: Message data from Firmata
@return: No return value.s
"""
print("_string_data:")
string_to_print = []
for i in data[::2]:
string_to_print.append(chr(i))
print(string_to_print)
def i2c_reply(self, data):
"""
This method receives replies to i2c_read requests. It stores the data for each i2c device
address in a dictionary called i2c_map. The data is retrieved via a call to i2c_get_read_data()
in pymata.py
It a callback was specified in pymata.i2c_read, the raw data is sent through the callback
@param data: raw data returned from i2c device
"""
reply_data = []
address = (data[0] & 0x7f) + (data[1] << 7)
register = data[2] & 0x7f + data[3] << 7
reply_data.append(register)
for i in range(4, len(data), 2):
data_item = (data[i] & 0x7f) + (data[i + 1] << 7)
reply_data.append(data_item)
# retrieve the data entry for this address from the i2c map
if address in self.i2c_map:
i2c_data = self.i2c_map.get(address, None)
i2c_data[1] = reply_data
self.i2c_map[address] = i2c_data
# is there a call back for this entry?
# if yes, return a list of bytes through the callback
if i2c_data[0] is not None:
i2c_data[0]([self.pymata.I2C, address, reply_data])
    def capability_response(self, data):
        """
        Store a capability query response for later retrieval via
        get_capability_query_results() in pymata.py.
        @param data: raw capability data
        """
        self.capability_query_results = data
    def pin_state_response(self, data):
        """
        Store a pin state response for later retrieval via
        get_pin_state_query_results() in pymata.py.
        @param data: raw pin state data
        """
        self.last_pin_query_results = data
    def analog_mapping_response(self, data):
        """
        Store an analog mapping query response for later retrieval via
        get_analog_mapping_request_results() in pymata.py.
        @param data: raw analog mapping data
        """
        self.analog_mapping_query_results = data
    def stepper_version_response(self, data):
        """
        Store the stepper library version reported by the Arduino,
        reassembled from two 7-bit payload bytes.
        @param data: raw stepper version data
        """
        self.stepper_library_version = (data[0] & 0x7f) + (data[1] << 7)
    def run(self):
        """
        Receiver thread main loop.

        Continuously pops bytes from the command deque, assembles complete
        Firmata messages (sysex and non-sysex) and dispatches each one to
        the handler registered in the command dispatch table. This must be
        the last method in this file.
        """
        # To add a command to the command dispatch table, append here.
        # Entry layout: {command: [handler_method, number_of_data_bytes]}
        self.command_dispatch.update({self.REPORT_VERSION: [self.report_version, 2]})
        self.command_dispatch.update({self.REPORT_FIRMWARE: [self.report_firmware, 1]})
        self.command_dispatch.update({self.ANALOG_MESSAGE: [self.analog_message, 2]})
        self.command_dispatch.update({self.DIGITAL_MESSAGE: [self.digital_message, 2]})
        self.command_dispatch.update({self.ENCODER_DATA: [self.encoder_data, 3]})
        self.command_dispatch.update({self.SONAR_DATA: [self.sonar_data, 3]})
        self.command_dispatch.update({self.STRING_DATA: [self._string_data, 2]})
        self.command_dispatch.update({self.I2C_REPLY: [self.i2c_reply, 2]})
        self.command_dispatch.update({self.CAPABILITY_RESPONSE: [self.capability_response, 2]})
        self.command_dispatch.update({self.PIN_STATE_RESPONSE: [self.pin_state_response, 2]})
        self.command_dispatch.update({self.ANALOG_MAPPING_RESPONSE: [self.analog_mapping_response, 2]})
        self.command_dispatch.update({self.STEPPER_DATA: [self.stepper_version_response, 2]})
        while not self.is_stopped():
            if len(self.pymata.command_deque):
                # get next byte from the deque and process it
                data = self.pymata.command_deque.popleft()
                # this list will be populated with the received data for the command
                command_data = []
                # process sysex commands
                if data == self.START_SYSEX:
                    # next char is the actual sysex command
                    # wait until we can get data from the deque
                    # NOTE(review): this is a busy-wait; it spins until data arrives
                    while len(self.pymata.command_deque) == 0:
                        pass
                    sysex_command = self.pymata.command_deque.popleft()
                    # retrieve the associated command_dispatch entry for this command
                    dispatch_entry = self.command_dispatch.get(sysex_command)
                    # get a "pointer" to the method that will process this command
                    method = dispatch_entry[0]
                    # now get the rest of the data excluding the END_SYSEX byte
                    end_of_sysex = False
                    while not end_of_sysex:
                        # wait for more data to arrive
                        while len(self.pymata.command_deque) == 0:
                            pass
                        data = self.pymata.command_deque.popleft()
                        if data != self.END_SYSEX:
                            command_data.append(data)
                        else:
                            end_of_sysex = True
                    # invoke the method to process the command
                    method(command_data)
                    # go to the beginning of the loop to process the next command
                    continue
                # is this a command byte in the range of 0x80-0xff? these are the non-sysex messages
                elif 0x80 <= data <= 0xff:
                    # look up the method for the command in the command dispatch table
                    # for the digital reporting the command value is modified with port number
                    # the handler needs the port to properly process, so decode that from the command and
                    # place in command_data
                    if 0x90 <= data <= 0x9f:
                        port = data & 0xf
                        command_data.append(port)
                        data = 0x90
                    # the pin number for analog data is embedded in the command so, decode it
                    elif 0xe0 <= data <= 0xef:
                        pin = data & 0xf
                        command_data.append(pin)
                        data = 0xe0
                    else:
                        pass
                    dispatch_entry = self.command_dispatch.get(data)
                    # this calls the method retrieved from the dispatch table
                    method = dispatch_entry[0]
                    # get the number of parameters that this command provides
                    num_args = dispatch_entry[1]
                    # look at the number of args that the selected method requires
                    # now get that number of bytes to pass to the called method
                    for i in range(num_args):
                        while len(self.pymata.command_deque) == 0:
                            pass
                        data = self.pymata.command_deque.popleft()
                        command_data.append(data)
                    # go execute the command with the argument list
                    method(command_data)
                    # go to the beginning of the loop to process the next command
                    continue
|
24,485 | c7135dca5bcf85456ddf065d8cd05ca4a90b9b80 | from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from pca_new import pca
from PCA import pca_book
# Load the test image as greyscale ('L'); every dark pixel (< 128) becomes
# a 2-D point (x, y), with the y axis flipped so the plot appears upright.
# NOTE(review): this file uses Python 2 print statements.
imX = np.array(Image.open('CVwithPy/test.jpg').convert('L'))
n,m = imX.shape[0:2]
points = []
for i in range(n):
    for j in range(m):
        if imX[i,j] < 128:
            points.append([float(j),float(n)-float(i)])
# 2 x N matrix: one column per dark pixel
imX = np.mat(points).T
print 'im_X=',imX,'shape=',imX.shape
# run PCA on the point cloud, then project back into the original space
low_X,EV_main,mean_X = pca(imX)
recon_X = np.dot(EV_main,low_X) + mean_X
# scatter the original points (small) over the reconstruction (large, blue)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(imX[0].A[0],imX[1].A[0],s=1,alpha=0.5)
ax.scatter(recon_X[0].A[0], recon_X[1].A[0],marker='o',s=100,c='blue',edgecolors='white')
plt.show()
24,486 | 9cb3fbde26a12b9682ace939dacf9c7968418e99 | """Data utilities."""
import torch
from torch.autograd import Variable
import operator
import json
def construct_vocab(lines, vocab_size):
    """Construct a vocabulary from tokenized lines.

    The *vocab_size* most frequent words get ids starting at 2; ids 0 and 1
    are reserved for '<pad>' and '<unk>' respectively.

    Fix: id2word[1] previously mapped to '<pad>' instead of '<unk>'.

    @param lines: iterable of token lists
    @param vocab_size: maximum number of non-special words to keep
    @return: (word2id, id2word) dictionaries
    """
    counts = {}
    for line in lines:
        for word in line:
            counts[word] = counts.get(word, 0) + 1
    word2id = {'<pad>': 0, '<unk>': 1}
    id2word = {0: '<pad>', 1: '<unk>'}
    sorted_word2id = sorted(
        counts.items(),
        key=operator.itemgetter(1),
        reverse=True
    )
    sorted_words = [x[0] for x in sorted_word2id[:vocab_size]]
    for ind, word in enumerate(sorted_words):
        word2id[word] = ind + 2
        id2word[ind + 2] = word
    return word2id, id2word
def convert_to_tensor(batch, word2ind):
    """Prepare a minibatch as a LongTensor of word ids.

    Each line is mapped through *word2ind* (unknown words become '<unk>')
    and right-padded with '<pad>' to the longest line in the batch.

    Fix: the maximum length was previously taken from ``lens[-1]``, which is
    only correct when the batch happens to be sorted by ascending length;
    an unsorted batch produced ragged rows and a LongTensor failure.

    @param batch: list of token lists
    @param word2ind: word -> id mapping containing '<pad>' and '<unk>'
    @return: (tensor_batch, max_len)
    """
    lens = [len(line) for line in batch]
    max_len = max(lens)
    pad_id = word2ind['<pad>']
    unk_id = word2ind['<unk>']
    input_lines = [
        [word2ind.get(w, unk_id) for w in line] +
        [pad_id] * (max_len - len(line))
        for line in batch
    ]
    tensor_batch = Variable(torch.LongTensor(input_lines))
    return tensor_batch, max_len
|
24,487 | 2a0b7992453f9b859afb9b110476cb418c64151a | """
You're now a baseball game point recorder.
Given a list of strings, each string can be one of the 4
following types:
Integer (one round's score): Directly represents the number of
points you get in this round.
"+" (one round's score): Represents that the points you get in
this round are the sum of the last two valid round's points.
"D" (one round's score): Represents that the points you get in
this round are the doubled data of the last valid round's points.
"C" (an operation, which isn't a round's score): Represents the
last valid round's points you get were invalid and should be removed.
Each round's operation is permanent and could have an impact on
the round before and the round after.
You need to return the sum of the points you could get in all
the rounds.
Example 1:
Input: ["5","2","C","D","+"]
Output: 30
Explanation:
Round 1: You could get 5 points. The sum is: 5.
Round 2: You could get 2 points. The sum is: 7.
Operation 1: The round 2's data was invalid. The sum is: 5.
Round 3: You could get 10 points (the round 2's data has been
removed). The sum is: 15.
Round 4: You could get 5 + 10 = 15 points. The sum is: 30.
"""
class Solution:
    def calPoints(self, ops: "List[str]") -> int:
        """Return the total score after applying all baseball-game ops.

        An integer string adds that score; '+' adds the sum of the last two
        valid scores; 'D' adds double the last valid score; 'C' removes the
        last valid score.

        Fix: the annotation is quoted because ``List`` is never imported in
        this file, so evaluating it at class-creation time raised NameError.
        """
        vals = []  # stack of the currently valid round scores
        for op in ops:
            if op == 'C':
                vals.pop()
            elif op == '+':
                vals.append(vals[-1] + vals[-2])
            elif op == 'D':
                vals.append(2 * vals[-1])
            else:
                vals.append(int(op))
        return sum(vals)
|
24,488 | 2d753259c959dc77d6ad40b78e4aa072094ae0de | #!/usr/bin/env python
import time
import pyupm_grove as grove
# Grove LED on digital pin 3, Grove light sensor on analog pin 2
led = grove.GroveLed(3)
light = grove.GroveLight(2)
print "[start....]"
# Poll once per second: turn the LED on in low light (raw reading <= 300),
# off otherwise.  NOTE(review): this file uses a Python 2 print statement.
while True:
    light_value = light.raw_value()
    if light_value <= 300:
        led.on()
    else:
        led.off()
    time.sleep(1)
24,489 | 9026eb8fe1858ba21368095739faf132469c64e4 | from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class BaseElement(object):
    """Wrapper around a located WebElement using explicit waits.

    The element is located (with a 10 second visibility wait) as soon as the
    wrapper is constructed and cached on ``self.web_element``.
    """

    def __init__(self, driver, locator):
        self.driver = driver
        self.locator = locator
        self.web_element = None
        self.find()

    def find(self):
        """Wait up to 10s for the element to become visible, then cache it."""
        self.web_element = WebDriverWait(
            self.driver, 10).until(
            EC.visibility_of_element_located(locator=self.locator))
        return None

    def input_text(self, txt):
        """Type *txt* into the cached element."""
        self.web_element.send_keys(txt)
        return None

    def click(self):
        """Wait until the element is clickable (re-locating it), then click."""
        clickable = WebDriverWait(
            self.driver, 10).until(
            EC.element_to_be_clickable(locator=self.locator))
        clickable.click()
        return None

    def attribute(self, attr_name):
        """Return the value of the HTML attribute *attr_name*."""
        return self.web_element.get_attribute(attr_name)

    @property
    def text(self):
        """The element's visible text."""
        return self.web_element.text
|
24,490 | 0361f94a90127b41e92a77e65209fdefffe635c2 | import time
import ubinascii
import machine
from umqtt.simple import MQTTClient
from data import conf
from utils.pins import DHT
# unique MQTT client id derived from the board's hardware id
CLIENT_ID = ubinascii.hexlify(machine.unique_id())
mqtt = MQTTClient(CLIENT_ID, conf.MQTT_SERVER)
mqtt.connect()
def main():
    """Read the DHT sensor once and publish temperature and humidity."""
    # give the sensor time to settle before measuring
    time.sleep(5)
    DHT.measure()
    temp = DHT.temperature()
    humidity = DHT.humidity()
    mqtt.publish('sensors/temperature/{}'.format(CLIENT_ID).encode(), str(temp).encode())
    mqtt.publish('sensors/humidity/{}'.format(CLIENT_ID).encode(), str(humidity).encode())
# retry up to 5 times, logging failures both locally and over MQTT
retries = 5
while retries:
    try:
        main()
        break
    except Exception as e:
        with open('errors.txt', 'a') as err_file:
            err_file.write(str(e))
            err_file.write('\n')
        mqtt.publish('errors/{}'.format(CLIENT_ID).encode(), str(e).encode())
        retries -= 1
mqtt.disconnect()
# deep-sleep for 5 minutes, waking via the RTC alarm
rtc = machine.RTC()
rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)
rtc.alarm(rtc.ALARM0, 5 * 60 * 1000)
machine.deepsleep()
24,491 | 085b2afc54c5c2be1fa9a70b6e026cea3408d484 | def combs(c1, c2):
    # pad c1 with '-' on both sides so c2 may overhang by up to len(c2)-2
    c1,l = "-"*(len(c2)-2)+c1+"-"*(len(c2)-2),[]
    # slide c2 across every possible alignment over the padded c1
    for i in range(len(c1)-(len(c2)-1)):
        # b: c2 placed at offset i, padded with '-' to the width of c1;
        # t collects the character pairs inside the union of both combs
        b,t = "-"*i+c2+"-"*(len(c1)-len(c2)-i),[]
        for j,k in zip(c1,b):
            if not (j=="-" and k=="-"): t.append([j,k])
        # an alignment is valid when no two teeth ('*') collide;
        # its combined width is len(t)
        if all(j!=["*","*"] for j in t): l.append(t)
    # return the minimum combined width; if every alignment collides,
    # fall back to len(c1)-1 (the padded width minus one)
    return min(len(i) for i in l) if len(l)>0 else len(c1)-1
|
24,492 | e05369b6b300ce4e9255ce603c2cd40f416e1612 | from django.shortcuts import render, redirect, get_object_or_404
from .forms import UserForm, InvestmentForm
from django.contrib.auth import authenticate
from django.contrib.auth import login as login_user
from django.contrib.auth import logout as logout_user
from django.contrib import messages
from .models import User, Investment, Capitallog
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .serializers import CapitallogSerializer, CapitalflowSerializer
from rest_framework.permissions import IsAuthenticated
from django.core.mail import send_mail
import random
def index(request):
    """Sign-in view: authenticate on POST, otherwise render the login page."""
    if request.method=='POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user is not None:
            if user.is_active:
                login_user(request, user)
                return redirect(account)
            # credentials valid but the account was deactivated
            messages.error(request, 'Your account has been disabled')
            return redirect(index)
        messages.error(request, 'Invalid login')
        return redirect(index)
    return render(request, 'login.html', {
        'title': 'Jimnet account sign in'
    })
def profile(request):
    """Profile edit view.

    Updates name and email (and optionally the password) after verifying the
    current password; a new password is applied only when both new-password
    fields match.
    """
    if request.user.is_authenticated:
        if request.method=='POST':
            first_name = request.POST['first_name']
            last_name = request.POST['last_name']
            email = request.POST['email']
            currentpass = request.POST['currentpassword']
            newpass = request.POST['newpassword']
            newpass2 = request.POST['newpassword2']
            user = User.objects.get(username=request.user.username)
            if first_name and last_name and email:
                if user.check_password(currentpass):
                    if newpass:
                        if newpass==newpass2:
                            # NOTE(review): changing the password may invalidate
                            # the current session in modern Django - confirm the
                            # re-login flow.
                            user.set_password(newpass)
                        else:
                            messages.error(request, 'Password 1 does not match password 2')
                            return redirect(profile)
                    user.first_name = first_name
                    user.last_name = last_name
                    user.email = email
                    user.save()
                    messages.success(request, 'Profile updated')
                    return redirect(profile)
                messages.error(request, 'Current password does not match the one specified')
                return redirect(profile)
            messages.error(request, 'Please fill the neccessary fields')
            return redirect(profile)
        return render(request, 'profile.html', {
            'title': 'Edit your profile',
            'user': User.objects.get(username=request.user.username)
        })
    return redirect(index)
def totalInputOutput(log):
    """Return [total_input, total_output] summed over the log's accounts.

    A falsy *log* (e.g. None or an empty value) yields [0, 0]. Accounts with
    accType 'input' contribute to the first total, all others to the second.
    """
    total_in = 0
    total_out = 0
    if log:
        for acc in log.account_set.all():
            if acc.accType == 'input':
                total_in += acc.amount
            else:
                total_out += acc.amount
    return [total_in, total_out]
def account(request):
    """Dashboard view: show the most recent capital log plus its totals."""
    if request.user.is_authenticated:
        lastlog = []
        if Capitallog.objects.count():
            # NOTE(review): this fetches all logs twice just to take the last
            # one; Capitallog.objects.last() would avoid loading the table.
            lastlog = Capitallog.objects.all()[len(Capitallog.objects.all())-1]
        return render(request, 'index.html', {
            'title': 'Jimnet account',
            'lastlog': lastlog,
            'totinput': totalInputOutput(lastlog)[0],
            'totoutput': totalInputOutput(lastlog)[1],
            'logs': Capitallog.objects.all(),
            'user': User.objects.get(username=request.user.username)
        })
    return redirect(index)
def investments(request):
    """Investments view: list investors and record a new investment on POST."""
    if request.user.is_authenticated:
        if request.method=='POST':
            form = InvestmentForm(request.POST)
            if form.is_valid():
                investment = form.save(commit=False)
                # attach the selected investor before saving
                investment.investor = User.objects.get(pk=request.POST['investor'])
                investment.save()
                messages.success(request, 'Investment added')
                return redirect(investments)
            messages.error(request, 'Please fill all input')
            return redirect(investments)
        return render(request, 'investments.html', {
            'title': 'Jimnet investments',
            'investors': User.objects.all(),
            'user': User.objects.get(username=request.user.username)
        })
    return redirect(index)
def register(request):
    """Sign-up view: create an account when the creation pin matches, then
    log the new user in."""
    if request.method=='POST':
        form = UserForm(request.POST)
        if form.is_valid():
            if request.POST['password']==request.POST['password2']:
                if request.POST['pincode']=='inception':
                    user = form.save(commit=False)
                    username = form.cleaned_data['username']
                    password = form.cleaned_data['password']
                    user.set_password(password)
                    user.save()
                    user = authenticate(request, username=username, password=password)
                    # NOTE(review): if authenticate() returns None here, the
                    # code falls through to the generic "fill all fields"
                    # error below - confirm that is intended.
                    if user is not None:
                        login_user(request, user)
                        # user = User.objects.filter(user=request.user)
                        messages.success(request, 'Registration successful.')
                        return redirect(account)
                else:
                    messages.error(request, 'Wrong creation pin')
                    return redirect(register)
            else:
                messages.error(request, 'Password 1 does not match password 2')
                return redirect(register)
        messages.error(request, 'Please ensure the all the field are filled')
        return redirect(register)
    else:
        return render(request, 'register.html', {
            'title': 'Jimnet account sign up'
        })
def forgetpassword(request):
    """Password-reset view.

    On POST with an existing username, generates a random 6-digit password,
    mails it to the user's address and - only if the mail was sent - stores
    it as the new password.

    Fixes: the password was previously reset and saved *before* attempting
    to send the mail, locking the user out whenever sending failed; the raw
    int from random.randint was also passed to set_password, which expects a
    string.
    """
    if request.method=='POST':
        username = request.POST['username']
        if username:
            if User.objects.filter(username=username):
                user = User.objects.get(username=username)
                newpass = str(random.randint(100000, 1000000))
                message = 'Hello, your new password is ' + newpass
                res = send_mail("Jim Account password reset", message, "support@Jimnet.com",[user.email], fail_silently=True)
                if res:
                    # commit the new password only after the mail went out
                    user.set_password(newpass)
                    user.save()
                    messages.success(request, 'A mail has been sent to your mail, please check')
                    return redirect(index)
                messages.error(request, 'Error sending mail to your email')
                return redirect(index)
            messages.error(request, 'Username specified does not exist')
            return redirect(index)
        messages.error(request, 'Please specify your username')
        return redirect(index)
    return redirect(index)
def logout(request):
    """Log the current user out and return to the sign-in page."""
    logout_user(request)
    return redirect(index)
class Createcapitallog(APIView):
    """API endpoint: create a Capitallog from POSTed data (auth required).

    Returns the serialized log on success, or False when validation fails.
    """
    permission_classes = (IsAuthenticated, )
    def post(self, request):
        serializer = CapitallogSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(False)
class CapitalFlow(APIView):
    """API endpoint: create a capital flow entry from POSTed data
    (auth required).

    Returns the serialized flow on success, or False when validation fails.
    """
    permission_classes = (IsAuthenticated, )
    def post(self, request):
        serializer = CapitalflowSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(False)
class AccountLog(APIView):
    """API endpoint: return the account entries for one Capitallog.

    GET returns [serialized flows, [total_input, total_output]] for the log
    with primary key *pk*, or False when no such log exists.

    Fix: removed an unreachable, copy-pasted POST-serializer fragment that
    followed the final return statement.
    """
    permission_classes = (IsAuthenticated, )
    def get(self, request, pk):
        if Capitallog.objects.filter(pk=pk):
            data = Capitallog.objects.get(pk=pk).account_set.all()
            serializer = CapitalflowSerializer(data, many=True)
            val = [serializer.data, totalInputOutput(Capitallog.objects.get(pk=pk))]
            return Response(val)
        return Response(False)
24,493 | 9f10114a2d91730f8f24015414b03e150cc1bd2e | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 15:53:30 2019
@author: FLomb
"""
import pickle
#%%
def save_obj(obj, name, directory='results'):
    """Pickle *obj* to <directory>/<name>.pkl.

    The default directory keeps the historical 'results/' behaviour; the new
    optional *directory* parameter generalizes the target location.
    """
    with open(directory + '/' + name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name, directory='input_data'):
    """Unpickle and return <directory>/<name>.pkl.

    The default directory keeps the historical 'input_data/' behaviour; the
    new optional *directory* parameter generalizes the source location.
    """
    with open(directory + '/' + name + '.pkl', 'rb') as f:
        return pickle.load(f)
24,494 | c36d9766bab20197c480921e8e33afde39de803a | import sys
import math
import time
import signal
from qgis.PyQt.QtGui import QColor
from qgis.PyQt.QtWidgets import QFrame
from qgis.PyQt.QtCore import *
from qgis.core import *
from qgis.gui import *
from renderer import *
def main():
    """Start a QGIS application with two fullscreen map canvases, one per
    attached screen: an inspector canvas on screen 0 and an interactive
    canvas on screen 1, both showing project.qgz."""
    app = QgsApplication([], True)
    app.setPrefixPath("/usr/bin/qgis", True)
    app.initQgis()
    screens = app.screens()
    project = QgsProject.instance()
    project.read("project.qgz")
    display0 = MapCanvasInspector("display0", project)
    display0.setGeometry(screens[0].geometry().x(), screens[0].geometry().y(), 1920, 1024)
    display0.setFrameShape(QFrame.NoFrame)
    display0.showFullScreen()
    display0.show()
    display1 = MapCanvasInteractive("display1", project)
    display1.show()
    display1.setGeometry(screens[1].geometry().x(), screens[1].geometry().y(), 1920, 1024)
    display1.setFrameShape(QFrame.NoFrame)
    display1.showFullScreen()
    # restore default Ctrl+C handling so the Qt event loop can be interrupted
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    sys.exit(app.exec_())
if __name__ == "__main__":
    main()
|
24,495 | c72f2cfa58244698316e91ece6417a3b04cee84b | ###################### INFORMATION #############################
# It talks to the SQLite-DB and inserts the data of JSON-Files
# Program: **AKF_SQL_DBTalk**
# Info: **Python 3.6**
# Author: **Jan Kamlah**
# Date: **02.11.2017**
###################### GENERAL TO DO #############################
# TODO: - Anleihen, Dependence(Besitztümer), Volume existent?
# TODO: Notiz, Notizen, Bemerkung?! Handelsplatz...Boersennotiztable
######### IMPORT SQL ############
from sqlalchemy import create_engine, MetaData, select
######### IMPORT JSON ############
import json
import configparser
import os
from itertools import zip_longest
import string
import tempfile
from copy import deepcopy
import glob
from functools import wraps
import time, timeit
import re
# Obsolete modules
# import random
######### CLASSES ############
class NoneRemover(json.JSONDecoder):
    """JSON decoder that turns every null value inside objects into ''.

    Implemented via the object_hook callback, which the decoder invokes for
    each decoded JSON object (so nested objects are cleaned as well).
    """

    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)

    def object_hook(self, obj):
        # rewrite null values in place; assigning to existing keys while
        # iterating is safe since no keys are added or removed
        for key in obj:
            if obj[key] is None:
                obj[key] = ""
        return obj
######### DECORATOR ############
def call_counter_and_time(func):
    """
    Benchmarking decorator.

    Counts how often the wrapped function is invoked (exposed as
    ``wrapper.calls``) and prints the wall-clock duration of each call.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        wrapper.calls += 1
        start = timeit.default_timer()
        outcome = func(*args, **kwargs)
        print(timeit.default_timer() - start)
        return outcome
    wrapper.calls = 0
    return wrapper
######### TABLE-FUNCTIONS ############
"""
The following functions get case-related called.
Every function represents a table in the sqlite db.
Sorry for breaking the convention with lowercase for
functions, but they seem so classy.
"""
def Aktienkursetable(conn, new_data, table):
    """Insert yearly share-price rows (high/low/year-end) per stock exchange.

    Reads ``new_data['boersenbewertung'][<boerse>]['kurse']`` and writes one
    row per price column of each year block.  Currency/unit are parsed from
    the 'notiz_bereinigteKurse' note (text after its last "in").
    del_entry/entry_check/get_currency_unit/replace_geminfo are project
    helpers defined elsewhere in this file.
    """
    print(table.columns.keys())
    del_entry(new_data['compare'], [], ['year'])
    if 'boersenbewertung' not in new_data: return 0
    for boerse in new_data['boersenbewertung']:
        if 'kurse' not in new_data['boersenbewertung'][boerse]: continue
        # Running rank over all price cells of this exchange.
        idx = 1
        for idxx, block in enumerate(new_data['boersenbewertung'][boerse]['kurse']):
            del_entry(new_data['compare'], ['boersenbewertung', boerse, 'kurse', idxx],
                      ['jahr', "hoechst", "tiefst", "ultimo", "kommentar"])
            del_entry(new_data['compare'], ['boersenbewertung', boerse, ], ['notiz_bereinigteKurse'])
            entry_check(block, ['jahr', "hoechst", "tiefst", "ultimo", "kommentar"])
            # Everything except 'jahr'/'kommentar' is treated as a price column.
            blockkey = list(block.keys())
            blockkey.remove('jahr')
            blockkey.remove('kommentar')
            currency, unit = get_currency_unit(
                {'waehrung': new_data['boersenbewertung'][boerse]['notiz_bereinigteKurse'].split("in")[-1].strip()})
            # Keep only the first token of the year field (drops footnote text).
            year = block['jahr'].replace("\xa0", " ")
            year = year.split(" ")[0]
            notes = new_data['boersenbewertung'][boerse]['notiz_bereinigteKurse']
            comment = replace_geminfo(block['jahr'], new_data['boersenbewertung'][boerse],'notiz_bereinigteKurse')
            for entry in blockkey:
                del_entry(new_data['compare'], [], ['year'])
                amount = block[entry]
                # Prefix a leading decimal comma with "0" (",5" -> "0,5").
                if "," in amount and amount.index(",") == 0:
                    amount = "0"+amount
                conn.execute(table.insert(), [
                    {'unternehmenId': new_data['unternehmenId'],
                     'Jahr': year,
                     'Stichtag': block['kommentar'],
                     'Hoehe': amount,
                     'Waehrung': currency,
                     'Einheit': unit,
                     'Art': entry,
                     'Notiz': notes,
                     'Bemerkung': comment,
                     'BemerkungAbschnitt': "",
                     'Abschnitt': "",
                     'Rang': idx,
                     }])
                idx+=1
    return 0
def Aktionaertable(conn, new_data, table):
    """Insert shareholder rows from 'gesellschafter' (aktionaere/anteilseigner/kommanditisten).

    A legacy top-level 'aktionaer' entry is first migrated into
    ``new_data['gesellschafter']['aktionaere']``.  Entries whose 'ort' looks
    like free text (lower-case start or containing markers like "u.", "%",
    "ca.") are moved into the Bemerkung column instead.
    """
    print(table.columns.keys())
    if 'aktionaer' in new_data:
        # Legacy layout: move 'aktionaer' under 'gesellschafter'.
        if not 'gesellschafter' in new_data:
            new_data['gesellschafter'] = {'aktionaere':[]}
        new_data['gesellschafter']['aktionaere'] = [deepcopy(new_data['aktionaer'])][0]
        del new_data['aktionaer']
        del_entry(new_data,['compare'], ['aktionaer'])
    if 'gesellschafter' in new_data:
        for name in new_data['gesellschafter']:
            if name in ['aktionaere', 'anteilseigner', 'kommanditisten']:
                for idx, entry in enumerate(new_data['gesellschafter'][name]):
                    del_entry(new_data['compare'], ['gesellschafter', name, idx],
                              ['beteiliger', 'ort', 'anteil', 'bemerkung'])
                    entry_check(entry, ['beteiliger', 'ort', 'anteil', 'bemerkung', "bemerkungen"])
                    comment = entry['bemerkung']
                    if comment == "":
                        comment = " ".join(entry["bemerkungen"])
                    if 'name' in entry:
                        entry['beteiliger'] = entry['name']
                    # An 'ort' that does not start upper-case is really free text.
                    if entry.get("ort","") != "" and not entry["ort"].strip()[0].isupper():
                        comment = " Info: " + entry['ort'] + " " + comment
                        entry['ort'] = ""
                    # Marker words that indicate 'ort' holds a remark, not a place.
                    pwords = ["u.", "%", "über", "ca.", "Kdt.", "Inc.", "dir."]
                    for word in pwords:
                        if word in entry['ort']:
                            comment = " Info: " + entry['ort'] + " " + comment
                            entry['ort'] = ""
                            break
                    # (variable name kept as in original; sic "aktionear")
                    aktionear = ""
                    if len(entry['beteiliger']) > 1:
                        if ":" == entry['beteiliger'].strip()[0]:
                            aktionear = entry['beteiliger'].replace(":","").strip()
                        else:
                            aktionear = entry['beteiliger'].strip()
                    conn.execute(table.insert(), [
                        {'unternehmenId': new_data['unternehmenId'],
                         'Name': aktionear,
                         'Ort': entry['ort'],
                         'Anteil': entry['anteil'],
                         'Abschnitt': "",
                         'Bemerkung': comment.strip(),
                         'BemerkungAbschnitt': "",
                         'Rang': idx + 1,
                         }])
    return 0
def Anleihentable(conn, new_data, table):
    """Placeholder for the 'Anleihen' (bonds) table -- import currently DISABLED.

    The insertion logic was unreachable in the original code (it sat after an
    unconditional ``return 0``; note it also iterated 'all_wkn_entry' although
    the guard checked 'anleihen' -- see the "Anleihen ... existent?" TODO at
    the top of the file).  It is preserved below as a comment so it can be
    reviewed and re-enabled once the data situation is clarified.
    """
    print(table.columns.keys())
    return 0
    # Disabled (was unreachable dead code):
    # if 'anleihen' not in new_data: return 0
    # for idx, entry in enumerate(new_data['all_wkn_entry']):
    #     entry_check(entry, ['name', 'ort', 'anteil', 'bemerkung'])
    #     conn.execute(table.insert(), [
    #         {'unternehmenId': new_data['unternehmenId'],
    #          'Anleihen': new_data['name'],
    #          'Rang': idx + 1,
    #         }])
    # return 0
def Aufsichtsrattable(conn, new_data, table):
    """Insert the supervisory-board ('Aufsichtsrat') members of a company."""
    print(table.columns.keys())
    if 'aufsichtsrat' not in new_data:
        return 0
    person_fields = ['lastName', 'firstName', 'title', 'cityAcc', 'funct']
    for rank, member in enumerate(new_data['aufsichtsrat'], start=1):
        # Mark the fields as consumed and make sure they all exist
        # (fresh list copies, in case the helpers mutate their argument).
        del_entry(new_data['compare'], ['aufsichtsrat', rank - 1], person_fields[:])
        entry_check(member, person_fields[:])
        if membercheck(member):
            continue
        row = {'unternehmenId': new_data['unternehmenId'],
               'Name': member['lastName'],
               'Vorname': member['firstName'],
               'Titel': member['title'],
               'Ort': member['cityAcc'],
               'Funktion': member['funct'],
               'Bemerkung': "",
               'Rang': rank}
        conn.execute(table.insert(), [row])
    return 0
def Beschaeftigtetable(conn, new_data, table):
    """Insert the employee ('Beschäftigte') head-count rows, one per year/key."""
    print(table.columns.keys())
    if 'boersenbewertung' not in new_data:
        return 0
    for boerse in new_data['boersenbewertung']:
        bewertung = new_data['boersenbewertung'][boerse]
        if 'kennzahlen' not in bewertung:
            continue
        kennzahlen = bewertung['kennzahlen']
        # Older records label the figure "Mitarbeiter" -- normalize the key.
        if "Mitarbeiter" in kennzahlen:
            kennzahlen["Beschäftigte"] = kennzahlen.pop("Mitarbeiter")
        if "Beschäftigte" not in kennzahlen:
            continue
        for rank, block in enumerate(kennzahlen["Beschäftigte"], start=1):
            # Snapshot the keys BEFORE entry_check may add 'jahr'.
            remaining = list(block.keys())
            entry_check(block, ['jahr'])
            # Keep only the leading year token; drop footnote markers.
            year = block['jahr'].replace("\xa0", " ").split(" ")[0]
            if "*" in year:
                year = year.split("*")[0]
            remaining.remove("jahr")
            comment = replace_geminfo(block['jahr'], "", "")
            for key in remaining:
                conn.execute(table.insert(), [
                    {'unternehmenId': new_data['unternehmenId'],
                     'Stichtag': year,
                     'Anzahl': block[key],
                     'Notiz': comment,
                     'Bemerkung': key,
                     'Rang': rank,
                     }])
    return 0
def Beteiligungentable(conn, new_data, table):
    """Insert participation ('Beteiligungen') rows.

    First normalizes the structure: unwraps an 'elemente' wrapper, wraps
    single entries into a 'zeilen' list, and drops plain-string entries
    (the ``ct - idx`` arithmetic compensates indices for in-loop deletes).
    Then one row is written per non-string line; group headers (entries
    with neither 'ort' nor 'anteil' nor a "%" sign) only set the running
    group comment.  get_share is a project helper defined elsewhere.
    """
    print(table.columns.keys())
    if 'beteiligungen' not in new_data: return 0
    if len(new_data['beteiligungen']) == 0: return 0
    # idx counts in-place deletions during normalization.
    idx = 0
    if "elemente" in new_data['beteiligungen']:
        new_data['beteiligungen'] = new_data['beteiligungen']['elemente']
    for ct, _ in enumerate(new_data['beteiligungen']):
        if 'zeilen' not in new_data['beteiligungen'][ct - idx]:
            if not isinstance(new_data['beteiligungen'][ct - idx], str):
                # Single entry without 'zeilen': wrap it into one.
                new_data['beteiligungen'][ct - idx]['zeilen'] = [deepcopy(new_data['beteiligungen'][ct - idx])]
                del_entry(new_data['compare'], ['beteiligungen'], [ct - idx])
            else:
                # Plain string entry: remove it, pad with an empty list so
                # enumerate keeps running, and trim the padding afterwards.
                del new_data['beteiligungen'][ct - idx]
                new_data['beteiligungen'].append([])
                del_entry(new_data['compare'], ['beteiligungen'], [ct - idx])
                idx += 1
                continue
    if idx != 0:
        del new_data['beteiligungen'][len(new_data['beteiligungen']) - idx:]
    comment = ""
    # count = skipped lines, used to keep ranks contiguous.
    count = 0
    for ix ,block in enumerate(new_data['beteiligungen']):
        for idx, entry in enumerate(block['zeilen']):
            addcomment = ""
            if isinstance(entry, str):
                count +=1
                continue
            del_entry(new_data['compare'], ['beteiligungen', ix], ['beteiliger', 'ort', 'anteil'])
            entry_check(entry, ['beteiliger', 'ort', 'anteil'])
            if entry['anteil'] != "":
                share, share_pc, currency = get_share(entry['anteil'])
            else:
                share, share_pc, currency = get_share(entry['beteiliger'])
            # Header line of a group: remember it as comment, skip the row.
            if entry['ort'] == "" and entry['anteil'] == "" and "%" not in entry['beteiliger']:
                comment = " Gruppe: "+entry['beteiliger'].replace(":","")
                count += 1
                continue
            #Added new feat only for 2001 and less
            if entry['ort'] == "" and len(entry['anteil'].split(".")) > 1 and int(new_data['year'].split("-")[0])<2002:
                entry['ort'] = entry['anteil'].split(".")[0]
                entry['anteil'] = entry['anteil'].replace(entry['ort']+".","").strip()
            # Marker words that indicate 'ort' holds a remark, not a place.
            pwords = ["u.","%","ca.","Kdt.","Inc.","dir."]
            if "über" in entry['ort']:
                share_pc = "über "+share_pc
                entry['ort'] = entry['ort'].replace('über',"")
            for word in pwords:
                if word in entry['ort']:
                    addcomment = " Info: "+entry['ort']
                    entry['ort'] = ""
                    break
            headline =""
            # NOTE(review): block['ueberschrift'] is read here BEFORE
            # entry_check below ensures the key exists -- confirm it is
            # always present upstream.
            if not "ohne_titel" == block['ueberschrift']:
                headline = block['ueberschrift']+" "+comment
            entry_check(block, ['ueberschrift'])
            del_entry(new_data['compare'], ['beteiligungen'], ['ueberschrift'])
            conn.execute(table.insert(), [
                {'unternehmenId': new_data['unternehmenId'],
                 'Firmenname': entry['beteiliger'],
                 'Ort': entry['ort'],
                 'Kapitalbetrag': share,
                 'Waehrung': currency,
                 'Anteil': share_pc,
                 'Bemerkung': (headline+" "+comment+addcomment).strip(),
                 'Rang': idx + 1 - count,
                 }])
    return 0
def BilanzAktivatable(conn, new_data, table):
    """Insert the ASSET side ('Aktiva') rows of the balance sheets.

    Scans 'ausBilanzen' for the block whose name contains 'aktiva' and
    writes one row per position/year cell.  A cell value "U" means the
    amount was too low to be stated; it is blanked and explained via
    ``uinfo``.  Positions starting with "." are attached to the last
    alphabetic position seen (``lvalidpos``).
    """
    print(table.columns.keys())
    if 'ausBilanzen' not in new_data: return 0
    # Default legend for "U" cells; replaced when a "u =" legend block is
    # encountered (note: that block may only be seen after 'aktiva').
    uinfo = "The amount was considered to low to name it. "
    indx=1
    for ct, _ in enumerate(new_data['ausBilanzen']):
        for idx, block in enumerate(new_data['ausBilanzen'][ct]['ausBilanzen']):
            if 'aktiva' in block.lower():
                currency, unit = get_currency_unit(new_data['ausBilanzen'][ct])
                for entries in new_data['ausBilanzen'][ct]['ausBilanzen'][block]:
                    lvalidpos = ""
                    year = ""
                    if "jahr" in entries:
                        year = entries['jahr'].replace("\xa0", " ")
                        year = year.split(" ")[0]
                        if "*" in year:
                            year = year.split("*")[0]
                    # 'columnId' distinguishes AG vs. group level.
                    companystage = "AG"
                    if 'columnId' in entries:
                        companystage = entries['columnId']
                    comment = replace_geminfo(entries['jahr'], new_data['ausBilanzen'][ct], 'notizen')
                    for idxx, entry in enumerate(entries):
                        if entry == "jahr" or entry == "columnId":continue
                        #entity_check(entity, ['beteiliger', 'ort', 'anteil'])
                        if entries[entry].upper() == "U":
                            entries[entry] = ""
                            comment = uinfo + comment
                        pos = entry
                        if entry[0].isalpha():
                            pos = entry.title()
                            lvalidpos = pos
                        elif entry[0] == ".":
                            pos = lvalidpos + "(" + entry + ")"
                        conn.execute(table.insert(), [
                            {'unternehmenId': new_data['unternehmenId'],
                             'Konzernebene': companystage,
                             'Bilanzposition': pos,
                             'Jahr': year,
                             'Einheit': unit,
                             'Waehrung': currency,
                             'Betrag': entries[entry].replace(' ', "").replace(" ", ""),
                             'Bemerkung': comment,
                             'BemerkungAbschnitt': "",
                             'Rang': indx,
                             }])
                        indx+=1
                break
            if "u =" in block.lower():
                uinfo = block
    return 0
def BilanzPassivatable(conn, new_data, table):
    """Insert the LIABILITY side ('Passiva') rows of the balance sheets.

    Same structure as the 'Aktiva' variant: scans 'ausBilanzen' for the
    block containing 'passiva' and writes one row per position/year cell.
    "U" cells are blanked and explained via ``uinfo``; positions starting
    with "." attach to the last alphabetic position (``lvalidpos``).
    """
    print(table.columns.keys())
    if 'ausBilanzen' not in new_data: return 0
    # Default legend for "U" cells; replaced by a "u =" legend block.
    uinfo = "The amount was considered to low to name it. "
    indx=1
    for ct, _ in enumerate(new_data['ausBilanzen']):
        for idx, block in enumerate(new_data['ausBilanzen'][ct]['ausBilanzen']):
            if 'passiva' in block.lower():
                currency, unit = get_currency_unit(new_data['ausBilanzen'][ct])
                for entries in new_data['ausBilanzen'][ct]['ausBilanzen'][block]:
                    lvalidpos = ""
                    year = ""
                    if "jahr" in entries:
                        year = entries['jahr'].replace("\xa0", " ")
                        year = year.split(" ")[0]
                        if "*" in year:
                            year = year.split("*")[0]
                    # 'columnId' distinguishes AG vs. group level.
                    companystage = "AG"
                    if 'columnId' in entries:
                        companystage = entries['columnId']
                    comment = replace_geminfo(entries['jahr'], new_data['ausBilanzen'][ct], 'notizen')
                    for idxx, entry in enumerate(entries):
                        if entry == "jahr" or entry == "columnId": continue
                        # entity_check(entry, ['beteiliger', 'ort', 'anteil'])
                        if entries[entry].upper() == "U":
                            entries[entry] = ""
                            comment = uinfo + comment
                        pos = entry
                        if entry[0].isalpha():
                            pos = entry.title()
                            lvalidpos = pos
                        elif entry[0] == ".":
                            pos = lvalidpos + "(" + entry + ")"
                        conn.execute(table.insert(), [
                            {'unternehmenId': new_data['unternehmenId'],
                             'Konzernebene': companystage,
                             'Bilanzposition': pos,
                             'Jahr': year,
                             'Einheit': unit,
                             'Waehrung': currency,
                             'Betrag': entries[entry].replace(' ', "").replace(" ", ""),
                             'Bemerkung': comment,
                             'BemerkungAbschnitt': "",
                             'Rang': indx,
                             }])
                        indx+=1
                break
            if "u =" in block.lower():
                uinfo = block
    return 0
def BilanzSummetable(conn, new_data, table):
    """Insert the balance-sheet TOTAL ('Bilanzsumme') rows.

    Same structure as the Aktiva/Passiva variants: scans 'ausBilanzen'
    for the block containing 'bilanzsumme' and writes one row per
    position/year cell.  "U" cells are blanked and explained via
    ``uinfo``.
    """
    print(table.columns.keys())
    if 'ausBilanzen' not in new_data: return 0
    # Default legend for "U" cells; replaced by a "u =" legend block.
    uinfo = "The amount was considered to low to name it. "
    indx = 1
    for ct, _ in enumerate(new_data['ausBilanzen']):
        for idx, block in enumerate(new_data['ausBilanzen'][ct]['ausBilanzen']):
            if 'bilanzsumme' in block.lower():
                currency, unit = get_currency_unit(new_data['ausBilanzen'][ct])
                for entries in new_data['ausBilanzen'][ct]['ausBilanzen'][block]:
                    lvalidpos = ""
                    year = ""
                    if "jahr" in entries:
                        year = entries['jahr'].replace("\xa0", " ")
                        year = year.split(" ")[0]
                        if "*" in year:
                            year = year.split("*")[0]
                    # 'columnId' distinguishes AG vs. group level.
                    companystage = "AG"
                    if 'columnId' in entries:
                        companystage = entries['columnId']
                    comment = replace_geminfo(entries['jahr'], new_data['ausBilanzen'][ct], 'notizen')
                    for idxx, entry in enumerate(entries):
                        if entry == "jahr" or entry == "columnId": continue
                        # entry_check(entity, ['beteiliger', 'ort', 'anteil'])
                        if entries[entry].upper() == "U":
                            entries[entry] = ""
                            comment = uinfo + comment
                        pos = entry
                        if entry[0].isalpha():
                            pos = entry.title()
                            lvalidpos = pos
                        elif entry[0] == ".":
                            pos = lvalidpos + "(" + entry + ")"
                        conn.execute(table.insert(), [
                            {'unternehmenId': new_data['unternehmenId'],
                             'Konzernebene': companystage,
                             'Bilanzposition': pos,
                             'Jahr': year,
                             'Einheit': unit,
                             'Waehrung': currency,
                             'Betrag': entries[entry].replace(' ', "").replace(" ", ""),
                             'Bemerkung': comment,
                             'BemerkungAbschnitt': "",
                             'Rang': indx,
                             }])
                        indx+=1
                break
            if "u =" in block.lower():
                uinfo = block
    return 0
def Boersennotiztable(conn, new_data, table):
    """Insert the stock-exchange listing ('Boersennotiz') rows.

    Collects the 'notizen_kennzahlen' notes, strips boilerplate markers,
    then matches the note text against the market-name list read from
    ``./dblib/Maerkte`` (one name per line, UTF-8).  One row is written
    per matched market; the full note text is attached only to the LAST
    matched market.
    """
    print(table.columns.keys())
    if 'boersenbewertung' not in new_data: return 0
    for idx, block in enumerate(new_data['boersenbewertung']):
        del_entry(new_data['compare'], ['boersenbewertung', block], ['notizen_kennzahlen', 'notizen', 'marktbetreuer'])
        notes = ""
        comment= ""
        if 'notizen_kennzahlen' in new_data['boersenbewertung'][block]:
            notes += " ".join(new_data['boersenbewertung'][block]['notizen_kennzahlen'])
        # if 'notizen' in new_data['boersenbewertung'][block]:
        #    notes += " ".join(new_data['boersenbewertung'][block]['notizen'])
        # Strip accounting-standard markers and underscores from the note.
        notes= notes.replace("i) gemäß IAS", " ").replace("g) gemäß US-GAAP", " ").replace("Beschäftigte", " ").replace("_","").replace(" "," ").strip()
        if 'marktbetreuer' in new_data['boersenbewertung'][block]:
            # Side effect: remembered on new_data for later tables.
            new_data["Marktbetreuer"] = ", ".join(new_data['boersenbewertung'][block]['marktbetreuer'])
            #TODO-Hint: Obsolete? The information are in "WeitereBemerkungen"
            #comment = "Marktbetreuer: "+new_data["Marktbetreuer"]+", "
        if len(notes) > 1:
            notes = notes[0].upper() + notes[1:]
        maerktelist = []
        with open("./dblib/Maerkte","r", encoding="utf-8") as f:
            for line in f.readlines():
                if line.strip() in notes:
                    maerktelist.append(line.strip())
        comment = ""
        # NOTE: this inner loop reuses (shadows) the outer 'idx'.
        for idx, markt in enumerate(maerktelist):
            if idx == len(maerktelist)-1:
                comment = notes
            conn.execute(table.insert(), [
                {'unternehmenId': new_data['unternehmenId'],
                 'Handelsplatz': markt,
                 'Abschnitt': "",
                 'Bemerkung': comment,
                 'Rang': idx + 1,
                 }])
    return 0
def Dependencetable(conn, new_data, table):
    """Insert one dependence row per WKN entry of the company."""
    print(table.columns.keys())
    if 'dependence' not in new_data:
        return 0
    for wkn_entry in new_data['all_wkn_entry']:
        row = {'unternehmenId': new_data['unternehmenId'],
               'Dependence': new_data['name'],
               'Bezeichnung': wkn_entry['wkn']}
        conn.execute(table.insert(), [row])
    return 0
def Dividendentable(conn, new_data, table):
    """Insert dividend rows per stock exchange and year.

    Heavily heuristic: currency is guessed from the 'dividenden_notiz'
    (text after "in", or a known currency code), the dividend coupon
    number from a "Sch..." remark, and mixed "value unit" dividend cells
    are split apart.  Extra per-entry fields are folded into the
    BemerkungAbschnitt column.
    """
    print(table.columns.keys())
    if 'boersenbewertung' not in new_data: return 0
    for block in new_data['boersenbewertung']:
        if 'dividenden' not in new_data['boersenbewertung'][block]: continue
        del_entry(new_data['compare'], ['boersenbewertung', block],
                  ['dividenden', 'dividenden_notiz', "dividenden_bemerkungen", "wkns", "isins"])
        for idx, entries in enumerate(new_data['boersenbewertung'][block]['dividenden']):
            entry_check(entries, ['jahr', 'dividende', 'steuerguthaben', 'bonus'])
            entry_check(new_data['boersenbewertung'][block], ['dividenden_notiz','dividenden_bemerkungen'])
            # First token of the year field; strip a ")" footnote marker.
            year = entries['jahr'].replace("\xa0", " ")
            year = year.split(" ")[0]
            if ")" in year:
                year = year.strip().split(")")[0][:-1]
            comment = new_data['boersenbewertung'][block]['dividenden_notiz']
            if isinstance(comment,list): comment = " ".join(comment)
            # Unknown per-entry fields are collected as "key: value" text.
            extracomment = ""
            for entry in entries:
                if entry not in ['jahr', 'dividende', 'steuerguthaben', 'bonus']:
                    extracomment += entry+": "+entries[entry]+", "
            extracomment = replace_geminfo(entries['jahr'], new_data['boersenbewertung'][block], 'dividenden_bemerkungen')+" "+extracomment
            currency, div_bemerk= "", ""
            if new_data['boersenbewertung'][block]["dividenden_bemerkungen"] != "":
                div_bemerk = new_data['boersenbewertung'][block]["dividenden_bemerkungen"][0]
            # NOTE: 'type' shadows the builtin here (kept as-is).
            Divnr, type = "", ""
            if 'wkns' in new_data['boersenbewertung'][block]:
                if 'aktienart' in new_data['boersenbewertung'][block]["wkns"][0]:
                    type = new_data['boersenbewertung'][block]["wkns"][0]["aktienart"]
            elif 'isins' in new_data['boersenbewertung'][block]:
                if 'aktienart' in new_data['boersenbewertung'][block]["isins"][0]:
                    type = new_data['boersenbewertung'][block]["isins"][0]["aktienart"]
            # A "Sch..." remark carries the dividend coupon number.
            if div_bemerk.find("Sch") != -1:
                Divnrsplit = div_bemerk.strip().split(" ")
                Divnr = Divnrsplit[-1] if len(Divnrsplit) > 1 else Divnrsplit[0]
            # Currency guessing: from the note's "in <CUR>" phrase, a known
            # code within the note, or a "Bereinigte Kurse (... in CUR)" remark.
            if new_data['boersenbewertung'][block]["dividenden_notiz"] != "":
                currency = new_data['boersenbewertung'][block]["dividenden_notiz"].split("in")[1]
                if "in " in new_data['boersenbewertung'][block]["dividenden_notiz"].strip()[:3]:
                    currency = new_data['boersenbewertung'][block]["dividenden_notiz"].replace("in ","").strip().split(" ")[0]
                else:
                    for cur in ["TEUR","EUR","USD","DM"]:
                        if cur in new_data['boersenbewertung'][block]["dividenden_notiz"].upper():
                            currency = cur
            elif "dividenden_bemerkungen" in new_data['boersenbewertung'][block]:
                for entry in new_data['boersenbewertung'][block]["dividenden_bemerkungen"]:
                    if "Bereinigte Kurse" in entry:
                        try:
                            currency = entry.split("in")[-1].replace(")", "")
                        except Exception:
                            pass
            # Split mixed "value unit" dividend cells.
            dividende = entries["dividende"]
            if len(entries["dividende"].split(" ")) > 1:
                if "%" in entries["dividende"]:
                    dividende = entries["dividende"].split(" ")[0].strip()
                    currency = entries["dividende"].split(" ")[-1].strip()
                elif ")" in entries["dividende"]:
                    dividende = entries["dividende"].split(" ")[0].strip()
                    extracomment += "Zusatz: "+entries["dividende"].split(" ")[-1]+" "
                else:
                    dividende = entries["dividende"].split(" ")[-1].strip()
                    currency = entries["dividende"].split(" ")[0].upper()
            #Clean the data
            if len(currency) > 1:
                if ";" == currency[-1] or "," == currency[-1] or "/" == currency[-1] or ":" == currency[-1]:
                    currency = currency[:-1]
            stg = entries["steuerguthaben"]
            if len(entries["steuerguthaben"].split(" ")) > 1:
                if entries["steuerguthaben"].split(" ")[0].upper() == currency:
                    stg = entries["steuerguthaben"].split(" ")[1]
            extracomment += " Dividendbemerkungen: "+" ".join(new_data['boersenbewertung'][block]["dividenden_bemerkungen"])
            bonus = entries["bonus"].replace(currency,"").replace(currency.lower(),"")
            conn.execute(table.insert(), [
                {'unternehmenId': new_data['unternehmenId'],
                 'Jahr': year,
                 'Aktienart': type.strip(),
                 'Nennwert': "",
                 'Dividende': dividende,
                 'Bonus': bonus,
                 'St_G': stg,
                 'Waehrung': currency,
                 'Div_Sch_Nr': Divnr,
                 'Bemerkung': comment,
                 'BemerkungAbschnitt': extracomment.strip(),
                 'Abschnitt': "",
                 'Rang': idx + 1,
                 }])
    return 0
def Geschaeftsjahrtable(conn, new_data, table):
    """Derive the fiscal-year ('Geschaeftsjahr') row from 'sonstigeAngaben'."""
    print(table.columns.keys())
    if 'sonstigeAngaben' not in new_data:
        return 0
    del_entry(new_data['compare'], [], ['sonstigeAngaben'])
    kalenderjahr = "0"
    fy_text, fy_start, fy_end = "", "", ""
    for entry in new_data['sonstigeAngaben']:
        if 'jahr' not in entry[0]:
            continue
        fy_text = " ".join(entry[1:])
        parts = fy_text.split("-")
        if len(parts) > 1:
            # Range like "1.4. - 31.3.": keep only non-alphabetic chars.
            fy_start = "".join(c for c in parts[0] if not c.isalpha())
            fy_end = "".join(c for c in parts[1] if not c.isalpha())
            fy_text = ""
    if "Kalenderjahr" in fy_text or "Kalenderjahr" in fy_start:
        kalenderjahr = "1"
        fy_text = "Kalenderjahr"
        #GJA = GJA.replace("Kalenderjahr", "")
    # Only write a row when at least one component was found.
    if (fy_text + fy_start + fy_end) != "":
        conn.execute(table.insert(), [
            {'unternehmenId': new_data['unternehmenId'],
             'Kalenderjahr': kalenderjahr,
             'Geschaeftsjahresanfang': fy_start,
             'Geschaeftsjahresende': fy_end,
             'Bemerkung': fy_text,
             'Abschnitt': "",
             }])
    return 0
def Geschaeftsleitungtable(conn, new_data, table):
    """Insert the management-board ('Geschaeftsleitung') members."""
    print(table.columns.keys())
    if 'geschleitung' not in new_data:
        return 0
    person_fields = ['lastName', 'firstName', 'title', 'cityAcc', 'funct']
    for rank, person in enumerate(new_data['geschleitung'], start=1):
        # Mark the fields as consumed and make sure they all exist
        # (fresh list copies, in case the helpers mutate their argument).
        del_entry(new_data['compare'], ['geschleitung', rank - 1], person_fields[:])
        entry_check(person, person_fields[:])
        conn.execute(table.insert(), [
            {'unternehmenId': new_data['unternehmenId'],
             'Name': person['lastName'],
             'Vorname': person['firstName'],
             'Titel': person['title'],
             'Ort': person['cityAcc'],
             'Funktion': person['funct'],
             'Bemerkung': "",
             'Rang': rank,
             }])
    return 0
def Grundkapitaltable(conn, new_data, table):
    """Insert the share-capital ('Grundkapital') rows from 'shareinfo'."""
    print(table.columns.keys())
    if 'shareinfo' not in new_data:
        return 0
    del_entry(new_data['compare'], [], ['grundkapital'])
    for rank, share in enumerate(new_data["shareinfo"], start=1):
        combined = share['amount'] + share['currency'] + share['info']
        if combined.strip() == "":
            continue
        # An "amount" that starts with a letter is really free text, so
        # shift it over into the info column.
        if share["amount"] != "" and share["amount"].strip()[0].isalpha():
            share["info"] = share["amount"] + " " + share["info"]
            share["amount"] = ""
        conn.execute(table.insert(), [
            {'unternehmenId': new_data['unternehmenId'],
             'Hoehe': share['amount'],
             'Waehrung': share['currency'],
             'Bemerkung': share['info'],
             'Rang': rank,
             }])
    return 0
def GuVtable(conn, new_data, table):
    """Insert profit-and-loss ('GuV') rows.

    A legacy single-dict layout (P&L directly under 'ausGewinnUndVerlust')
    is first re-keyed under index 0 so both layouts iterate the same way.
    "U" cells mean the amount was too low to be stated and are blanked.
    Positions starting with "." attach to the last alphabetic position.
    """
    print(table.columns.keys())
    if 'ausGewinnUndVerlust' not in new_data: return 0
    if 'ausGewinnUndVerlustRechnung' in new_data['ausGewinnUndVerlust']:
        # Legacy layout: wrap the whole dict as element 0.
        del_keys = list(new_data['ausGewinnUndVerlust'].keys())
        new_data['ausGewinnUndVerlust'][0] = deepcopy(new_data['ausGewinnUndVerlust'])
        for _ in del_keys:
            del new_data['ausGewinnUndVerlust'][_]
    idx = 1
    for ct, _ in enumerate(new_data['ausGewinnUndVerlust']):
        if 'ausGewinnUndVerlustRechnung' not in new_data['ausGewinnUndVerlust'][ct]: continue
        for block in new_data['ausGewinnUndVerlust'][ct]['ausGewinnUndVerlustRechnung']:
            del_entry(new_data['compare'], ['ausGewinnUndVerlust', ct],
                      ['ausGewinnUndVerlustRechnung', 'notizen', 'ausGewinnUndVerlust', 'waehrung'])
            currency, unit = get_currency_unit(new_data['ausGewinnUndVerlust'][ct])
            entry_check(block, ['columnId', 'jahr'])
            # Everything except 'jahr'/'columnId' is a P&L position.
            blockkey = list(block.keys())
            blockkey.remove('jahr')
            blockkey.remove('columnId')
            year = block['jahr'].replace("\xa0", " ")
            year = year.split(" ")[0]
            if "*" in year:
                year = year.split("*")[0]
            lvalidpos = ""
            for entry in blockkey:
                comment = replace_geminfo(block['jahr'], new_data['ausGewinnUndVerlust'][ct], 'notizen')
                pos = ""
                if block[entry].upper() == "U":
                    block[entry] = ""
                    comment = "The amount was considered to low to name it. " + comment
                if entry[0].isalpha():
                    pos = entry.title()
                    lvalidpos = pos
                elif entry[0] == ".":
                    pos = lvalidpos + "(" + entry + ")"
                # Rows with an unrecognized position name are skipped.
                if pos != "":
                    conn.execute(table.insert(), [
                        {'unternehmenId': new_data['unternehmenId'],
                         'Konzernebene': block['columnId'],
                         'GuVPosition': pos,
                         'Jahr': year,
                         'Einheit': unit,
                         'Waehrung': currency,
                         'Betrag': block[entry].replace(' ', "").replace(" ", ""),
                         'Bemerkung': comment,
                         'BemerkungAbschnitt': "",
                         'Rang': idx,
                         }])
                    idx+=1
    return 0
def Kapitalarttable(conn, new_data, table):
    """Insert the capital-kind ('Kapitalart') rows.

    Collects authorized capital, (current/authorized) 'Genusskapital'
    and conditional capital ('bedingtesKapital'/'bedingtesKapital2')
    entries into parallel ``entries``/``entry_names`` lists, then parses
    each collected amount into currency (non-digit/punctuation part) and
    amount (digit part) and writes one row per entry.
    """
    print(table.columns.keys())
    entries = []
    entry_names = []
    del_entry(new_data['compare'], [],
              ['genehmigtesKapital', 'bedingtesKapital', 'besBezugsrechte', 'ermächtigungAktienerwerb',
               'bedingtesKapital2'])
    if 'genehmigtesKapital' in new_data:
        if new_data['genehmigtesKapital']:
            entries.append(new_data['genehmigtesKapital']['genehmKapital'])
            entry_names.append('Genehmigtes Kapital')
    if 'genehmigtesGenusKapital' in new_data:
        if new_data['genehmigtesGenusKapital']:
            entries.append(new_data['genehmigtesGenusKapital']['genehmKapital'])
            entry_names.append('Genehmigtes Genusskapital')
    if 'derzeitigesGenusKapital' in new_data:
        if new_data['derzeitigesGenusKapital']:
            # Flatten the (possibly nested) remark lists into one string.
            text = ""
            if "bemerkungen" in new_data['derzeitigesGenusKapital']:
                for entry in new_data['derzeitigesGenusKapital']["bemerkungen"]:
                    if isinstance(entry, list):
                        text += " ".join(entry)+" "
                    else:
                        text += entry+ " "
            entries.append({'betrag': new_data['derzeitigesGenusKapital']['betrag'], 'bemerkung': text})
            entry_names.append('Derzeitiges Genusskapital')
    if 'bedingtesKapital' in new_data:
        if new_data['bedingtesKapital']:
            amount = ""
            if new_data['bedingtesKapital']['bedingkapital']["betrag"] != "":
                amount = new_data['bedingtesKapital']['bedingkapital']["betrag"].strip()
            comment = ""
            # Entries with their own amount become individual rows;
            # remark-only entries accumulate into one trailing row.
            for idx, entry in enumerate(new_data['bedingtesKapital']['bedingkapital']["eintraege"]):
                if entry["betrag_einzel"] != "":
                    comment = ""
                    if len(entry["betrag_einzel"].strip().split(" ")) > 1:
                        currency = entry["betrag_einzel"].strip().split(" ")[0]
                        amount = currency + " "+entry["betrag_einzel"].replace(currency, "").strip().split("Gem")[0]
                        comment += " ,"+entry["betrag_einzel"].replace(amount,"").replace(currency,"")
                    else:
                        amount = entry["betrag_einzel"].strip()
                    comment = entry["bemerkung"]+comment
                    entries.append({'betrag': amount, 'bemerkung': comment})
                    entry_names.append('Bedingtes Kapital')
                else:
                    comment += entry["bemerkung"]+" "
                    if idx == len(new_data['bedingtesKapital']['bedingkapital']["eintraege"])-1:
                        entries.append({'betrag':amount,'bemerkung':comment})
                        entry_names.append('Bedingtes Kapital')
    if 'bedingtesKapital2' in new_data:
        # NOTE(review): this branch reads 'bedingtesKapital' (not
        # 'bedingtesKapital2'), and its final-index check uses len(...)
        # instead of len(...)-1 and so never fires -- confirm intent.
        if new_data['bedingtesKapital']:
            amount = ""
            if new_data['bedingtesKapital']['bedingkapital']["betrag"] != None:
                amount = new_data['bedingtesKapital']['bedingkapital']["betrag"]
            comment = ""
            for idx, entry in enumerate(new_data['bedingtesKapital']['bedingkapital']["eintraege"]):
                if entry["betrag_einzel"] != None:
                    amount = entry["betrag_einzel"]
                    comment = entry["bemerkung"]
                    entries.append({'betrag': amount, 'bemerkung': comment})
                    entry_names.append('Bedingtes Kapital')
                else:
                    comment += entry["bemerkung"]+" "
                    if idx == len(new_data['bedingtesKapital']['bedingkapital']["eintraege"]):
                        entries.append({'betrag':amount,'bemerkung':comment})
                        entry_names.append('Bedingtes Kapital')
    if entries:
        for idx, entry in enumerate(entries):
            entry_check(entry, ['betrag', 'bemerkung'])
            # Split "betrag" into the textual currency part and the
            # numeric amount part.
            currency, amount = "", ""
            if entry['betrag'] != "":
                currency = entry['betrag'].translate(str.maketrans('', '', string.punctuation + string.digits)).strip()
                amount = "".join([char for char in entry['betrag'] if char.isdigit() or char in [".-"]])
                #for feat in currency.split():
                #    if feat.strip() in ["Mio","Mrd","Tsd","Brd"]:
                #        currency = currency.replace(feat, '')
                #    else:
                #        amount = amount.replace(feat, '')
            #TODO: Experimental!!
            # Fallback: try "CUR 1234 ..." at the start of the remark.
            if currency+amount == "":
                scomment = entry["bemerkung"].split(" ")
                if len(scomment)> 2:
                    if scomment[1][0].isdigit():
                        currency = scomment[0]
                        amount = scomment[1]
            conn.execute(table.insert(), [
                {'unternehmenId': new_data['unternehmenId'],
                 'Kapitalart': entry_names[idx],
                 'Hoehe': amount.replace(' ', "").replace(" ", ""),
                 'Waehrung': currency,
                 'Bemerkung': entry['bemerkung'],
                 'Rang': idx + 1,
                 }])
    return 0
def Kapitalentwicklungtable(conn, new_data, table):
    """Insert the capital-development history (regular and 'Genusskapital')."""
    print(table.columns.keys())
    if 'kapitalEntwicklung' not in new_data:
        return 0
    del_entry(new_data['compare'], [], ['kapitalEntwicklung'])
    rank = 1
    for year_block in new_data['kapitalEntwicklung']:
        if 'eintraege' not in year_block:
            continue
        for item in year_block['eintraege']:
            entry_check(item, ['jahr', 'art', 'text', 'betrag'])
            text = item['text'] if isinstance(item['text'], str) else " ".join(item['text'])
            # Skip entries that carry no information at all.
            if (item['art'] + item['betrag'] + text).strip() == "":
                continue
            conn.execute(table.insert(), [
                {'unternehmenId': new_data['unternehmenId'],
                 'Jahr': year_block['jahr'],
                 'Text': "Art: " + item['art'] + ", Kapital: " + item['betrag'] + ", Info: " + text,
                 'Bemerkung': "Kapital",
                 'Rang': rank,
                 }])
            rank += 1
    if 'entwicklungDesGenusKapitals' not in new_data:
        return 0
    # 'Genusskapital' rows continue the running rank of the block above.
    for item in new_data['entwicklungDesGenusKapitals']:
        entry_check(item, ['jahr', 'text'])
        text = item['text'] if isinstance(item['text'], str) else " ".join(item['text'])
        conn.execute(table.insert(), [
            {'unternehmenId': new_data['unternehmenId'],
             'Jahr': item['jahr'],
             'Text': text,
             'Bemerkung': "Genußkapital",
             'Rang': rank,
             }])
        rank += 1
    return 0
def Kennzahlentable(conn, new_data, table):
    """Insert the key-figure ('Kennzahlen') rows per exchange, figure and year.

    Handles several input shapes, which is why the nesting is deep:
    figure cells may be plain strings (wrapped into one-key dicts) or
    dicts of named sub-values; currency/unit come either from the
    'waehrungsinfo' records matching figure and sub-value name, or from
    the trailing 'notizen_kennzahlen' note.  "Beschäftigte" is excluded
    here (handled by Beschaeftigtetable).  Not a pretty solution.
    """
    print(table.columns.keys())
    if 'boersenbewertung' not in new_data: return 0
    for boerse in new_data['boersenbewertung']:
        if 'kennzahlen' not in new_data['boersenbewertung'][boerse]: continue
        featkeys = list(new_data['boersenbewertung'][boerse]['kennzahlen'].keys())
        if "Beschäftigte" in featkeys: featkeys.remove("Beschäftigte")
        addid = []
        addid.append(0)
        for id, feat in enumerate(featkeys):
            for idx, block in enumerate(new_data['boersenbewertung'][boerse]['kennzahlen'][feat]):
                del_entry(new_data['compare'], ['boersenbewertung', boerse,'kennzahlen'], [feat])
                entry_check(block, ['jahr'])
                entry_check(new_data['boersenbewertung'][boerse], ['waehrungsinfo', 'notizen_kennzahlen'])
                del_entry(new_data['compare'], ['boersenbewertung', boerse], ['waehrungsinfo', 'notizen_kennzahlen'])
                waehrungsinfo = ""
                keys = list(block.keys())
                try:
                    keys.remove('jahr')
                except Exception:
                    pass
                unit, currency = "", ""
                comment = ""
                # Default currency from the last "... in <CUR>" note line.
                if isinstance(new_data['boersenbewertung'][boerse]['notizen_kennzahlen'], list):
                    if "in" in new_data['boersenbewertung'][boerse]['notizen_kennzahlen'][-1]:
                        currency = new_data['boersenbewertung'][boerse]['notizen_kennzahlen'][-1].split("in")[-1].replace(
                            ")", "").strip()
                for idxx, entry in enumerate(keys):
                    # Normalize plain string cells into one-key dicts.
                    if isinstance(block[entry],str):
                        block[entry] = {entry: block[entry]}
                    for idxxx, name in enumerate(block[entry]):
                        # Find the currency info record for this figure/sub-value.
                        if 'waehrungsinfo' in new_data['boersenbewertung'][boerse]:
                            for infolist in new_data['boersenbewertung'][boerse]['waehrungsinfo']:
                                if infolist['name'] == feat:
                                    for info in infolist['eintraege']:
                                        if info["name"] == name:
                                            waehrungsinfo = info["waehrung"]
                        if isinstance(waehrungsinfo,str):
                            cuinfo = get_currencyinfo(["("+waehrungsinfo+")"])
                        else:
                            cuinfo = get_currencyinfo(waehrungsinfo)
                        if cuinfo:
                            if len(keys) > 1 or len(block[entry]) > len(keys):
                                if len(cuinfo) == 1:
                                    unit = cuinfo[0]['unit']
                                    currency = cuinfo[0]['currency']
                                else:
                                    unit = cuinfo[idxx]['unit']
                                    currency = cuinfo[idxx]['currency']
                            else:
                                unit = cuinfo[idxx]['unit']
                                currency = cuinfo[idxx]['currency']
                            currency = currency.replace("in ","").strip()
                        year = block['jahr'].replace("\xa0", " ")
                        year = year.split(" ")[0]
                        if "*" in year:
                            year = year.split("*")[0]
                        comment = replace_geminfo(block['jahr'], new_data['boersenbewertung'][boerse],
                                                  'notizen')
                        entryinfo = ""
                        # Strip a "(...)" suffix from the displayed name.
                        pprtname = name
                        if "(" in pprtname:
                            pprtname = pprtname.split("(")[0].strip()
                        if "gesamt" in name.lower():
                            entryinfo = " " + cuinfo[0]["text"]
                        conn.execute(table.insert(), [
                            {'unternehmenId': new_data['unternehmenId'],
                             'Kennzahlenposition': pprtname+entryinfo,
                             'Jahr': year,
                             'Einheit': unit,
                             'W\xe4hrung': currency,
                             'Betrag': block[entry][name].replace(' ', "").replace(" ", ""),
                             'Bemerkung': comment,
                             'Rang': idxxx + 1,
                             }])
    return 0
def Maintable(conn, new_data, table):
    """Insert into the main company table -- currently DISABLED.

    The body after the bare ``return`` was unreachable in the original
    code; the insert is preserved below as a comment for re-activation.
    Note this function returns ``None``, unlike the other table functions
    which return 0 (kept as-is for backward compatibility).
    """
    print(table.columns.keys())
    return
    # Disabled (was unreachable dead code):
    # if new_data['reference'] == new_data['unternehmenId']:
    #     nextid = get_lastid(table, conn)
    #     new_data['id'] = nextid
    #     conn.execute(table.insert(), [
    #         {'name': new_data['name'],
    #          'referenz': new_data['reference'],
    #          'Erstnotierung': new_data['year'],
    #          'Letztnotierung': "",
    #          'imAktienfuehrer': new_data['year'],
    #          'Bemerkung': "",
    #          'Indexzugeh\xf6rigkeit': "",
    #          'id': nextid,
    #         }])
    # return 0
def MainRelationtable(conn, new_data, table):
    """Insert the company/reference relation row -- currently DISABLED.

    The body after the bare ``return`` was unreachable in the original
    code; it is preserved below as a comment for re-activation.  Returns
    ``None`` (kept as-is for backward compatibility).
    """
    print(table.columns.keys())
    return
    # Disabled (was unreachable dead code):
    # #if new_data['reference'] != new_data['unternehmenId']:
    # conn.execute(table.insert(), [
    #     {'referenz': new_data["reference"],
    #      'weiteresAuftreten': new_data["unternehmenId"],
    #      'Unternehmen': new_data['name'],
    #      'Erscheinungsjahr': int(new_data['year'].split("-")[0]),
    #      'id': new_data['id'],
    #     }])
    # return 0
def Organbezuegetable(conn, new_data, table):
    """Insert remuneration ('Organbezuege') rows per board/organ.

    Combined entries that mention both boards are split: the second part
    is APPENDED to ``new_data['organbezuege']`` while it is being
    enumerated, so the appended sub-entry is picked up by a later
    iteration of the same loop (intentional in CPython).
    """
    print(table.columns.keys())
    if 'organbezuege' not in new_data: return 0
    del_entry(new_data['compare'], [], ['organbezuege'])
    for idx, _ in enumerate(new_data['organbezuege']):
        for entry in new_data['organbezuege'][idx]:
            entry_check(entry, ['bezuege', 'organ'])
            bezuege = entry['bezuege']
            # Strip punctuation/digits from the organ name.
            organ = entry['organ'].translate(str.maketrans('', '', string.punctuation + string.digits)).strip()
            if 'bemerkung' in entry and bezuege == "":
                # Fall back to the remark text; guess the organ from it.
                bezuege = entry['bemerkung']
                if "ufsicht" in entry['bemerkung'] and organ == "":
                    organ = "Aufsichtsrat"
                elif "Vorstand" in entry['bemerkung'] and organ == "":
                    organ = "Vorstand"
            else:
                # NOTE(review): both split branches label the appended
                # part "Aufsichtsrat" -- for the "Vorstand" split this
                # looks like a copy/paste slip; confirm intent.
                if "Aufsichtsrat" in entry['bezuege']:
                    bezuege = entry['bezuege'].split("Aufsichtsrat")[0].replace(", ","").strip()
                    new_data['organbezuege'].append([{'organ':"Aufsichtsrat",'bezuege':entry['bezuege'].split("Aufsichtsrat")[1].replace(", ","").strip()}])
                elif "Vorstand" in entry['bezuege']:
                    bezuege = entry['bezuege'].split("Vorstand")[0].replace(", ","").strip()
                    new_data['organbezuege'].append([{'organ':"Aufsichtsrat",'bezuege':entry['bezuege'].split("Vorstand")[1].replace(", ","").strip()}])
            if bezuege == "":continue
            if organ == "":
                organ = "Organbezuege"
            conn.execute(table.insert(), [
                {'unternehmenId': new_data['unternehmenId'],
                 'Organ': organ,
                 'Bez\xfcge': bezuege,
                 }])
    return 0
def Stimmrechttable(conn, new_data, table):
    """Fill the 'Stimmrecht' (voting rights) table.

    One row is inserted per entry in ``new_data['shareinfo']`` (built by
    :func:`get_shareinfo`).  The Stimmrecht entry can originate either from
    the first layer of the record or from the "ausgegebenes Kapital" block.

    :param conn: open DB connection (provides execute())
    :param new_data: parsed company record; must contain 'unternehmenId'
        when 'shareinfo' is present
    :param table: sqlalchemy table object for the Stimmrecht table
    :return: 0 (always)
    """
    print(table.columns.keys())
    if "shareinfo" not in new_data:
        return 0
    for rank, si in enumerate(new_data["shareinfo"], start=1):
        # Prepend the total amount to the free-text remark when present.
        if si["amount"] != "":
            info = f"Gesamtbetrag: {si['amount']}," + si["info"]
        else:
            info = si["info"]
        conn.execute(table.insert(), [
            {'unternehmenId': new_data['unternehmenId'],
             'Aktienart': si["type"],
             # shareinfo dicts built by get_shareinfo() carry the piece count
             # under 'number'; fall back to it when 'stuck' is absent (the
             # plain si["stuck"] lookup raised KeyError for those dicts).
             'Stueck': si.get("stuck", si.get("number", "")),
             'Stimmzahl': si["voice"],
             'Nennwert': si["nw"],
             'Waehrung': si["currency"],
             'Bemerkung': info,
             'Rang': rank,
             }])
    return 0
def get_type(content):
    """Extract the share type ("...aktie(n)") from a free-text fragment.

    Returns the first whitespace-delimited "...aktie"/"...aktien" word found
    in *content* (hyphenated compounds included).  If none is present but a
    "...tück..." word such as "Stücke" occurs, the generic type
    "Stückaktien" is returned; otherwise an empty string.
    """
    share_match = re.search(r"\s?([\w\-]*aktie[n]?)\s", content)
    if share_match:
        return share_match.group(1)
    if re.search(r"\s?(\w*tück\w*)\s", content):
        return "Stückaktien"
    return ""
def exract_stuecklung(content, VERBOSE = False):
    """Parse a free-text "Stückelung" (denomination) description.

    Splits *content* into number groups such as "77 000 Stücke zu je DM 50"
    and returns one dict per group with keys Anzahl / Aktienart / Waehrung /
    Betrag / nw (nw is False when a no-par-value marker such as "o.N." was
    seen).  Groups without an amount are handled by a fallback pass.

    :param content: raw denomination text
    :param VERBOSE: print intermediate parsing steps for debugging
    :return: list of result dicts (may be empty)
    """
    if VERBOSE:
        print(f"Input: {content}\n")
    results = []
    # Normalise the many phrasings of "zu <Betrag>".
    content = content.replace("Nennwert von", "zu").replace("(rechnerischer Wert","zu").replace("jeweils zu", "zu").replace("zu je","zu").replace(" je "," zu ")
    #nw = True
    #for wn in ["o.N.","o.N","nennwertlose", "nennwertlos", "ohne Nennwert"]:
    #    content = content.replace(wn, "")
    #nw = False
    # Drop thousands separators ("3.500" -> "3 500").
    re_delpunct = re.compile(r"(\d)(\.)(\d)")
    if re_delpunct.search(content):
        for finding in re_delpunct.finditer(content):
            # NOTE(review): finding[2] is always "." so this replaces every
            # dot in the string (also hits e.g. "o.N.") — confirm intended.
            content = content.replace(finding[2], " ")
    # Remove fiscal-year fragments like "98/99".
    re_date = re.compile(r'(\d\d/\d\d)')
    if re_date.search(content):
        for finding in re_date.finditer(content):
            content = content.replace(finding[0],"")
    if "zu" in content:
        # Groups of the form "<Stück> <Aktienart> zu <Währung> <Betrag>".
        re_nmbgrps = re.compile(r'(\d[\d\s]*)(\D*)(zu){1,}(\D*)(\d{1,},{1,}[\d-]{1,})')
        for finding in re_nmbgrps.finditer(content):
            type = get_type(finding[0])
            nw = True
            wnreplace = ""
            number = finding[1]
            amount = finding[5].replace(",-", "")
            unitreg = re.compile(r'(Mio|Mrd|Brd)')
            if unitreg.search(content):
                # Carry the magnitude unit over to the piece count.
                number += f" {unitreg.search(content)[0]}"
            # NOTE(review): this loop breaks unconditionally on the first
            # marker, so nw is always set False here — probably missing an
            # "if wn in finding[2]:" guard; confirm before relying on nw.
            for wn in ["o.N.", "o.N", "nennwertlose", "nennwertlos", "ohne Nennwert"]:
                wnreplace = finding[2].replace(wn, "")
                nw = False
                break
            # Consume the matched group so the fallback pass below only sees
            # leftovers.
            content = content.replace(finding[0],"")
            results.append({"Anzahl": number,
                            "Aktienart": type,
                            "Waehrung": finding[4].strip(),
                            "Betrag": amount,
                            "nw": nw
                            })
            if VERBOSE:
                print("Anzahl: "+finding[1])
                print("Aktie: "+finding[2].replace(wnreplace,"").replace("zum rechn. ","").replace("und","").strip(";, "))
                print("zu: "+finding[3])
                print("Waehrung: "+finding[4].strip())
                print("Betrag: "+finding[5].replace(",-","")+"\n")
    # Fallback: bare "<Stück> <Aktienart>" groups without an amount.
    if content != "" and "ISIN" not in content:
        re_nmbgrps = re.compile(r'(\d[\d\s]*)(\D*)')
        for finding in re_nmbgrps.finditer(content):
            nw = True
            wnreplace = ""
            # NOTE(review): unconditional break as above — nw always False.
            for wn in ["o.N.", "o.N", "nennwertlose", "nennwertlos", "ohne Nennwert"]:
                wnreplace = wn
                nw = False
                break
            content = content.replace(finding[0],"")
            type = get_type(finding[0])
            results.append({"Anzahl": finding[1],
                            "Aktienart": type,
                            "nw": nw
                            })
            if VERBOSE:
                print("Stueck: "+finding[1])
                print("Aktie: "+finding[2].replace(wnreplace,"").replace("zum rechn. ","").replace("und","").strip(";, ")+"\n")
    if VERBOSE:
        print(f"Rest: {content}\n\n")
    return results
def Stueckelungtable(conn, new_data, table):
    """Fill the 'Stueckelung' (share denomination) table.

    One row is inserted per entry in ``new_data['shareinfo']`` (built by
    :func:`get_shareinfo`).

    :param conn: open DB connection (provides execute())
    :param new_data: parsed company record; must contain 'unternehmenId'
        when 'shareinfo' is present
    :param table: sqlalchemy table object for the Stueckelung table
    :return: 0 (always)
    """
    print(table.columns.keys())
    if "shareinfo" not in new_data:
        return 0
    for rank, si in enumerate(new_data["shareinfo"], start=1):
        # Prepend the total amount to the free-text remark when present.
        if si["amount"] != "":
            info = f"Gesamtbetrag: {si['amount']}," + si["info"]
        else:
            info = si["info"]
        conn.execute(table.insert(), [
            {'unternehmenId': new_data['unternehmenId'],
             'Aktienart': si["type"],
             'Anzahl': si["number"],
             # get_shareinfo() stores the nominal value under 'nw'; accept a
             # pre-computed 'nomval' when provided (the plain si["nomval"]
             # lookup raised KeyError for get_shareinfo() dicts).
             'Nominalwert': si.get("nomval", si.get("nw", "")),
             'Waehrung': si["currency"],
             'Bemerkung': info,
             'Rang': rank,
             }])
    return 0
def Unternehmentable(conn, new_data, table):
    """Fill the central 'Unternehmen' (company master data) table.

    Gathers master data (seat, auditors, general meeting, IR/PR contacts,
    founding year, activity description, contract remarks) from the record
    and inserts a single row.

    :param conn: open DB connection (provides execute())
    :param new_data: parsed company record (entry_check() may add missing keys)
    :param table: sqlalchemy table object for the Unternehmen table
    :return: 0 (always)
    """
    print(table.columns.keys())
    del_entry(new_data['compare'], [],
              ['Sitz', 'investorRelations', 'publicRelations', 'established_year', 'activity_description', 'name'])
    # WP = auditors, HV = general meeting, GJ = fiscal year, SD = master data
    WP, HV, GJ, SD, investorRelations, publicRelations = "", "", "", "", "", ""
    if 'sonstigeAngaben' in new_data:
        # Substring matching tolerates spelling/case variants of the labels
        # (Wirtschaftsprüfer, Hauptversammlung, Geschäftsjahr).
        for entry in new_data['sonstigeAngaben']:
            if entry[0].find('irtschaft') != -1:
                WP = entry[1]
            if entry[0].find('ersammlun') != -1:
                HV = entry[1]
            if entry[0].find('jahr') != -1:
                # NOTE(review): GJ is collected but never written to the table
                # below — confirm whether that is intended.
                GJ = entry[1]
    # Best effort: 'Sitz' may be missing or irregularly shaped.
    try:
        for block in new_data['Sitz']:
            entry_check(block, ['type'])
            if 'origpost' in block:
                SD += block['type']+": "+block['origpost']+" "
            else:
                for x, y in block.items():
                    if isinstance(y, list):
                        for yy in y:
                            SD += x + ": " + yy + ", "
                    else:
                        SD += x + ": " + y + ", "
    except Exception:
        # Was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt are
        # no longer swallowed.  Malformed 'Sitz' data is simply skipped.
        pass
    if 'investorRelations' in new_data:
        for entry in new_data['investorRelations']:
            investorRelations += get_infostring(entry)
        investorRelations = investorRelations.strip().strip(string.punctuation)
    if 'publicRelations' in new_data:
        for entry in new_data['publicRelations']:
            publicRelations += get_infostring(entry)
        publicRelations = publicRelations.strip().strip(string.punctuation)
    comment = ""
    if "unternehmensVertraege" in new_data:
        # NOTE(review): "Unternehmnensverträge" is misspelled but kept, since
        # downstream consumers may match on the existing label text.
        comment = "Unternehmnensverträge: "+" | ".join(new_data["unternehmensVertraege"])
    entry_check(new_data, ['established_year', 'activity_description'])
    conn.execute(table.insert(), [
        {'unternehmenId': new_data['unternehmenId'],
         'Unternehmen': new_data['name'],
         'Stammdaten': SD.replace("Sitz: ",""),
         'Taetigkeitsgebiet': new_data['activity_description'],
         'Gruendungsjahr': new_data['established_year'],
         'AGseit': "",
         'InvestorRelations': investorRelations,
         'PublicRelations': publicRelations,
         'Hauptversammlung': HV,
         'WP': WP,
         'Erscheinungsjahr': new_data['year'],
         'Startseite': "",
         'Bemerkung': comment,
         }])
    return 0
def Volumetable(conn, new_data, table):
    """Placeholder for the 'Volume' table import.

    Inserting volume rows is not necessary for the CD data set (see TODO),
    so this helper only logs the table layout and returns.

    :param conn: open DB connection (unused here)
    :param new_data: parsed company record (unused here)
    :param table: sqlalchemy table object for the Volume table
    :return: 0 (always)
    """
    print(table.columns.keys())
    # TODO: For cds not necassary
    # NOTE(review): an unreachable loop after the return used to insert
    # mostly-empty rows; removed.  Former row layout:
    #   {'Erscheinungsjahr': new_data['year'], 'idGoobi': "",
    #    'offsetSeitenzahlen': "", 'idOAI': "", 'Volume': ""}
    return 0
def Vorstandtable(conn, new_data, table):
    """Fill the 'Vorstand' (management board) table.

    Inserts one row per person listed in ``new_data['vorstand'][0]``;
    placeholder rows such as "4 Mitglieder" are filtered out via
    membercheck().

    :param conn: open DB connection (provides execute())
    :param new_data: parsed company record
    :param table: sqlalchemy table object for the Vorstand table
    :return: 0 (always)
    """
    print(table.columns.keys())
    if 'vorstand' not in new_data:
        return 0
    fields = ('lastName', 'firstName', 'title', 'cityAcc', 'funct')
    for pos, member in enumerate(new_data['vorstand'][0]):
        del_entry(new_data['compare'], ['vorstand', 0, pos], list(fields))
        entry_check(member, list(fields))
        if membercheck(member):
            continue
        row = {'unternehmenId': new_data['unternehmenId'],
               'Name': member['lastName'],
               'Vorname': member['firstName'],
               'Titel': member['title'],
               'Ort': member['cityAcc'],
               'Funktion': member['funct'],
               'Bemerkung': "",
               'Rang': pos + 1}
        conn.execute(table.insert(), [row])
    return 0
def WKNtable(conn, new_data, table):
    """Fill the 'WKN' (security identifier) table.

    First merges the identifiers collected in ``new_data['shareinfo']`` into
    ``new_data['all_wkn_entry']`` (filling blanks of matching entries or
    appending new ones), then inserts one row per entry that carries a WKN
    or ISIN.

    :param conn: open DB connection (provides execute())
    :param new_data: parsed company record (mutated: 'all_wkn_entry' grows)
    :param table: sqlalchemy table object for the WKN table
    :return: 0 (always)
    """
    print(table.columns.keys())
    if 'all_wkn_entry' not in new_data: return 0
    if "shareinfo" in new_data:
        for shareinfo in new_data["shareinfo"]:
            if shareinfo['wkn'] + shareinfo['isin'] != "":
                count = 0
                for idx, wkn_entry in enumerate(new_data['all_wkn_entry']):
                    # Any shared value (WKN, ISIN, type, ...) counts as a
                    # match; blanks of the matched entry are then filled in.
                    if set(shareinfo.values()) & set(wkn_entry.values()):
                        for entries in ['type', 'wkn', 'isin', 'nw']:
                            if wkn_entry[entries] == "":
                                wkn_entry[entries] = deepcopy(shareinfo[entries])
                        count = 1
                        continue
                if count == 0:
                    # No match: append a fresh entry.  "list += dict" appends
                    # the dict's key (one int) as a placeholder which is
                    # immediately overwritten with an all-empty template
                    # shaped like entry 0.
                    new_data['all_wkn_entry'] += {len(new_data['all_wkn_entry']): []}
                    new_data['all_wkn_entry'][len(new_data['all_wkn_entry']) - 1] = dict(
                        zip_longest(new_data['all_wkn_entry'][0].keys(), [""] * len(new_data['all_wkn_entry'][0].keys())))
                    for entry in ['type', 'wkn', 'isin', 'nw']:
                        new_data['all_wkn_entry'][len(new_data['all_wkn_entry']) - 1][entry] = shareinfo[entry]
    del_entry(new_data['compare'], [], ['all_wkn_entry'])
    for idx, entry in enumerate(new_data['all_wkn_entry']):
        entry_check(entry, ['type', 'wkn', 'isin', 'nw'])
        # ISIN and Nennwert are also folded into the free-text remark column.
        comment = ""
        if entry['isin'] != "":
            comment = "ISIN: " + entry['isin']
        if entry['nw'] != "":
            comment = (comment + " Nennwert: " + entry['nw']).strip()
        if entry['wkn']+entry['isin'] != "":
            conn.execute(table.insert(), [
                {'unternehmenId': new_data['unternehmenId'],
                 'Unternehmen': new_data['name'],
                 'WKN': entry['wkn'],
                 'ISIN': entry['isin'],
                 'Bemerkung': comment,
                 'Aktienart': entry['type'],
                 'Rang': idx + 1,
                 }])
    return 0
def WeitereBemerkungentable(conn, new_data, table):
    """Fill the 'WeitereBemerkungen' (misc remarks) table.

    Collects the optional free-text sections of the record
    (Leitung/Kommunikation, Ermächtigung Aktienerwerb, Marktbetreuer,
    besondere Bezugsrechte, Unternehmensverträge) and writes one row per
    section entry with an 'Abschnitt' label.

    :param conn: open DB connection (provides execute())
    :param new_data: parsed company record
    :param table: sqlalchemy table object for the WeitereBemerkungen table
    :return: 0 (always)
    """
    print(table.columns.keys())
    if "leitung_kommunikation" in new_data:
        for key in new_data["leitung_kommunikation"]:
            comment = ""
            for entries in new_data["leitung_kommunikation"][key]:
                for entry in entries:
                    comment += entry.title()+ ": " + entries[entry]+" "
            # Append the section key once per row, e.g. " (Vorstand)".
            comment += " ("+key.title()+")"
            conn.execute(table.insert(), [
                {'unternehmenId': new_data['unternehmenId'],
                 'Abschnitt': "Leitung/Kommunikation",
                 'Bemerkung': comment,
                 }])
    if "ermächtigungAktienerwerb" in new_data:
        for key in new_data["ermächtigungAktienerwerb"]:
            comment = " ".join(new_data["ermächtigungAktienerwerb"][key])
            conn.execute(table.insert(), [
                {'unternehmenId': new_data['unternehmenId'],
                 'Abschnitt': "Ermächtigung Aktienerwerb",
                 'Bemerkung': comment,
                 }])
    if "Marktbetreuer" in new_data:
        conn.execute(table.insert(), [
            {'unternehmenId': new_data['unternehmenId'],
             'Abschnitt': "Marktbetreuer",
             'Bemerkung': new_data["Marktbetreuer"],
             }])
    if 'besBezugsrechte' in new_data:
        if new_data['besBezugsrechte']:
            for entry in new_data['besBezugsrechte']['besBezugsrechte']:
                text = ""
                if 'jahr' in entry:
                    text = "Jahr: "+entry['jahr']+" "
                for feat in entry["bemerkungen"]:
                    text += feat+" "
                conn.execute(table.insert(), [
                    {'unternehmenId': new_data['unternehmenId'],
                     'Abschnitt': "Besondere Bezugsrechte",
                     'Bemerkung': text,
                     }])
    if "unternehmensVertraege" in new_data:
        # May be a list of clauses or a single pre-joined string.
        if isinstance(new_data["unternehmensVertraege"],list): comment = " ".join(new_data["unternehmensVertraege"])
        else: comment = new_data["unternehmensVertraege"]
        conn.execute(table.insert(), [
            {'unternehmenId': new_data['unternehmenId'],
             'Abschnitt': "Unternehmens Verträge",
             'Bemerkung': comment,
             }])
    return 0
######### FUNCTIONS ############
def membercheck(entry):
    """Detect placeholder board entries such as "4 Mitglieder".

    These rows carry a digit in one of the name fields and the function
    label "Mitglieder"; they must not be written to the person tables.

    :param entry: member dict with 'funct', 'firstName', 'lastName'
    :return: 1 if *entry* is such a placeholder, else 0
    """
    is_placeholder = (entry['funct'] == "Mitglieder"
                      and (entry["firstName"].isdigit() or entry["lastName"].isdigit()))
    return 1 if is_placeholder else 0
def update_all_wkn(new_data):
    """Merge WKN/ISIN data from 'boersenbewertung' into 'all_wkn_entry'.

    It takes the wkn/isin information out of the table "boersenbewertung"
    and updates the data in "all_wkn_entries": blanks of matching entries
    are filled in, otherwise new entries are appended.

    :param new_data: parsed company record; 'all_wkn_entry' mutated in place
    :return: 0 (always)
    """
    if not 'boersenbewertung' in new_data: return 0
    shareinfolist = []
    for block in new_data['boersenbewertung']:
        shareinfo = {}
        # Only the 'wkns'/'isins' sub-lists are of interest here.
        blockkeys = new_data['boersenbewertung'][block].keys()&['wkns','isins']
        for entries in blockkeys:
            for entry in new_data['boersenbewertung'][block][entries]:
                if 'nummer' in entry.keys():
                    # 'wkns'[:-1] -> key 'wkn', 'isins'[:-1] -> key 'isin'
                    shareinfo[entries[:-1]] = entry['nummer']
                if 'aktienart' in entry.keys():
                    shareinfo['type'] = entry['aktienart']
        if shareinfo:
            shareinfolist.append(deepcopy(shareinfo))
            del shareinfo
    if not shareinfolist: return 0
    if ''.join(new_data['all_wkn_entry'][0].values()) == "":
        # The list only contains the empty template entry: replace it with
        # one copy of the template per collected shareinfo dict.
        for idx, shareinfo in enumerate(shareinfolist):
            # "list += dict" appends the dict's key (one int) as a
            # placeholder which is overwritten on the next line.
            new_data['all_wkn_entry'] += {idx+1: []}
            new_data['all_wkn_entry'][idx+1] = deepcopy(new_data['all_wkn_entry'][0])
            for entry in shareinfo:
                new_data['all_wkn_entry'][idx + 1][entry] = shareinfo[entry]
        del new_data['all_wkn_entry'][0]
    else:
        for shareinfo in shareinfolist:
            count = 0
            for idx, wkn_entry in enumerate(new_data['all_wkn_entry']):
                # Any shared value counts as the same security; fill blanks.
                if set(shareinfo.values())&set(wkn_entry.values()):
                    for entries in shareinfo:
                        entry_check(new_data['all_wkn_entry'][idx], ['type', 'wkn', 'isin', 'nw'])
                        if wkn_entry[entries] == "":
                            wkn_entry[entries] = deepcopy(shareinfo[entries])
                    count = 1
                    continue
            if count == 0:
                # No match: append an empty template shaped like entry 0 and
                # fill it from shareinfo (same "+= dict" placeholder trick).
                new_data['all_wkn_entry'] += {len(new_data['all_wkn_entry']): []}
                new_data['all_wkn_entry'][len(new_data['all_wkn_entry'])-1] = dict(zip_longest(new_data['all_wkn_entry'][0].keys(), [""]*len(new_data['all_wkn_entry'][0].keys())))
                for entry in shareinfo:
                    new_data['all_wkn_entry'][len(new_data['all_wkn_entry']) - 1][entry] = shareinfo[entry]
    return 0
def seperate_shareinfo(entry_split,entries,shareinfo):
    """Split an amount line into currency, amount and identifier parts.

    This function takes a line of information, splits it and searches for
    the amount and the currency in the string; the remainder is scanned for
    WKN/ISIN identifiers via get_kennnummer().

    :param entry_split: the currency token (first word of the line)
    :param entries: the full text line
    :param shareinfo: dict updated in place ('currency', 'amount', 'info'
        and, via get_kennnummer(), 'wkn'/'isin')
    :return: 0 (always)
    """
    # Test if Stückelung wasn't recognized
    shareinfo['currency'] = entry_split
    shareinfo['amount'] = ""
    # Consume the numeric amount (digit tokens plus Mio/Mrd units) that
    # follows the currency; stop at the first token that is neither.
    for idx, entry in enumerate(entries.split(" ")[1:]):
        if entry == "":
            continue
        if entry[0].isdigit():
            shareinfo['amount'] += entry
        elif "Mio" in entry:
            shareinfo['amount'] += " "+entry
        elif "Mrd" in entry:
            shareinfo['amount'] += " "+entry
        else:
            # idx counts within the [1:] slice, so [idx + 1:] on the full
            # split keeps the current (non-amount) token and everything after.
            entries = " ".join(entries.split(" ")[idx + 1:])
            break
    get_kennnummer(entries,shareinfo)
    return 0
def get_kennnummer(entries, shareinfo):
    """Extract a WKN or ISIN security identifier from a text fragment.

    Scans *entries* for "WKN", "Kenn-Nr" or "ISIN" markers followed by an
    identifier and stores it in ``shareinfo['wkn']`` / ``shareinfo['isin']``;
    ``shareinfo['info']`` receives the text with marker and identifier
    removed.  *shareinfo* is untouched when no marker is present.

    :param entries: free-text line to scan
    :param shareinfo: dict updated in place
    """
    re_idnr = re.compile(r"(WKN|Kenn-Nr|ISIN)\S*\s(\S*)")
    for finding in re_idnr.finditer(entries):
        if finding[1] == "ISIN":
            shareinfo['isin'] = finding[2]
        else:
            # "WKN" and the older "Kenn-Nr" label both denote the German WKN.
            shareinfo['wkn'] = finding[2]
        # Identical in both branches before; hoisted out of the if/else.
        # (A commented-out split()-based legacy variant was removed here.)
        shareinfo["info"] = entries.replace(finding[1], "").replace(finding[2], "")
    return
def get_shareinfo(new_data):
    """Collect all share information into ``new_data['shareinfo']``.

    This function loads all necessary share information in one list,
    merging the 'grundkapital' remark blocks and the 'ausgegebenesKapital'
    block into uniform dicts (wkn/isin/nw/type/number/voice/amount/
    currency/info).  The informations will be used in the following
    functions:
    - grundkapital
    - stimmrecht
    - stueckelung

    :param new_data: parsed company record; mutated in place ('stückelung',
        'stimmrecht' and 'all_wkn_entry' may grow, 'shareinfo' is added)
    :return: 0 (always)
    """
    shareinfolist = []
    if 'grundkapital' in new_data:
        max_entries = max([len(new_data['stückelung']),len(new_data['stimmrecht'])])
        if max_entries > 1:
            # Several denomination/voting entries: treat the Grundkapital
            # amount itself as one more remark block so it is parsed too.
            new_data['grundkapital']['bemerkungen'].append([new_data['grundkapital']['betrag'],"Grundkapital"])
        # TODO-Hint: Search here if something smells fishy!
        # TODO: REWORK REWORK REWORK!!! AND STIMMRECHT-STÜCKELUNG!
        # Empty remark blocks ([] or [""]) are skipped by the generator.
        for idx, entries in enumerate(x for x in new_data['grundkapital']['bemerkungen'] if x and not (len(x) == 1 and x[0]== "")):
            shareinfo = {'wkn': "", 'isin': "", 'nw': "", 'type': "", 'number': "", 'voice': "", 'amount': "",
                         'currency': "", 'info': ""}
            stype = ""
            if isinstance(entries, str): entries = [entries]
            if entries[0] == "": del entries[0]
            if not entries: continue
            # NOTE(review): the two replace() arguments render identically
            # here; presumably this collapses a non-breaking or double space
            # — confirm against the original bytes.
            entries[0] = entries[0].strip().replace(" "," ")
            entry_split = entries[0].split(" ")
            # The query was earlier in "seperate shareinfo"
            for idxx, entry in enumerate(entries):
                entry_splitted = entry.split(" ")
                # Move misplaced Stückelung/Stimmrecht fragments into their
                # dedicated lists so the table writers pick them up.
                for feat in ["Stückelung", "Stück "]:
                    if feat in entry:
                        new_data["stückelung"].append(entry.split(feat)[-1])
                        entries[idxx] = entry.split(feat)[0]
                if " Stimme" in entry:
                    new_data["stimmrecht"].append(entry)
                    entries[idxx] = ""
                # Identifiers/types already known, used for dedup below.
                kennnummer = [wkn["wkn"] for wkn in new_data["all_wkn_entry"]]+[wkn["isin"] for wkn in new_data["all_wkn_entry"]]
                aktienarten = [wkn["type"] for wkn in new_data["all_wkn_entry"]] + [wkn["type"] for wkn in
                                                                                   new_data["all_wkn_entry"]]
                for idxes, es in enumerate(entry_splitted):
                    if "Kenn-Nr" in es:
                        if idxes+1 < len(entry_splitted):
                            if entry_splitted[idxes+1] not in kennnummer:
                                # Unknown WKN: derive a short share-type tag
                                # (first character of the "...ktien" word,
                                # optionally prefixed "vinkuliert ") and
                                # register the identifier.
                                stype = ""
                                for idxcon, content in enumerate(entry_splitted):
                                    if "ktien" in content:
                                        stype = content[0]
                                        if "vinku" in entry_splitted[idxcon-1]:
                                            stype = "vinkuliert "+stype
                                        break
                                new_data["all_wkn_entry"].append({"type": stype,
                                                                  "isin": "",
                                                                  "wkn": entry_splitted[idxes + 1],
                                                                  "nw": ""})
                                break
                            else:
                                stype = aktienarten[kennnummer.index(entry_splitted[idxes+1] )]
                    #new_data["all_wkn_entry"][len(new_data["all_wkn_entry"])] = {"type":stype[0],
                    #                                                             "isin":"",
                    #                                                             "wkn":entry_splitted[idxes+1],
                    #                                                             "nw":""}
                    if "ISIN" in es:
                        # Same logic as above, for ISIN identifiers.
                        if idxes + 1 < len(entry_splitted):
                            if entry_splitted[idxes + 1] not in kennnummer:
                                stype = ""
                                for idxcon, content in enumerate(entry_splitted):
                                    if "ktien" in content:
                                        stype = content[0]
                                        if "vinku" in entry_splitted[idxcon - 1]:
                                            stype = "vinkuliert " + stype
                                        break
                                new_data["all_wkn_entry"].append({"type": stype,
                                                                  "isin": entry_splitted[idxes + 1],
                                                                  "wkn": "",
                                                                  "nw": ""})
                                break
                            else:
                                stype = aktienarten[kennnummer.index(entry_splitted[idxes+1] )]
            #if entries[0] == "": continue
            if len(entry_split) <= 1: continue
            # First line does not look like "<Currency> <digits>": treat it
            # as a continuation — prepend the Grundkapital amount (first
            # block) or glue it onto the previous block's info text.
            if len(entry_split[0]) > 1 and not entry_split[0][1].isupper() \
                    and (len(entry_split) < 2 or not entry_split[1][0].isdigit()):
                if idx == 0:
                    entries[0] = new_data['grundkapital']['betrag']+" "+entries[0]
                    entry_split = entries[0].split(" ")
                else:
                    if len(shareinfolist) > 1:
                        shareinfolist[len(shareinfolist)-1]['info'] += entries[0]
                    del entries[0]
                    if not entries:
                        continue
                    entry_split = entries[0].split(" ")
            # "<currency> <amount [Mio/Mrd/Brd]>" at the start of the line.
            re_amount = re.compile(r'(\D*\S\D)\s(\d[\d\s.,]*(Mio\.?|Mrd\.?|Brd\.?)?)')
            finding = re_amount.search(entries[0])
            if finding:
                shareinfo["currency"] = finding[1]
                shareinfo["amount"] = finding[2]
            shareinfo['info'] += " ".join(entries[1:])
            get_kennnummer(entries[-1], shareinfo)
            """
            if len(entry_split[0]) > 1 and entry_split[0][1].isupper() and entry_split[1][0].isdigit():
                seperate_shareinfo(entry_split[0], entries[0], shareinfo)
                if len(entries) > 1:
                    shareinfo['info'] += " ".join(entries[1:])
            """
            # Pair this block with the denomination/voting entry of the same
            # index, when present.
            if len(new_data['stückelung'])-1 >= idx: shareinfo['number'] = new_data['stückelung'][idx]
            if len(new_data['stimmrecht'])-1 >= idx: shareinfo['voice'] = new_data['stimmrecht'][idx]
            if shareinfo["type"] == "" and stype != "":
                shareinfo["type"] = stype
            # Backfill from all_wkn_entry when this block mentions a known
            # identifier.
            sharewkn = shareinfo['wkn']+shareinfo['isin'].replace(" ","")
            if sharewkn != "":
                for entry in new_data["all_wkn_entry"]:
                    for entrykey in ["wkn","isin"]:
                        if entry[entrykey] != "":
                            if entry[entrykey] in entries[0].replace(" ",""):
                                for feat in ['wkn','isin','nw','type']:
                                    if entry[feat] != "":
                                        shareinfo[feat] = entry[feat]
            shareinfolist.append(deepcopy(shareinfo))
            del shareinfo
    if "ausgegebenesKapital" in new_data:
        # The "ausgegebenes Kapital" block is aggregated into one dict.
        shareinfo = {'wkn': "", 'isin': "", 'nw': "", 'type': "", 'number': "", 'voice': "", 'amount': "",
                     'currency': "", 'info': ""}
        # Maps record keys to shareinfo keys.
        entrydict = {'betrag': "amount", 'notiz': "info", 'stimmrecht': "voice", 'stueckelung': "number"}
        for entries in new_data["ausgegebenesKapital"]["eintraege"]:
            for entry in entries:
                if entry in entrydict:
                    if entry == "betrag":
                        re_amount = re.compile(r'(\D*\S\D)\s(\d[\d\s.,]*(Mio\.?|Mrd\.?|Brd\.?)?)')
                        finding = re_amount.search(entries[entry])
                        if finding:
                            shareinfo["currency"] = finding[1]
                            shareinfo["amount"] = finding[2]
                            shareinfo["info"] = entries[entry]
                            #seperate_shareinfo(entry_split[0], entries[entry], shareinfo)
                        else:
                            shareinfo[entrydict[entry]] += entries[entry] + " "
                    else:
                        shareinfo[entrydict[entry]] += entries[entry]+" "
                    get_kennnummer(entries[entry], shareinfo)
        shareinfolist.append(deepcopy(shareinfo))
        del shareinfo
    new_data["shareinfo"] = shareinfolist
    print(new_data["shareinfo"])
    return 0
def stck_stimmrecht(data):
#data="\f\n\n\n\n\n\n\n\n Basler Aktiengesellschaft \n\nWertpapier Kenn.-Nr.: 510200 (Inhaber-Stammaktien) \n\n Sitz \n\nAn der Strusbek 30, 22926 Ahrensburg Telefon: (0 41 02) 4 63-0 Telefax: (0 41 02) 4 63-108 \n\n\n\n Tätigkeitsgebiet/Gründung \n\nEntwicklung, Herstellung und Vertrieb von Produkten der Meß-, Automatisierungs- und Rechnertechnik. \n\n \n\nGründung: 1988 \n\n Management \n\nAufsichtsrat: Dipl.-Ing. Prof. Dr.-Ing. Walter Kunerth, Zeitlarn, Vors.; Dipl.-Kfm. Hans Henning Offen, Großhansdorf, stellv. Vors.; Dipl.-Betriebsw. Bernd Priske, Willich-Neersen \n\nVorstand: Dipl.-Ing. Norbert Basler, Großhansdorf, Vors.; Bryan Hayes, Hamburg; Dr. Dietmar Ley, Ahrensburg; Dipl.-Wirtschafts-Ing. Thorsten Schmidtke, Hamburg \n\n\n\n Aktionäre \n\nDipl.-Ing. Norbert Basler, Großhansdorf (45,7%); IKB Beteiligungsgesellschaft mbH, Düseldorf (21,4%); Nicola-Irina Basler, Großhansdorf (4,3%); Dr. Dietmar Ley, Ahrensburg (4,3%); Bryan Hayes, Hamburg (4,3%); Streubesitz \n\n\n\n\n\n Wesentliche Beteiligungen \n\n\n\n Basler Inc., Exton (USA), Kapital: US$ 0,03 Mio (100%)\n\n\n\n\n\n Kapitalentwicklung seit 1980 \n\n\n\n 1988GründungskapitalDM 0,05 Mio\n\n der GmbH\n\n 1993ErhöhungDM 0,1 Mio\n\n Gem. GV vom 3.6.\n\n 1994ErhöhungDM 0,25 Mio\n\n Im April\n\n 1997ErhöhungDM 0,4 Mio\n\n Im August\n\n 1998Kapital bei Umwandlung der GesellschaftDM 0,4 Mio\n\n in AG gem. GV vom 13.10.\n\n 1999Umstellung auf EUREUR 204 516,75\n\n Gem. HV vom 24.2\n\n KapitalberichtigungEUR 3 Mio\n\n Gem. HV vom 24.2. (ohne Ausgabe von Aktien).\n\n BareinlageEUR 3,5 Mio\n\n Gem. ao. HV vom 3.3.\n\n\n\n\n\n\n\n Derzeitiges GrundkapitalEUR 3,5 Mio\n\n\n\nInhaber-Stammaktien, WP-Kenn-Nr. 510200 Stückelung: 3 500 000 Stückaktien Stimmrecht: je Stückaktie = 1 Stimme \n\n\n\n\n\n Genehmigtes KapitalEUR 1,75 Mio\n\n\n\nGem. ao. HV vom 3.3.1999, befristet bis 3.3.2004 (ggf. unter Ausschluß des ges. Bezugsrechts). \n\n\n\n\n\n Bedingtes KapitalEUR 0,3 Mio\n\n\n\nGem. ao. 
HV vom 3.3.1999 für Manager und Mitarbeiter der Gruppe (300 000 Stücke) \n\n\n\n Börsenbewertung \n\nWertpapier-Kenn-Nr.: 510200, Inhaber-Stammaktien \n\nNotiert: Neuer Markt in Frankfurt \n\nMarktbetreuer: Dresdner Bank AG; BHF-Bank AG \n\nNotiert seit 23.3.1999 Stückaktien o.N.; Emissionspreis: EUR 57,- \n\n\n\nBereinigte Kurse (Frankfurt in EUR) \n\n\n\n \n\n \n\n \n\n \n\n \n\n 1999\n\n \n\n \n\n \n\n \n\n \n\n \n\n bis 31.3.\n\n \n\n Höchst\n\n \n\n \n\n \n\n \n\n 95,00\n\n \n\n Tiefst\n\n \n\n \n\n \n\n \n\n 78,00\n\n \n\n Ultimo\n\n \n\n \n\n \n\n \n\n 82,00\n\n \n\n\n\n\n\n Dividenden (in DM; Auschüttungen sind erst für das Jahr 2000 geplant) \n\n\n\n \n\n \n\n \n\n \n\n 1998\n\n 1999\n\n \n\n Dividende\n\n \n\n \n\n \n\n 0\n\n 0\n\n \n\n\n\n\n\n \n\n Sonstige Angaben \n\nWirtschaftsprüfer : ARTHUR ANDERSEN Wirtschaftsprüfungsgesellschaft Steuerberatungsgesellschaft mbH, Hamburg \n\nHauptversammlung: 3.3.1999 (ao. HV) \n\nGeschäftsjahr: Kalenderjahr \n\n\n\n"
#data= "Inhaber-Stammaktien, WP-Kenn-Nr. 553700 voll an der Börse zugelassen und eingeführt Stückelung: 77 000 Stücke zu je DM 50; 102 000 zu DM 100; 25 000 zu DM 300; 100 450 zu DM 1 000 Stimmrecht: Das Stimmrecht jeder Aktie entspricht ihrem Nennbetrag."
#data = "\n\n\n\n\n\n\n\n\n \n\n Didier-Werke Aktiengesellschaft \n\nWertpapier Kenn.-Nr.: 553700 (Inhaber-Stammaktien) \n\n Sitz \n\nAbraham-Lincoln-Straße 1, 65189 Wiesbaden Postfach 20 25, 65010 Wiesbaden Telefon: (06 11) 73 35-0 Telefax: (06 11) 73 35-4 75 \n\n\n\n Tätigkeitsgebiet/Gründung \n\nFeuerfest-Produkte: Fertigung, Vertrieb, Montage von hochtemperaturfester Spezialkeramik. Anlagentechnik: Konstruktion, Fertigung, Vertrieb, Montage von Spezialaggregaten der Energie-, der Hochtemperatur-, der Korrosionsschutz- und Umweltschutztechnik. \n\n \n\nGründung: 1834 \n\n Management \n\nAufsichtsrat: Dr. Walter Ressler, Villach (Österreich), Vors.; Roland Platzer, Wien (Österreich), stellv. Vors.; Hubert Jacobs, Wiesbaden *); Dipl.-Ing. Dr. Günther Mörtl, Wien (Österreich); Jürgen Waligora, Duisburg *); Dr. Wilhelm Winterstein, München \n\n*) Arbeitnehmervertreter \n\nVorstand: Dipl.-Kfm. Robert Binder, Wiesbaden; Dipl.-Ing. Ingo Gruber, St. Veit/Glan (Österreich); Dr. Andreas Meier, Niklasdorf (Österreich); Dipl.-Ing. Uwe Schatz, Urmitz/Rhein; Walther von Wietzlow (Gesamtkoordination, Finanzen, Verkauf/Marketing, Personal, Recht, Organisation) \n\nOrganbezüge : 1997: Vorstand DM 1,755 Mio; Aufsichtsrat DM 0,038 Mio \n\n Investor Relations \n\nMag. Peter Hofmann, Tel.: 0043-1-587767123, Fax: 0043-1-5873380 \n\n\n\n Aktionäre \n\nRHI AG, Wien (Österreich) (90,1%); Rest Streubesitz \n\n\n\n\n\n Wesentliche Beteiligungen \n\nI. Verbundene Unternehmen, die in den Konzernabschluß einbezogen sind (Inland) \n\n\n\n Dinova GmbH, Königswinter, Kapital: DM 6 Mio (100%)\n\n Didier-M&P Energietechnik GmbH, Wiesbaden, Kapital: DM 4,5 Mio (66,67%)\n\n Teublitzer Ton GmbH, Teublitz, Kapital: DM 5 Mio (51%)\n\n\n\n\n\n Rohstoffgesellschaft mbH Ponholz, Maxhütte-Haidhof, Kapital: DM 2 Mio (100%)\n\n\n\n\n\n \n\nII. Verbundene Unternehmen, die in den Konzernabschluß einbezogen sind (Ausland) \n\n\n\n North American Refractories Co. 
(NARCO), Cleveland/Ohio (USA), Kapital: US-$ 20,158 Mio (85,12%)\n\n\n\n\n\n Zircoa Inc., Solon/Ohio (USA), Kapital: US-$ 1 Mio (100%)\n\n TRI-STAR Refractories Inc., Cincinnati/Ohio (USA), Kapital: US-$ 2,955 Mio (80%)\n\n\n\n\n\n InterTec Company, Cincinnati/Ohio (USA), Kapital: US-$ 0,998 Mio (100%)\n\n\n\n\n\n Didier, Corporation de Produits Réfractaires, Burlington (Kanada), Kapital: can$ 17 Mio (100%)\n\n\n\n\n\n Narco Canada Inc., Burlington/Ontario (Kanada), Kapital: can$ 3,705 Mio (100%)\n\n\n\n\n\n D.S.I.P.C. - Didier Société Industrielle de Production et de Constructions, Breuillet (Frankreich), Kapital: FF 33,713 Mio (99,88%)\n\n Thor Ceramics Ltd., Clydebank (Großbritannien), Kapital: £ 1,375 Mio (100%)\n\n Didier Refractarios S.A., Lugones (Spanien), Kapital: Ptas 200 Mio (100%)\n\n REFEL S.p.A., San Vito al Tagliamento (Italien), Kapital: Lit 9 851 Mio (100%)\n\n Didier Asia Sdn. Bhd., Petaling Jaya (Malaysia), Kapital: M-$ 7,5 Mio (60%)\n\n Veitsch-Radex-Didier S.E.A. PTE. LTD., Singapur (Singapur), Kapital: S$ 0,3 Mio (100%)\n\n Veitsch-Radex-Didier Andino C.A., Ciudad Guayana (Venezuela), Kapital: VEB 10 Mio (99,6%)\n\n Veitsch-Radex-Didier México, S.A. de C.V., Garza Garcia (Mexiko), Kapital: mex$ 0,05 Mio (100%)\n\n Veitsch-Radex-Didier Australia Pty. Ltd., Newcastle (Australien), Kapital: A$ 1,4 Mio (100%)\n\n\n\n\n\n \n\nIII. Verbundene Unternehmen, die nicht in den Konzernabschluß einbezogen sind (Inland) \n\n\n\n Rheinischer Vulkan Chamotte- und Dinaswerke mbH, Königswinter, Kapital: DM 2 Mio (100%)\n\n W. Strikfeld & Koch GmbH, Wiehl, Kapital: DM 1 Mio (100% über Striko-Westofen GmbH)\n\n\n\n\n\n \n\nIV. Verbundene Unternehmen, die nicht in den Konzernabschluß einbezogen sind (Ausland) \n\n\n\n Shanghai Dinova Ltd., Shanghai (China), Kapital: DM 15 Mio (60% über Dinova GmbH)\n\n Beijing Yanke Dinova Building Materials Co., Ltd., Peking (China), Kapital: DM 2,3 Mio (60% über Dinova GmbH)\n\n\n\n\n\n \n\nV. 
Beteiligungen an assoziierten Unternehmen \n\n\n\n EKW-Eisenberger Klebsandwerke GmbH, Eisenberg/Pfalz, Kapital: DM 6 Mio (31,5%)\n\n\n\n\n\n Société Francaise des Pises Siliceux S.A.R.L., Paris (Frankreich), Kapital: FF 1 Mio (97,5%)\n\n\n\n\n\n Kapitalentwicklung seit 1980 \n\n\n\n 1982BezugsrechtDM 92,4 Mio\n\n Im Dezember aus gen. Kap. (HV 16.7.1981), i.V. 8:1 zu 130 %; rechn. Abschlag DM 6,- am 6.12.; div.-ber. ab 1.1.1983; Div.Sch.Nr. 42.\n\n 1989BezugsrechtDM 122 Mio\n\n Im August aus gen. Kap. (HV 12.7.); für Aktionäre und Inhaber der Wandelanleihe von 1969 i.V. 7:2 zu 330 %; rechn. Abschlag DM 24,22 am 10.8.; div.-ber. ab 1.7.1989; Tal. bzw. Legitimationsschein A (für Inhaber der Wandelanleihe).\n\n\n\n\n\n\n\n Besondere Bezugsrechte\n\n\n\n\n\n 1985Bezugsrecht auf Optionsanleihe i.V. 5:2 zu 100 %, 1. Bezugsrechtsnotiz DM 1,90 am 17.9., Div.Sch.Nr. 46\n\n\n\n\n\n\n\n Derzeitiges GrundkapitalDM 122 Mio\n\n\n\nInhaber-Stammaktien, WP-Kenn-Nr. 553700 voll an der Börse zugelassen und eingeführt Stückelung: 77 000 Stücke zu je DM 50; 102 000 zu DM 100; 25 000 zu DM 300; 100 450 zu DM 1 000 Stimmrecht: Das Stimmrecht jeder Aktie entspricht ihrem Nennbetrag. \n\n\n\n\n\n Genehmigtes KapitalDM 40 Mio\n\n\n\nGem. 
HV vom 18.7.1994, befristet bis 30.6.1999 \n\n\n\n Börsenbewertung \n\nWertpapier-Kenn-Nr.: 553700, Inhaber-Stammaktien \n\nNotiert: amtlich in Berlin, Düsseldorf, Frankfurt am Main und München sowie im Freiverkehr in Hamburg \n\nNotiz seit 1948 Seit 16.3.1973 Stücknotiz zu DM 50,-; seit 9.6.1969 Stücknotiz zu DM 100,-; vorher Prozentnotiz \n\n\n\nBereinigte Kurse (Frankfurt in EUR) \n\n\n\n \n\n 1995\n\n 1996\n\n 1997\n\n 1998\n\n 1999\n\n \n\n \n\n \n\n \n\n \n\n \n\n bis 31.3.\n\n \n\n Höchst\n\n 72,60\n\n 67,13\n\n 80,78\n\n 82,83\n\n 99,00\n\n \n\n Tiefst\n\n 53,69\n\n 50,11\n\n 58,29\n\n 66,47\n\n 70,00\n\n \n\n Ultimo\n\n 60,49\n\n 62,38\n\n 74,65\n\n 71,48\n\n 92,50\n\n \n\n\n\n\n\n Dividenden (in DM pro Aktie) \n\n\n\n \n\n 1993\n\n 1994\n\n 1995\n\n 1996\n\n 1997\n\n \n\n Dividende\n\n 0\n\n 0\n\n 0\n\n 2\n\n 3 1)\n\n \n\n Steuerguthaben\n\n 0\n\n 0\n\n 0\n\n 0,86\n\n 0\n\n \n\n Div.-Schein-Nr.\n\n -\n\n -\n\n -\n\n 55\n\n 56\n\n \n\n\n\n_____________________________ \n\n1) Freiwillige Zahlung des Mehrheitsaktionärs an außenstehende Aktionäre \n\nNr. 
des nächstfälligen Div.-Scheines: 57 \n\n\n\n \n\n Kennzahlen \n\n\n\n Konzern\n\n 1993\n\n 1994\n\n 1995\n\n 1996\n\n 1997\n\n \n\n Investitionen (in TDM)\n\n 51 853,0\n\n 54 265,0\n\n 40 712,0\n\n 34 660,0\n\n 52 530,0\n\n \n\n Jahresüberschuß + Abschreibungen (in TDM)\n\n 9 057,0\n\n 45 885,0\n\n 59 420,0\n\n 58 013,0\n\n 52 584,0\n\n \n\n Bilanzkurs (%)\n\n 127,0\n\n 118,5\n\n 121,1\n\n 136,0\n\n 142,3\n\n \n\n Eigenkapitalquote (%)\n\n 16,8\n\n 16,0\n\n 17,1\n\n 19,9\n\n 20,6\n\n \n\n\n\nBeschäftigte \n\n\n\n Durchschnitt\n\n 6 953\n\n 6 516\n\n 5 753\n\n 5 293\n\n 4 681\n\n \n\n GJ-Ende\n\n 6 764\n\n 6 511\n\n 5 597\n\n 5 144\n\n 4 685\n\n \n\n\n\n\n\n Aus den Bilanzen (in Mio DM) \n\n\n\n \n\n AG\n\n Konzern\n\n\n\n U = Posten unter 0,5 Mio DM 1996 1997 1996 1997\n\n\n\n Aktiva \n\n Anlagevermögen 312 306 266 272 \n\n ..(Sachanlagenzugang) 13 12 35 53 \n\n ..(Beteiligungen) 250 247 28 21 \n\n Umlaufvermögen 323 433 668 730 \n\n ..(Flüssige Mittel) 27 12 49 28 \n\n Rechnungsabgrenzung U - 8 8 \n\n Steuerabgrenzung - - 4 4 \n\n\n\n\n\n Passiva \n\n Eigenkapital 291 247 198 220 \n\n ..(Gezeichnetes Kapital) 122 122 122 122 \n\n ..(Bilanzergebnis) 5 - 5 11 \n\n Sopo m. Rücklageant. - 18 1 - \n\n Fremdkapital 344 474 744 794 \n\n ..(Pensionsrückstell.) 148 148 186 191 \n\n ..(And. Rückstellungen) 79 71 176 151 \n\n ..(langfr. Verbindlichk.) 51 51 53 52 \n\n ..(kurz- +mfr. Verbindlk.) 65 205 327 400 \n\n Rechnungsabgrenzung - - 4 1 \n\n Bilanzsumme 635 739 947 1 014 \n\n\n\n Aus den Gewinn- und Verlustrechnungen (in Mio DM) \n\n\n\n \n\n AG\n\n Konzern\n\n\n\n U = Posten unter 0,5 Mio DM 1996 1997 1996 1997\n\n\n\n Umsatz 497 550 1 289 1 480 \n\n Bestandsveränderung - 9 1 2 15 \n\n Akt. Eigenleistung U 1 1 2 \n\n sonst. betr. Erträge 33 54 43 72 \n\n Materialaufwand 238 310 597 768 \n\n Personalaufwand 145 136 395 432 \n\n Abschreibungen 17 15 40 41 \n\n sonst. betr. Aufwand 110 168 253 281 \n\n Finanzergebnis - 7 - 15 - 20 - 22 \n\n Ergebnis d. gewöhnl. 
Geschäftstätigkeit 5 - 39 31 26 \n\n Steuern U U 13 14 \n\n ..(EE-Steuern) - 2 - 1 5 6 \n\n Jahresergebnis 5 - 39 18 11 \n\n\n\n Sonstige Angaben \n\nWirtschaftsprüfer : C & L Deutsche Revision Aktiengesellschaft Wirtschaftsprüfungsgesellschaft, Frankfurt/M. \n\nHauptversammlung: 15.5.1998 \n\nGeschäftsjahr: Kalenderjahr \n\n\n\n"
split_by_line = [line for line in data.replace("\n\n","\n").replace("\nStimmrecht","Stimmrecht").split("\n") if line != "" and ("Stückelung:" in line or ("Stückelung" in line and "Stimmrecht" in line))]
for line in split_by_line:
print(line)
reg_groups =re.compile(r"(Stückelung|Stimmrecht)")
groups = []
for line in split_by_line:
group = {}
datatype = "Aktien"
line = line.replace("papier-Nr.","Kenn-Nr.").replace("WP-Nr.","Kenn-Nr.").replace(" bzw. ","").replace("(voll eingezahlt)","").replace("- ","-")
sidx = 0
max_parts = 1
for finding in reg_groups.finditer(line):
#if datatype == "Aktien":
# if re.compile(r"(Kenn-Nr\.|ISIN)"):
#print(finding[0])
group.update({datatype:line[sidx:finding.regs[0][0]].strip()})
if datatype != "Aktien" and "stimmrechtslos" not in line[sidx:finding.regs[0][0]] and "Besondere" not in line[sidx:finding.regs[0][0]] and max_parts < len(line[sidx:finding.regs[0][0]].split(";")):
max_parts = len(line[sidx:finding.regs[0][0]].split(";"))
#offset = -1
#if ":" not in finding[0]:
# offset = len(finding[0])
datatype= finding[0][:]
sidx = finding.regs[0][1]+1
else:
group.update({datatype: line[sidx:].strip()})
if datatype != "Aktien" and "stimmrechtslos" not in line[sidx:] and "Besondere" not in line[sidx:] and max_parts < len(line[sidx:].split(";")):
max_parts = len(line[sidx:].split(";"))
groups.append((max_parts,group))
shareinfoarr = []
regs = {"Aktien":re.compile(r"(?P<currency>\D*)(?P<amount>[\d\s,-]*(Mio\s|Mrd\s)?)(?P<type>(vinkulierte\s)?[^0-9,\s]*)(?P<rest>.*)"),
"Stückelung":re.compile(r"(?P<number>[\d\s]*)(?P<addinfo>[\D]*)(?P<nomval>[\d\s,]*)"),
"Stimmrecht":re.compile(r"((Je)(?P<stuck>([\d\s]*))\D*(?P<nw>[\d\s,-]*)[^=]*=(?P<voice>[\d\s]*))")}
dellist = []
for (max_parts, group)in groups:
shareinfo = []
for idx in range(0,max_parts):
shareinfo.append({'wkn': "", 'isin': "", 'nw': "",'nomval':"", 'type': "", 'number': "", "stuck": "1", 'voice': "stimmrechtlos", 'amount': "",
'currency': "", 'info': ""})
for key,content in group.items():
if key == "Aktien" and " " not in content[:5]:
regs["Aktien"] = re.compile(r"(?P<type>(vinkulierte\s|kumulative\s)?[^0-9,\s]*)(?P<rest>.*)")
if key == "Aktien" and ("%" in content or "davon" in content[:6]):
shareinfo[0]["info"] += content
knr = re.compile(r"(Kenn-Nr\.)(?P<wkn>([\s\d]*))|(ISIN\s)(?P<isin>([\S]*)).*").search(content)
if knr:
if knr["wkn"]:
shareinfo[0]["wkn"] = knr["wkn"].strip()
if knr["isin"]:
shareinfo[0]["isin"] = knr["isin"].strip()
continue
if key != "Aktien" and "stimmrechtslos" not in content and "Besondere" not in content:
contentparts = content.split(";")
else:
contentparts = [content]
for idx,part in enumerate(contentparts):
finding = regs[key].search(part)
if finding:
for grpkey, grpval in finding.groupdict().items():
if grpkey == "rest":
knr = re.compile(r"(Kenn-Nr\.)(?P<wkn>([\s\d]*))|(ISIN\s)(?P<isin>([\S]*)).*").search(grpval)
if knr:
if knr["wkn"]:
shareinfo[idx]["wkn"] = knr["wkn"].strip()
if knr["isin"]:
shareinfo[idx]["isin"] = knr["isin"].strip()
elif grpval and grpkey not in ["addinfo"]:
if grpval.strip() != "":
if part == "Aktien" and grpkey == "currency" and ("ktien" in grpval or "tück" in grpval or len(grpval) > 5):
shareinfo[idx]["type"] = grpval
shareinfo[idx]["currency"] = ""
else:
if grpkey != "currency" or len(grpval) < 8:
shareinfo[idx][grpkey] = grpval.strip(" :,=")
if key == "Stückelung" and finding["addinfo"]:
stype = finding["addinfo"].replace("zu", "je").replace("(","").split("je")[0].replace("o.N.", "").replace("ohne Nennwert", "").strip(" ,")
if shareinfo[idx]["type"] == "" and ("ktien" in stype or "tück" in stype):
shareinfo[idx]["type"] = stype.strip(" :,=")
if shareinfo[idx]["currency"] == "":
currency = finding["addinfo"].strip().split(" ")[-1].replace("je","").replace(":","").replace("o.N.", "").replace("ohne Nennwert", "")
if "ktien" not in currency and "tück" not in currency and "wer" not in currency and len(currency) < 8:
shareinfo[idx]["currency"] = currency.strip(" :,=")
else:
shareinfo[idx]["info"] += content
if key == "Stückelung" and "Stimmrecht" not in group.keys() and shareinfo[idx]["number"]+shareinfo[idx]["nomval"] == "":
dellist.append(idx)
for delitem in sorted(dellist,reverse=True):
del shareinfo[delitem]
shareinfoarr+=shareinfo
return shareinfoarr
#for shareinfo in shareinfoarr:
# print(shareinfo)
def replace_geminfo(yearinfo, notizinfo, notizstr):
    """
    Search for "gemäß" (accounting-standard / footnote) information in the
    year info string (like "2000 i)") and return the matching comment.

    Args:
        yearinfo: column header text for one year, e.g. "2000 i)".
        notizinfo: dict of footnote texts, keyed by *notizstr*.
        notizstr: key into *notizinfo*; "" means "take the text from
            *yearinfo* itself".

    Returns:
        The resolved comment string, "" when nothing matches.
    """
    comment = ""
    if "i" in yearinfo:
        comment = "gemäß IAS"
    elif "g" in yearinfo:
        comment = "gemäß US-GAAP"
    # BUGFIX: the original tested `string.ascii_letters in yearinfo`, i.e.
    # whether the whole 52-char alphabet is a substring — always False, so
    # this branch was dead.  The intent is "any other letter marker".
    elif any(char in string.ascii_letters for char in yearinfo):
        if notizstr in notizinfo:
            comment = " ".join(notizinfo[notizstr]).replace("_", "")
    elif "**)" in yearinfo:
        if notizstr == "":
            comment = yearinfo.strip().split("*")[0]
        elif notizstr in notizinfo:
            comment = " ".join(notizinfo[notizstr]).replace("_", "").split("**)")[-1]
    elif "*" in yearinfo:
        if notizstr == "":
            comment = yearinfo.strip().split("*")[0]
        elif notizstr in notizinfo:
            comment = " "+" ".join(notizinfo[notizstr]).replace("_", "")
            comment = comment.split("*")[1][1:]
    elif ")" in yearinfo:
        # Numeric footnote markers like "2000 1)".
        if yearinfo.strip().split(" ")[-1][0] in string.digits:
            if notizstr == "":
                comment = yearinfo.strip().split(" ")[0]
            elif notizstr in notizinfo:
                for info in notizinfo[notizstr]:
                    if yearinfo.strip().split(" ")[-1] in info:
                        comment = info.replace("_", "").replace(yearinfo.strip().split(" ")[-1], "").strip()
    return comment
def del_entry(table, locations, entries):
    """
    Delete *entries* from the compare table.

    The compare table holds all data from the json file that did not get
    transferred into the db; it is later dumped to a temp dir.  *locations*
    is a path of nested keys to descend before deleting.  Does nothing
    unless the table's 'debug' flag is set.  Returns 0.
    """
    if table['debug'] == False:
        return 0
    if table:
        node = table
        # Walk down the nested dicts; unknown path segments are skipped.
        for location in locations:
            if location in node:
                node = node[location]
        # Remove only the keys that actually exist at this level.
        for key in entries:
            if key in node:
                del node[key]
    return 0
def get_infostring(table):
    """
    Concatenate the values of every dictionary in *table* into one string.

    Each value is followed by ", " — including the last one, matching the
    historical output format.
    """
    values = []
    for record in table:
        values.extend(record.values())
    return "".join(value + ", " for value in values)
def get_share(entry):
    """
    Extract the share amount, share percentage and currency from an
    entry string like "Kapital: DM 6 Mio (100%)".

    Returns:
        (share, share_pc, currency) — any component that cannot be
        determined is "".
    """
    share = ""
    share_pc = ""
    currency = ""
    if entry == "":
        return share, share_pc, currency
    if ":" in entry:
        # "Kapital: <currency> <amount> (<pct>)" style.
        clean_entry = entry.split(":")[1].strip()
        if "(" in clean_entry:
            pieces = clean_entry.replace(")", "").split("(")
            share_pc = pieces[1]
            head = pieces[0].split(" ")
            currency = head[0]
            share = " ".join(head[1:]).strip()
    elif "(" in entry and "%" in entry:
        # Percentage given in parentheses somewhere in the text.
        pieces = entry.replace(")", "").split("(")
        share_pc = pieces[1]
        for piece in pieces:
            if "%" in piece:
                share_pc = piece
    elif "%" in entry:
        # Whole entry is just the percentage.
        share_pc = entry
    return share, share_pc, currency
def get_currencyinfo(table):
    """
    Generate a list of cuinfo (Currency & Unit INFO) dictionaries for the
    given column headers, converting unit words to numbers
    (e.g. "Mio" -> "1000000").

    Each result dict has the keys 'currency', 'unit' and 'text'.
    Rows mentioning "Beschäft..." (employees) are skipped.
    """
    cuinfo = []
    for item in table:
        # Employee counts carry no currency information.
        if "Beschäft" in item:
            continue
        currency, unit = "", "1"
        if "TEUR" in item:
            currency, unit = "EUR", "1000"
        elif "TDM" in item:
            currency, unit = "DM", "1000"
        elif "%" in item:
            unit = "%"
        elif "(" in item:
            # Parse "... (<unit> <currency>)" from the last parenthesis.
            words = item.split("(")[-1].split(" ")
            currency = words[-1].replace(")", "").replace(",", "").strip()
            if len(words) > 1:
                unit = words[-2]
                if "Mio" in item:
                    unit = "1000000"
                if "Mrd" in item:
                    unit = "1000000000"
        else:
            currency = item
        cuinfo.append({'currency': currency, 'unit': unit, 'text': item.split("(")[0]})
    return cuinfo
def get_currency_unit(table):
    """
    Extract the currency and the unit from table['waehrung'].

    Replacements applied:
        "Mio"  -> unit "1000000"
        "Mrd"  -> unit "1000000000"
        "TEUR" -> unit "1000", currency "EUR"
        "TDM"  -> unit "1000", currency "DM"

    Returns:
        (currency, unit) as strings; defaults are ("", "1").
    """
    currency, unit = "", "1"
    if 'waehrung' not in table:
        return currency, unit
    raw = table['waehrung']
    if "TEUR" in raw:
        return "EUR", "1000"
    if "TDM" in raw:
        return "DM", "1000"
    words = raw.split(" ")
    if len(words) > 2:
        # e.g. "in Mio DM": last word is the currency, the one before the unit.
        currency = words[-1]
        unit = words[-2]
        if unit == "Mio":
            unit = "1000000"
        if unit == "Mrd":
            unit = "1000000000"
    elif len(words) > 1:
        currency = words[-1]
    elif len(raw) > 0:
        currency = raw
    return currency, unit
def entry_check(entry, paramlist):
    """
    Ensure every key from *paramlist* exists in *entry*.

    Missing keys are added with "" as value; existing non-int values that
    are empty sized containers (e.g. [] or {}) are normalised to "".
    Mutates *entry* in place and returns 0.
    """
    for key in paramlist:
        if key not in entry:
            entry[key] = ""
            continue
        value = entry[key]
        # Normalise empty containers to "".  The original deleted the key
        # and immediately re-assigned "" — the delete was a no-op.
        if value != "" and not isinstance(value, int) and len(value) == 0:
            entry[key] = ""
    return 0
def empty_check(entry, paramlist):
    """
    Return True when every key in *paramlist* is either missing from
    *entry* or holds "" / " "; otherwise False.
    """
    count = 0
    for key in paramlist:
        if key not in entry:
            count += 1
        # BUGFIX: the original used a second plain `if` that indexed
        # entry[key] unconditionally, raising KeyError for missing keys
        # (and double-counting would have followed).  `elif` guards the
        # lookup.
        elif entry[key] == "" or entry[key] == " ":
            count += 1
    return count == len(paramlist)
def create_dir(newdir):
    """
    Create the directory *newdir* (including missing parents) if it does
    not already exist.

    Prints the path on successful creation; prints an error message when
    creation fails.
    """
    if not os.path.isdir(newdir):
        try:
            # makedirs(exist_ok=True) creates intermediate directories and
            # survives the race where the directory appears between the
            # isdir() check and creation — os.mkdir did neither.
            os.makedirs(newdir, exist_ok=True)
            print(newdir)
        except OSError:
            # Typo fixed: "directoy" -> "directory".
            print("cannot create %s directory" % newdir)
def get_lastid(table, conn):
    """
    Return a fresh id for *table*: highest existing integer id plus one.

    New_ID = Old_ID + 1.  Raises ValueError when the table holds no
    integer ids (same as the original max() over an empty list).
    """
    rows = conn.execute(select([table.c.id])).fetchall()
    int_ids = [row.values()[0] for row in rows
               if isinstance(row.values()[0], int)]
    return max(int_ids) + 1
def get_files(filedir):
    """
    Return the sorted list of all *.json file names inside *filedir*.
    """
    pattern = os.path.normcase(filedir + "/") + "*.json"
    return sorted(glob.glob(pattern))
def get_uid(new_data, metadata, conn):
    """
    Resolve the universal company id for the WKN/ISIN numbers found in
    new_data["all_wkn_entry"] and store it on *new_data*.

    Each non-empty WKN/ISIN is looked up in the WKN table; a hit is then
    followed through MainRelation, first via the "referenz" column, then
    via "weiteresAuftreten".  On a match, new_data['reference'] and
    new_data['id'] are taken from the relation row.  When nothing matches,
    the record falls back to its own unternehmenId with an empty id.
    Always returns 0.
    """
    for awe in new_data["all_wkn_entry"]:
        for key in ["wkn", "isin"]:
            if awe[key] == "":
                continue
            if key == "wkn":
                s = select([metadata.tables["WKN"]]).where(metadata.tables["WKN"].c.WKN == awe[key]).order_by(
                    metadata.tables["WKN"].c.unternehmenId.desc())
            else:
                s = select([metadata.tables["WKN"]]).where(metadata.tables["WKN"].c.ISIN == awe[key]).order_by(
                    metadata.tables["WKN"].c.unternehmenId.desc())
            result = conn.execute(s)
            try:
                someid = result.fetchone()[0]
            except Exception:
                # No matching row: fetchone() returned None and the [0]
                # raised.  BUGFIX: the original bare "except:" also
                # swallowed SystemExit/KeyboardInterrupt.
                new_data['reference'] = new_data['unternehmenId']
                new_data['id'] = ""
                return 0
            s = select([metadata.tables['MainRelation']]).where(
                metadata.tables['MainRelation'].c.referenz == someid)
            result = conn.execute(s)
            fone = result.fetchall()
            if len(fone) > 0:
                for row in fone:
                    new_data['reference'] = row[0]
                    new_data['id'] = row[4]
                return 0
            s = select([metadata.tables['MainRelation']]).where(
                metadata.tables['MainRelation'].c.weiteresAuftreten == someid)
            result = conn.execute(s)
            fone = result.fetchall()
            if len(fone) > 0:
                for row in fone:
                    new_data['reference'] = row[0]
                    new_data['id'] = row[4]
                return 0
    # Nothing matched anywhere: fall back to the record's own id.
    new_data['reference'] = new_data['unternehmenId']
    new_data['id'] = ""
    return 0
#TODO-HINT: Be aware its order is descendent by year to avoid apply new WKN's to old ones which get used in early years!
def akf_db_updater(file,dbPath):
    """
    Main function of the AKF_SQL_DBTalk!

    Loads one JSON company record from *file*, derives its ids and share
    information, and writes every section into the matching table of the
    SQL database at *dbPath*.  Returns 0.
    """
    # Normalise Windows path separators so the split("/") calls below work.
    file = file.replace("\\", "/")
    #Condition
    #if "0704" not in file: return
    print("Start SQLTalk")
    print(file)
    with open(file, 'r', encoding="utf-8") as f:
        new_data = json.load(f, cls=NoneRemover)
    # Generate a compare object: a deep copy of the raw record from which
    # transferred entries are removed, leaving what did NOT reach the db.
    # Debug is hard-wired off here, so only the stub dict is created.
    new_data['debug'] = False
    if new_data['debug']:
        new_data['compare'] = deepcopy(new_data)
        del_entry(new_data['compare'], [], ['_fulltext', 'debug'])
    else:
        new_data['compare'] = {"debug": False}
    # Generate unternehmenId from the parent folder name ("-" -> ".") plus
    # the first four characters of the file name.
    new_data.update({'unternehmenId': file.split("/")[-2].replace("-",".") + "." + file.split("/")[-1][:4]})
    # Generate Year from the parent folder name.
    new_data.update({'year': file.split("/")[-2]})
    db_akf = dbPath
    engine = create_engine(db_akf)
    conn = engine.connect()
    # Create a MetaData instance reflecting the existing schema.
    metadata = MetaData(engine, reflect=True)
    # Check if entry already exists
    #s = select([metadata.tables['Unternehmen']]).where(
    #    metadata.tables['Unternehmen'].c.unternehmenId == new_data['unternehmenId'])
    #result = conn.execute(s)
    #if len(result.fetchall()) > 0: print("Entry already exists!");conn.close(); return 0;
    # Parse Stueckelung/Stimmrecht share information out of the raw fulltext.
    new_data["shareinfo"] = stck_stimmrecht(new_data["_fulltext"])
    #for shareinfo in new_data["shareinfo"]:
    #    print(shareinfo)
    # Merge freshly parsed WKN/ISIN numbers into all_wkn_entry.  The
    # for/else below appends only when no existing entry matched (no break).
    for si in new_data["shareinfo"]:
        if si["wkn"]+si["isin"] != "":
            for awe in new_data["all_wkn_entry"]:
                if len(awe.keys())<4:
                    # Backfill missing keys so the comparisons below are safe.
                    for key in ["type","wkn","isin","nw"]:
                        if not awe.get(key,False):
                            awe[key] = ""
                if si["wkn"] == awe["wkn"] and si["wkn"] != "":
                    break
                if si["isin"] == awe["isin"] and si["isin"] != "":
                    break
            else:
                new_data["all_wkn_entry"].append(
                    {"type":si.get("type",""),
                     "wkn":si.get("wkn",""),
                     "isin":si.get("isin",""),
                     "nw":""}
                )
    #return
    # Check if a universal ID already exists
    get_uid(new_data, metadata, conn)
    # Update all_wkn_entry
    #update_all_wkn(new_data)
    # Get shareinfo for later use
    #get_shareinfo(new_data)
    """
    with open("stimmrecht.txt","a") as stfile:
        for entry in new_data["shareinfo"]:
            stfile.write(entry["voice"]+"\n")
    with open("stuckelung.txt","a") as stfile:
        for entry in new_data["shareinfo"]:
            stfile.write(entry["number"]+"\n")
    return
    """
    # Start writing in the table
    print("TABLES")
    # Dispatch table: one writer function per database table name.
    options = {
        'Aktienkurse': Aktienkursetable,
        'Aktionaer': Aktionaertable,
        'Anleihen': Anleihentable,
        'Aufsichtsrat': Aufsichtsrattable,
        'Beschaeftigte': Beschaeftigtetable,
        'Beteiligungen': Beteiligungentable,
        'BilanzAktiva': BilanzAktivatable,
        'BilanzPassiva': BilanzPassivatable,
        'BilanzSumme': BilanzSummetable,
        'Boersennotiz': Boersennotiztable,
        'Dependence': Dependencetable,
        'Dividenden': Dividendentable,
        'Geschaeftsjahr': Geschaeftsjahrtable,
        'Geschaeftsleitung': Geschaeftsleitungtable,
        'Grundkapital': Grundkapitaltable,
        'GuV': GuVtable,
        'Kapitalart': Kapitalarttable,
        'Kapitalentwicklung': Kapitalentwicklungtable,
        'Kennzahlen': Kennzahlentable,
        'Main': Maintable,
        'MainRelation': MainRelationtable,
        'Organbezuege': Organbezuegetable,
        'Stimmrecht': Stimmrechttable,
        'Stueckelung': Stueckelungtable,
        'Unternehmen': Unternehmentable,
        'Volume': Volumetable,
        'Vorstand': Vorstandtable,
        'WKN': WKNtable,
        'WeitereBemerkungen': WeitereBemerkungentable,
    }
    for name in metadata.tables:
        # 'Dependence' and 'Volume' are deliberately skipped here —
        # presumably filled by another path; TODO confirm.
        if name in ['Dependence','Volume']: continue;
        print(name)
        options[name](conn, new_data, metadata.tables[name])
    conn.close()
    engine.dispose()
    if new_data['debug']:
        # Dump the leftover (untransferred) data to a temp file for review.
        TEMP = tempfile.gettempdir()
        create_dir(TEMP + "/SQLDBTalk/")
        with open(TEMP + "/SQLDBTalk/" + os.path.basename(file), 'w', encoding="utf-8") as f:
            json.dump(new_data['compare'], f, indent=4)
        print("Wrote File: \n" + os.path.normcase(TEMP + "/SQLDBTalk/" + os.path.basename(file)))
    print("FINISHED!")
    return 0
def main(config):
    """
    Entry point: collect the configured folder(s) and feed every JSON file
    they contain to akf_db_updater().

    The file paths are stored in the config.ini file; with SingleOn set to
    "True" only the single configured path is processed, otherwise every
    folder matching AllPath.
    """
    if config['CDS']['SingleOn'] == "True":
        folders = [os.path.normpath(config['DEFAULT']['SinglePath'])]
    else:
        # normpath (via pathlib-style normalisation) so absolute paths also
        # work on unix.
        all_path = os.path.normpath(config['CDS']['AllPath'])
        folders = sorted(glob.glob(os.path.normpath(all_path)))
    db_path = config['DEFAULT']['DBPath']
    started = time.time()
    for folder in folders:
        # Read files, then run the updater on each one.
        for json_file in get_files(folder):
            #if "0062" not in file:continue
            akf_db_updater(json_file, db_path)
    print("The whole folder was finished in {}s".format(round(time.time() - started, 3)))
24,496 | 0a1daee66c336e999cd831e0d3f50fd643680b7e | from django.db import models
from django.contrib import admin
from filebrowser.fields import FileBrowseField
from util import custom_fields
class Publication(models.Model):
    """Demo model exercising the different FileBrowseField configurations."""

    # Unrestricted file selection.
    image = FileBrowseField("Image", max_length=200, blank=True, null=True)
    # Browser opens in the images/ directory.
    image_initialdir = FileBrowseField("Image (Initial Directory)", max_length=200, directory="images/", blank=True, null=True)
    # Restricted to .jpg files only.
    image_extensions = FileBrowseField("Image (Extensions)", max_length=200, extensions=['.jpg'], help_text="Only jpg-Images allowed.", blank=True, null=True)
    # Restricted by filebrowser format group instead of extension.
    image_format = FileBrowseField("Image (Format)", max_length=200, format='Image', blank=True, null=True)
    # Combined: fixed directory, extension and format group.
    pdf = FileBrowseField("PDF", max_length=200, directory="documents/", extensions=['.pdf'], format='Document', blank=True, null=True)
    # Project-specific rich-text field.
    my_field = custom_fields.HTMLField()


# Make the model editable in the Django admin.
admin.site.register(Publication)
24,497 | a367adaaf3fbc36d87ba2691a23cc79030cee342 | from __future__ import absolute_import, print_function, unicode_literals
from flask import Blueprint, render_template, request, current_app
from canvass_cluster.cluster import ClusterCreator
import requests
import numpy as np
import json
import os
# All view functions in this module are registered on this blueprint.
views = Blueprint('views', __name__)


@views.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
@views.route('/locations', methods=['POST'])
def handle_locations():
    """
    Cluster the POSTed locations and return them as a JSON string.

    Expects a JSON body with a 'locations' list and an optional 'clusters'
    count (defaults to 20 when missing or falsy).
    """
    payload = request.json
    locs = payload['locations']
    num_clusters = payload.get('clusters') or 20
    cluster_handler = ClusterCreator(
        locs, num_clusters, current_app.config['MAPZEN_KEY']
    )
    return json.dumps({'locations': cluster_handler()})
|
class Solution(object):
    def firstUniqChar(self, s):
        """
        Return the index of the first character of *s* that occurs exactly
        once, or -1 when there is none.

        :type s: str
        :rtype: int
        """
        counts = {}
        for char in s:
            counts[char] = counts.get(char, 0) + 1
        # Scan the string in order: the first character with count 1 is by
        # construction the lowest index, so no first-index bookkeeping and
        # no min()-search over the dict values is needed (the original did
        # both; its empty-string guard was also redundant).
        for index, char in enumerate(s):
            if counts[char] == 1:
                return index
        return -1
# Quick manual check: 'l' is the first non-repeating char of "leetcode" -> 0.
my_solution = Solution()
print(my_solution.firstUniqChar('leetcode'))
|
24,499 | 3ac797c325430a41b9b695cc98604f0a7a15779a | import argparse
from dataclasses import dataclass
import re
import sys
import textwrap
SIMPLE_PREFIX_RE = r"""
(\s*) # Some blanks, then either:
(
\# # Pound comments
|
// # C-style comments
|
/// # Rust doc comments
|
//! # Doxygen
|
\* # Bullet point (star)
|
- # Bullet point (dash)
)?
[ \t] # Exactly one space or tab
"""
BLOCKQUOTE_RE = r"\s*(>\s*)+\s+"
FULL_RE = re.compile(f"{BLOCKQUOTE_RE}|{SIMPLE_PREFIX_RE}", re.VERBOSE)
def get_prefix(text):
    """Return the comment/bullet/blockquote prefix of *text*, or ""."""
    match = FULL_RE.match(text)
    return match.group() if match else ""
def is_blank(line):
    """True when *line*, ignoring its final character, is only spaces."""
    return set(line[:-1]) <= {" "}
@dataclass
class Region:
    """A run of consecutive lines sharing the same line prefix."""

    text: str    # the region's lines, each newline-terminated
    prefix: str  # the common prefix (comment marker / bullet / blockquote)
def split_regions(text):
    """Split *text* into Regions of consecutive lines with equal prefixes."""
    regions = []
    last_prefix = None
    for line in text.splitlines(keepends=False):
        prefix = get_prefix(line)
        if regions and prefix == last_prefix:
            # Same prefix as the previous line: extend the current region.
            regions[-1].text += line + "\n"
        else:
            # Prefix changed (or first line): open a new region.
            regions.append(Region(text=line + "\n", prefix=prefix))
            last_prefix = prefix
    return regions
def reformat_region(region, *, width):
    """
    Re-wrap a single region to *width* columns, re-attaching its prefix to
    every produced line.  A blank region collapses to a single newline.

    Improvement over the original: the unused local
    ``lines = text.splitlines()`` has been removed.
    """
    text = region.text
    prefix = region.prefix
    if text.isspace() or is_blank(text):
        return "\n"
    prefix_length = len(prefix)
    # Strip the prefix from every line so textwrap only sees the payload;
    # the prefix is re-added to each wrapped line afterwards.
    to_wrap = "\n".join(x[prefix_length:] for x in text.splitlines())
    wrapped = textwrap.wrap(
        to_wrap, width=width - prefix_length, break_long_words=False
    )
    res = ""
    for line in wrapped:
        res += prefix + line + "\n"
    return res
def reformat(text, *, width=80):
    """Reformat *text* region by region; empty input yields one newline."""
    if text in ("", "\n"):
        return "\n"
    return "".join(
        reformat_region(region, width=width) for region in split_regions(text)
    )
def main():
    """CLI entry point: re-wrap stdin to --width columns onto stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-w", "--width", default=80, type=int)
    args = parser.parse_args()
    raw = sys.stdin.read()
    sys.stdout.write(reformat(raw, width=args.width))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.