index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
18,000 | 9400c7c2092e04ab265708b03f5973d6ef42c027 | T=int(input())
from collections import Counter

# For each of the T test cases (T is read above), decide whether the two
# halves of the string are rearrangements of each other.  For odd lengths
# the middle character is ignored, exactly as in the original split.
for _ in range(T):
    s = input()
    half = len(s) // 2
    first = s[:half]
    second = s[half + len(s) % 2:]
    # The halves are rearrangements of each other iff their character
    # multisets match.  (Counter replaces the previous manual per-character
    # counting; the unused l1..l4 lists are removed.)
    if Counter(first) == Counter(second):
        print("YES")
    else:
        print("NO")
|
18,001 | 96f214ad679b5f13ffff1f23610eaacf15a52df2 | from __future__ import division
from PIL import Image
from server.components.layers.screen_layer import ScreenLayer
from util.timedelta import TimeDelta
DEFAULT_DURATION = 25.0 # milliseconds
class GifScreenLayer(ScreenLayer):
    """Screen layer that plays an animated GIF frame by frame.

    step() advances to the next frame once the current frame's encoded
    duration has elapsed, looping back to frame 0 at end of file.
    """

    def __init__(self, filename):
        # Path of the GIF; the image itself is opened lazily in enter().
        self.filename = filename
        self.is_playing = False

    def _get_duration(self):
        """Return the per-frame duration encoded in the gif file.

        NOTE(review): the raw value is divided by 1000.0, so despite the
        "milliseconds" comment on DEFAULT_DURATION this returns seconds --
        presumably TimeDelta.test() works in seconds; confirm against
        TimeDelta's units.
        """
        try:
            dur = self.im.info["duration"] / 1000.0
        except KeyError:
            # Frame carries no duration entry; fall back to the default.
            dur = DEFAULT_DURATION / 1000.0
        return dur

    def _load_frame(self, i):
        """Seek to frame *i*.

        Returns True when the end of the animation was reached (the frame
        does not exist), False when the frame was loaded successfully.
        (The previous docstring had this inverted.)
        """
        eof = False
        try:
            self.im.seek(i)
        except EOFError:
            eof = True
        return eof

    def enter(self):
        """Open the file and reset the animation to the beginning."""
        self.im = Image.open(self.filename)
        self.frame = 0
        self.timedelta = TimeDelta().reset()
        self._load_frame(self.frame)
        self.dur = self._get_duration()
        self.is_playing = True

    def exit(self):
        # Stop advancing frames; enter() restarts the animation.
        self.is_playing = False

    def suspend(self):
        self.is_playing = False

    def resume(self):
        self.is_playing = True

    def step(self):
        """Advance one frame when the current frame's duration has elapsed."""
        if not self.is_playing:
            return
        if self.timedelta.test(self.dur):
            self.frame += 1
            eof = self._load_frame(self.frame)
            if eof:
                # Reached the last frame: loop back to the start.
                self.frame = 0
                self._load_frame(self.frame)
            self.dur = self._get_duration()
            self.timedelta.reset()

    def render(self):
        """Return an RGBA copy of the current frame."""
        im_copy = self.im.copy()
        im_copy = im_copy.convert("RGBA")
        return im_copy
|
18,002 | c257b5089eeb6ea000fa5653368078324eed0b76 | from fastapi import APIRouter
from src.dependency.manager import Manager
from src.model.group import GroupData
router = APIRouter(prefix="/api/v1/swaddle")
manager = Manager.GROUPED_DATA_MANAGER
@router.get(
    "/group", tags=["Group"],
    summary="Status of Group",
    response_model=GroupData
)
async def status(group: str):
    """Return the GroupData record for the group identified by *group*."""
    return manager.get_by_id(group)
|
18,003 | ae9b6353abb4e4232587dffebc64a174c6f1a3e7 | #coding=utf-8
import paramiko,os,time,configparser
from prometheus_client import CollectorRegistry,Gauge,push_to_gateway
from multiprocessing import process,Queue,Event
import configparser,sys,random,string
sys.path.append('../')
from until_funtion.ssh_tool import ssh_tool
from until_funtion.globallogger import globallogger
class collection_data():
    """Collect CPU and memory usage from configured nodes.

    Samples are shaped into dicts suitable for pushing to a Prometheus
    push gateway by machinery (exec_command, push) defined outside this
    excerpt.
    """

    # Configuration is read once, at class-definition time, from
    # monitor_config.ini located next to this module.
    config = configparser.ConfigParser()
    cur_path = os.path.dirname(os.path.realpath(__file__))
    config_path = os.path.join(cur_path, "monitor_config.ini")
    config.read(config_path)

    def __init__(self):
        self.logger = self.get_logger()
        # [nodes] section: option -> node name; only the names are kept.
        self.nodes = self.config.items("nodes")
        self.node_list = [node[1] for node in self.nodes]
        # [smallnodes] section: "name_ip" option -> ssh password.
        self.small_node = self.config.items("smallnodes")
        self.small_node_list = [node[0] for node in self.small_node]
        # BUGFIX: build the name->password map from the (key, value) pairs.
        # The previous dict(self.small_node_list) -- a list of bare key
        # strings -- raised ValueError for any non-empty [smallnodes]
        # section and could never contain the passwords that
        # get_cpu_info()/get_mem_info() look up via .get(node).
        self.small_node_dic = dict(self.small_node)
        self.small_user = self.config.get("default", "small_user")
        self.ssh_tool = ssh_tool(logger=self.logger)

    def get_logger(self):
        """Return a logger writing to collection_data.log next to this module."""
        log_file = os.path.join(self.cur_path, "collection_data.log")
        logger_handle = globallogger("collection", log_file=log_file)
        return logger_handle.log

    def get_cpu_info(self, node):
        """Sample CPU usage on *node* and return a push-gateway data dict."""
        data_type = "systemcpu"
        # NOTE: the "lables" key is kept as-is (typo included) because
        # downstream consumers key on this exact string.
        data_dic = {"keyword": data_type + "_used",
                    "job": node + "_" + data_type,
                    "lables": ["sys_name", "sys_type"],
                    "value_list": []
                    }
        cpu_command = "top -bn 1|sed -n '3p'| awk -F ':' '{print $2}'|awk '{print $1}'"
        if "_" in node:
            # "name_ip" nodes are reached through an extra ssh hop with
            # sshpass, using the password from the [smallnodes] section.
            password = self.small_node_dic.get(node)
            node_list = node.split("_")
            node = node_list[0]
            node_ip = node_list[1]
            cpu_command = """sshpass -p "%s" ssh %s@%s "export PATH=/usr/sbin:$path;%s" """ % (password, self.small_user, node_ip, cpu_command)
        # NOTE(review): exec_command is defined outside this excerpt; it is
        # assumed to return a list of output lines -- confirm.
        cpu_use = self.exec_command(cpu_command, node)
        data_dic["value_list"].append([data_type, data_type, cpu_use[0].replace("%", "")])
        return data_dic

    def get_mem_info(self, node):
        """Sample memory usage (percent) on *node* and return a data dict."""
        data_type = "systemmem"
        data_dic = {"keyword": data_type + "_used",
                    "job": node + "_" + data_type,
                    "lables": ["sys_name", "sys_type"],
                    "value_list": []
                    }
        mem_command = """free -m|sed -n '2p' |awk '{printf ("%.1f\\n",$3/$2*100)}'"""
        if "_" in node:
            # Same second-hop handling as get_cpu_info().
            password = self.small_node_dic.get(node)
            node_list = node.split("_")
            node = node_list[0]
            node_ip = node_list[1]
            mem_command = """sshpass -p "%s" ssh %s@%s "export PATH=/usr/sbin:$path;%s" """ % (password, self.small_user, node_ip, mem_command)
        mem_use = self.exec_command(mem_command, node)
        data_dic["value_list"].append([data_type, data_type, mem_use[0].replace("%", "")])
        return data_dic
|
18,004 | fef4fc2fae6c26b4a13f0a9d06113d47f394521f | t = int(input())
# For each test case, print the maximum number of houses purchasable
# within the budget.  The pointless single-iteration inner loop
# (`for j in range(1):`) from the original is removed.
for case in range(1, t + 1):
    # First input line: house count (unused) and budget; second: prices.
    number_houses, budget = [int(s) for s in input().split(" ")]
    houses_prices = [int(s) for s in input().split(" ")]
    houses_prices.sort()
    # Greedy: buying the cheapest houses first maximises the count.
    sum_prices = 0
    maximun_number_houses = 0
    for price in houses_prices:
        sum_prices = sum_prices + price
        if sum_prices > budget:
            break
        else:
            maximun_number_houses = maximun_number_houses + 1
    print('Case #{}: {}'.format(str(case), str(maximun_number_houses)))
18,005 | 00380036d56784844f511df8a25dd5e33844314e | import setuptools
# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="power_dict",
    version="0.0.13",
    author="Gorinenko Anton",
    author_email="anton.gorinenko@gmail.com",
    description="Library for easy work with the python dictionary",
    long_description=long_description,
    keywords='python, dict, utils',
    long_description_content_type="text/markdown",
    url="https://github.com/agorinenko/power-dict",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Sole runtime dependency; tests are run through pytest-runner.
    install_requires=[
        'try-parse'
    ],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    python_requires='>=3.7',
)
|
18,006 | 055d9f7ff7934a0b6d8f8b3ef4ee4467ba28c941 | import sys
import time
import re
import rollbar
from slackclient import SlackClient
from ecs_deploy import handle_ecs_bot_cmd
from secret_manager import get_secret
# logging
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
# constants
RTM_READ_DELAY = 1 # 1 second delay between reading from RTM
MENTION_REGEX = "^<@(|[WU].+?)>(.*)"
COMMAND_REGEX = "^(?P<cmd>\w+)\s(to)\s(?P<cluster>\w+)\s(?P<services>(([a-zA-Z0-9\/\-]+)(\:([a-zA-Z0-9\-]+))?)?(\,([a-zA-Z0-9\/\-]+)(\:([a-zA-Z0-9\-]+)){1})*)$"
SERVICES_REGEX = "(?P<service>[a-zA-Z0-9\/\-]+)(\:(?P<tag>[a-zA-Z0-9\-]+))?"
def parse_bot_commands(starterbot_id, slack_events):
    """
    Parses a list of events coming from the Slack RTM API to find bot commands.
    If a bot command is found, this function returns a tuple of command and channel.
    If its not found, then this function returns None, None.
    """
    for event in slack_events:
        # Only plain user messages; events carrying a subtype (edits,
        # joins, bot messages, ...) are skipped.  Idiom fix: use
        # `"subtype" not in event` instead of `not "subtype" in event`.
        if event["type"] == "message" and "subtype" not in event:
            user_id, message = parse_direct_mention(event["text"])
            # Act only when the bot itself is the mentioned user.
            if user_id == starterbot_id:
                return message, event["channel"]
    return None, None
def parse_direct_mention(message_text):
    """Find a direct mention at the start of *message_text*.

    Returns (user_id, remaining_message) when the text begins with a
    mention, otherwise (None, None).
    """
    match = re.search(MENTION_REGEX, message_text)
    if match is None:
        return None, None
    # Group 1 is the mentioned user ID, group 2 the rest of the message.
    return match.group(1), match.group(2).strip()
def parse_command(message_text):
    """Parse "<cmd> to <cluster> <svc>[:<tag>][,<svc>[:<tag>]...]".

    Returns (cmd, cluster, services) where services is a list of
    {"service": ..., "tag": ...} dicts (tag may be None).

    Raises ValueError when the text does not match COMMAND_REGEX.
    """
    m = re.search(COMMAND_REGEX, message_text)
    if not m:
        raise ValueError
    services = [
        {"service": svc.group("service"), "tag": svc.group("tag")}
        for svc in re.finditer(SERVICES_REGEX, m.group("services"))
    ]
    # The raise above guarantees m is truthy here, so the original
    # trailing `... if m else (None, None, None)` was dead code.
    return m.group("cmd"), m.group("cluster"), services
def handle_command(slack_client, authorized_channel_id, command, channel):
    """
    Executes bot command if the command is known
    """
    # Default reply, used when the command cannot be parsed.
    parameters = {
        "channel": channel,
        "text": "Not sure what you mean. Try *{}*" \
            .format("@nysa deploy to <cluster> <service>:<tag>[,<service>:<tag>] or @nysa deploy to <cluster> all:<tag>")
    }
    if channel != authorized_channel_id:
        # Refuse commands issued outside the single authorized channel.
        parameters["text"] = "you cannot invoke @nysa outside of the authorized channel"
    else:
        try:
            # parse_command raises ValueError on malformed input; in that
            # case the default usage text above is kept unchanged.
            parameters.update(handle_ecs_bot_cmd(*parse_command(command)))
        except ValueError:
            pass
        except Exception as ex:
            # Unexpected failure: log, report to rollbar, and reply with
            # a generic error message.
            logging.error(ex)
            rollbar.report_exc_info(sys.exc_info())
            parameters["text"] = "Oops, there was an error with the deploy, try it again!!"
    # Sends the response back to the channel
    slack_client.api_call("chat.postMessage", **parameters)
if __name__ == "__main__":
    logging.info("Starting Slackbot")
    # Credentials and config come from the secret manager.
    rollbar.init(get_secret('ROLLBAR_KEY'))
    authorized_channel = get_secret('SLACK_BOT_AUTHORIZED_CHANNEL')
    slack_client = SlackClient(get_secret('SLACK_BOT_TOKEN'))
    try:
        logging.info("getting channels list")
        rs = slack_client.api_call("channels.list")
        channels = [channel for channel in rs.get(u'channels')]
        logging.info("getting private groups list")
        rs = slack_client.api_call("groups.list")
        channels.extend([group for group in rs.get(u'groups')])
        # Resolve the configured channel name (case-insensitive) to its ID.
        authorized_channel_id = next(channel for channel in channels
            if channel.get(u'name').lower() == authorized_channel.lower()).get(u'id')
        logging.info("connecting to rtm")
        if slack_client.rtm_connect(with_team_state=False):
            logging.info("Starter Bot connected and running!")
            # Read bot's user ID by calling Web API method `auth.test`
            starterbot_id = slack_client.api_call("auth.test")["user_id"]
            # Poll the RTM stream forever, dispatching any bot commands.
            while True:
                command, channel = parse_bot_commands(starterbot_id, slack_client.rtm_read())
                if command:
                    handle_command(slack_client, authorized_channel_id, command, channel)
                time.sleep(RTM_READ_DELAY)
        else:
            logging.error("Connection failed. Exception traceback printed above.")
    except Exception as e:
        # Top-level boundary: log and report rather than crash silently.
        logging.error(e)
        rollbar.report_exc_info()
|
18,007 | 47cc944889f84a4d643c114b15f2d4b3d9abb692 | from .auth import login_user
from .auth import register_user
from .jobs import AllJobs
from .companies import AllCompanies
from .contacts import AllContacts
from .statuses import AllStatuses |
18,008 | 413f994adfebd8d9fb0dc59b786462dbcf617282 | import os
import atexit
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from flask_mail import Message
from app import mail
from app.helpers.query_helpers import get_tomorrow_flights
def get_booked_flights():
    """Return tomorrow's flights that have at least one booked seat.

    BUGFIX: the loop previously iterated over the (initially empty)
    result list ``flights`` instead of the flights fetched from the
    database, so this function always returned an empty list and no
    reminder was ever sent.
    """
    logging.info('Fetching tomorrows flights from the database')
    flights = []
    flights_to = get_tomorrow_flights()
    for flight in flights_to:
        total_booked = flight.booked_business + flight.booked_economy
        total_seats = flight.airplane.total_seats
        # (total_seats - total_booked) != total_seats  <=>  booked > 0
        if (total_seats - total_booked) != total_seats:
            flights.append(flight)
    return flights
def create_message_list(flights):
    """Build one reminder e-mail Message per flight in *flights*."""
    messages = []
    for flight in flights:
        # NOTE(review): `flight.bookings.owner.email.name` in the greeting
        # looks suspicious -- probably `owner.name` was intended; confirm
        # against the booking/owner models.
        message = Message(
            subject='Flight Reservation Reminder',
            recipients=[flight.bookings.owner.email],
            html=(f'Hello {flight.bookings.owner.email.name},'
                  f'<p> This is to remind you of your scheduled flight <b>{flight.airplane.reg_number}</b>'
                  f'from <b>{flight.airport.name}</b> on <b>{flight.departure_date}</b> </p>'
                  f'<p> Please check in for your flight three hours before departure time</p>'
                  f'<p> Thank you </p>')
        )
        messages.append(message)
    return messages
def send_reminder_email():
    """Send a reminder e-mail for every flight booked for tomorrow."""
    flights = get_booked_flights()
    if not flights:
        logging.info("There are no bookings for tommorrow flights yet")
    else:
        logging.info("creating the mailing lists ...")
        messages = create_message_list(flights)
        logging.info("connecting to the mail server ...")
        with mail.connect() as conn:
            for message in messages:
                try:
                    conn.send(message)
                    # BUGFIX: the success message was a bare string
                    # expression (a no-op) -- actually log it now.
                    logging.info("sending success: %s", message.recipients)
                except Exception:
                    # BUGFIX: '"..." + message.recipients' raised TypeError
                    # (str + list) inside the handler; lazy %-formatting
                    # handles the list safely.
                    logging.exception("sending failed: %s", message.recipients)
def background_scheduler():
    """Start a background scheduler that sends reminder e-mails once a day."""
    scheduler = BackgroundScheduler()
    scheduler.start()
    # Daily job anchored at the given start date (03:00).
    scheduler.add_job(
        func=send_reminder_email,
        trigger=IntervalTrigger(start_date='2019-07-02 03:00:00', days=1),
        id='reminder_email_job',
        name='sending emails in the background',
        replace_existing=True)
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('apscheduler').setLevel(logging.DEBUG)
    # Ensure the scheduler thread is stopped on interpreter exit.
    atexit.register(lambda: scheduler.shutdown())
|
18,009 | 69954e135d125f8504a44aec6b0b94352dcf0d2a | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.io import savemat
from scipy.io import loadmat
import timeit
# import density integration functions
from DensityIntegrationUncertaintyQuantification import Density_integration_Poisson_uncertainty
from DensityIntegrationUncertaintyQuantification import Density_integration_WLS_uncertainty
import loadmat_functions
import helper_functions
def main():
    """Run the sample density-integration pipeline.

    Loads projected displacements and their uncertainties from a .mat
    file, converts them to density gradients in physical units,
    integrates the density field (Poisson or weighted-least-squares)
    with uncertainty propagation, then saves and plots the result.
    """
    # file containing displacements and uncertainties
    filename = 'sample-displacements.mat'
    # displacement estimation method ('c' for correlation and 't' for tracking)
    displacement_estimation_method = 'c'
    # displacement uncertainty method ('MC' for correlation and 'crlb' for tracking)
    displacement_uncertainty_method = 'MC'
    # set integration method ('p' for poisson or 'w' for wls)
    density_integration_method = 'w'
    # dataset type (syntehtic or experiment)
    dataset_type = 'synthetic'

    # -------------------------------------------------
    # experimental parameters for density integration
    # -------------------------------------------------
    experimental_parameters = dict()
    # ambient/reference density (kg/m^3)
    experimental_parameters['rho_0'] = 1.225
    # uncertainty in the reference density (kg/m^3) (MUST BE GREATER THAN 0)
    experimental_parameters['sigma_rho_0'] = 1e-10
    # gladstone dale constant (m^3/kg)
    experimental_parameters['gladstone_dale'] = 0.225e-3
    # ambient refractive index
    experimental_parameters['n_0'] = 1.0 + experimental_parameters['gladstone_dale'] * experimental_parameters['rho_0']
    # thickness of the density gradient field (m)
    experimental_parameters['delta_z'] = 0.01
    # distance between lens and dot target (object / working distance) (m)
    experimental_parameters['object_distance'] = 1.0
    # distance between the mid-point of the density gradient field and the dot pattern (m)
    experimental_parameters['Z_D'] = 0.25
    # distance between the mid-point of the density gradient field and the camera lens (m)
    experimental_parameters['Z_A'] = experimental_parameters['object_distance'] - experimental_parameters['Z_D']
    # distance between the dot pattern and the camera lens (m)
    experimental_parameters['Z_B'] = experimental_parameters['object_distance']
    # origin (pixels)
    experimental_parameters['x0'] = 256
    experimental_parameters['y0'] = 256
    # size of a pixel on the camera sensor (m)
    experimental_parameters['pixel_pitch'] = 10e-6
    # focal length of camera lens (m)
    experimental_parameters['focal_length'] = 105e-3
    # non-dimensional magnification of the dot pattern (can also set it directly)
    experimental_parameters['magnification'] = experimental_parameters['focal_length'] / (
        experimental_parameters['object_distance'] - experimental_parameters['focal_length'])
    # uncertainty in magnification
    experimental_parameters['sigma_M'] = 0.1
    # uncertainty in Z_D (m)
    experimental_parameters['sigma_Z_D'] = 1e-3
    # non-dimensional magnification of the mid-z-PLANE of the density gradient field
    experimental_parameters['magnification_grad'] = experimental_parameters['magnification'] \
        * experimental_parameters['Z_B'] / experimental_parameters['Z_A']

    # --------------------------
    # processing
    # --------------------------
    # load displacements and uncertainties from file
    if displacement_estimation_method == 'c':
        # correlation
        X_pix, Y_pix, U, V, sigma_U, sigma_V, Eval = helper_functions.load_displacements_correlation(filename, displacement_uncertainty_method)
    elif displacement_estimation_method == 't':
        # tracking
        # NOTE(review): experimental_parameters['dot_spacing'] is never set
        # above, so this branch would raise KeyError as written -- confirm
        # before running with tracking input.
        X_pix, Y_pix, U, V, sigma_U, sigma_V = helper_functions.load_displacements_tracking(filename, experimental_parameters['dot_spacing'], displacement_uncertainty_method)

    # account for sign convention
    if dataset_type == 'synthetic':
        U *= -1
        V *= -1

    # create mask array (1 for flow, 0 elsewhere) - only implemented for Correlation at the moment
    if displacement_estimation_method == 'c':
        mask = helper_functions.create_mask(X_pix.shape[0], X_pix.shape[1], Eval)
    elif displacement_estimation_method == 't':
        mask = np.ones_like(a=U)

    # convert displacements to density gradients and co-ordinates to physical units
    X, Y, rho_x, rho_y, sigma_rho_x, sigma_rho_y = helper_functions.convert_displacements_to_physical_units(X_pix, Y_pix, U, V, sigma_U, sigma_V, experimental_parameters, mask)

    # define dirichlet boundary points (minimum one point) - here defined to be all boundaries
    # This is specific to the current dataset
    dirichlet_label, rho_dirichlet, sigma_rho_dirichlet = helper_functions.set_bc(X_pix.shape[0], X_pix.shape[1], experimental_parameters['rho_0'], experimental_parameters['sigma_rho_0'])

    # calculate density and uncertainty
    if density_integration_method == 'p':
        # Poisson
        rho, sigma_rho = Density_integration_Poisson_uncertainty(X, Y, mask, rho_x, rho_y,
                                                                 dirichlet_label, rho_dirichlet,
                                                                 uncertainty_quantification=True,
                                                                 sigma_grad_x=sigma_rho_x, sigma_grad_y=sigma_rho_y,
                                                                 sigma_dirichlet=sigma_rho_dirichlet)
    elif density_integration_method == 'w':
        # Weighted Least Squares
        rho, sigma_rho = Density_integration_WLS_uncertainty(X, Y, mask, rho_x, rho_y,
                                                             dirichlet_label, rho_dirichlet,
                                                             uncertainty_quantification=True,
                                                             sigma_grad_x=sigma_rho_x, sigma_grad_y=sigma_rho_y,
                                                             sigma_dirichlet=sigma_rho_dirichlet)

    # save the results to file
    savemat(filename='sample-result.mat', mdict={'X': X, 'Y': Y, 'rho': rho, 'sigma_rho': sigma_rho,
                                                 'dirichlet_label': dirichlet_label, 'rho_dirichlet': rho_dirichlet, 'sigma_rho_dirichlet': sigma_rho_dirichlet
                                                 }, long_field_names=True)

    # plot results
    fig = helper_functions.plot_figures(X, Y, rho_x, rho_y, rho, sigma_rho)
    # save plot to file
    fig.savefig('sample-result.png')
    plt.close()


if __name__ == '__main__':
    main()
|
18,010 | aa17ee530a94bcdaeb801f1357ffd51abcf7e275 |
def create_list(a, b):
    """Return [a, b] repeated four times: [a, b, a, b, a, b, a, b]."""
    # Sequence repetition replaces the manual append loop; the result
    # references the same objects the loop would have appended.
    return [a, b] * 4
print(create_list(1,2))
|
18,011 | 0796190640ae78ecb6a9aadd8e3871d347520ee1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 18:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the Language model and re-points Word.language at it."""

    dependencies = [
        ('dictionary', '0001_initial'),
    ]

    operations = [
        # New Language model keyed by its (max 5 character) language code.
        migrations.CreateModel(
            name='Language',
            fields=[
                ('language', models.CharField(max_length=5, primary_key=True, serialize=False)),
                ('alphabet', models.CharField(max_length=50)),
            ],
        ),
        # Word.language becomes a FK to the new model (CASCADE on delete).
        migrations.AlterField(
            model_name='word',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dictionary.Language'),
        ),
    ]
|
18,012 | 4ed68d8c81c53409c640d43f1f76df5ad3439bd7 | # use following to specify what is exported from module
|
18,013 | 5e2b183d6cd3ab5715dfce9425fa14a3e6f8fde4 | """Constants for the Amazon Polly text to speech service."""
from __future__ import annotations
from typing import Final
CONF_REGION: Final = "region_name"
CONF_ACCESS_KEY_ID: Final = "aws_access_key_id"
CONF_SECRET_ACCESS_KEY: Final = "aws_secret_access_key"
DEFAULT_REGION: Final = "us-east-1"
SUPPORTED_REGIONS: Final[list[str]] = [
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
"ca-central-1",
"eu-west-1",
"eu-central-1",
"eu-west-2",
"eu-west-3",
"ap-southeast-1",
"ap-southeast-2",
"ap-northeast-2",
"ap-northeast-1",
"ap-south-1",
"sa-east-1",
]
CONF_ENGINE: Final = "engine"
CONF_VOICE: Final = "voice"
CONF_OUTPUT_FORMAT: Final = "output_format"
CONF_SAMPLE_RATE: Final = "sample_rate"
CONF_TEXT_TYPE: Final = "text_type"
SUPPORTED_VOICES: Final[list[str]] = [
"Aditi", # Hindi
"Amy", # English (British)
"Aria", # English (New Zealand), Neural
"Arlet", # Catalan, Neural
"Arthur", # English, Neural
"Astrid", # Swedish
"Ayanda", # English (South African), Neural
"Bianca", # Italian
"Brian", # English (British)
"Camila", # Portuguese, Brazilian
"Carla", # Italian
"Carmen", # Romanian
"Celine", # French
"Chantal", # French Canadian
"Conchita", # Spanish (European)
"Cristiano", # Portuguese (European)
"Daniel", # German, Neural
"Dora", # Icelandic
"Elin", # Swedish, Neural
"Emma", # English
"Enrique", # Spanish (European)
"Ewa", # Polish
"Filiz", # Turkish
"Gabrielle", # French (Canadian)
"Geraint", # English Welsh
"Giorgio", # Italian
"Gwyneth", # Welsh
"Hala", # Arabic (Gulf), Neural
"Hannah", # German (Austrian), Neural
"Hans", # German
"Hiujin", # Chinese (Cantonese), Neural
"Ida", # Norwegian, Neural
"Ines", # Portuguese, European
"Ivy", # English
"Jacek", # Polish
"Jan", # Polish
"Joanna", # English
"Joey", # English
"Justin", # English
"Kajal", # English (Indian)/Hindi (Bilingual ), Neural
"Karl", # Icelandic
"Kendra", # English
"Kevin", # English, Neural
"Kimberly", # English
"Laura", # Dutch, Neural
"Lea", # French
"Liam", # Canadian French, Neural
"Liv", # Norwegian
"Lotte", # Dutch
"Lucia", # Spanish European
"Lupe", # Spanish US
"Mads", # Danish
"Maja", # Polish
"Marlene", # German
"Mathieu", # French
"Matthew", # English
"Maxim", # Russian
"Mia", # Spanish Mexican
"Miguel", # Spanish US
"Mizuki", # Japanese
"Naja", # Danish
"Nicole", # English Australian
"Ola", # Polish, Neural
"Olivia", # Female, Australian, Neural
"Penelope", # Spanish US
"Pedro", # Spanish US, Neural
"Raveena", # English, Indian
"Ricardo", # Portuguese (Brazilian)
"Ruben", # Dutch
"Russell", # English (Australian)
"Ruth", # English, Neural
"Salli", # English
"Seoyeon", # Korean
"Stephen", # English, Neural
"Suvi", # Finnish
"Takumi", # Japanese
"Tatyana", # Russian
"Vicki", # German
"Vitoria", # Portuguese, Brazilian
"Zeina", # Arabic
"Zhiyu", # Chinese
]
# Audio output formats Polly can synthesise to.
SUPPORTED_OUTPUT_FORMATS: Final[list[str]] = ["mp3", "ogg_vorbis", "pcm"]
SUPPORTED_ENGINES: Final[list[str]] = ["neural", "standard"]
SUPPORTED_SAMPLE_RATES: Final[list[str]] = ["8000", "16000", "22050", "24000"]
# Sample rates actually valid for each output format.
SUPPORTED_SAMPLE_RATES_MAP: Final[dict[str, list[str]]] = {
    "mp3": ["8000", "16000", "22050", "24000"],
    "ogg_vorbis": ["8000", "16000", "22050"],
    "pcm": ["8000", "16000"],
}
SUPPORTED_TEXT_TYPES: Final[list[str]] = ["text", "ssml"]
# Maps the Content-Type returned by Polly to the file extension used for
# the generated audio.
CONTENT_TYPE_EXTENSIONS: Final[dict[str, str]] = {
    "audio/mpeg": "mp3",
    "audio/ogg": "ogg",
    "audio/pcm": "pcm",
}
# Defaults applied when the user's configuration omits an option.
DEFAULT_ENGINE: Final = "standard"
DEFAULT_VOICE: Final = "Joanna"
DEFAULT_OUTPUT_FORMAT: Final = "mp3"
DEFAULT_TEXT_TYPE: Final = "text"
DEFAULT_SAMPLE_RATES: Final[dict[str, str]] = {
    "mp3": "22050",
    "ogg_vorbis": "22050",
    "pcm": "16000",
}
# Client timeout/pooling settings for the AWS connection.
AWS_CONF_CONNECT_TIMEOUT: Final = 10
AWS_CONF_READ_TIMEOUT: Final = 5
AWS_CONF_MAX_POOL_CONNECTIONS: Final = 1
|
18,014 | 4d1a5d68a3a33168c8fd5604a0576a9a08531f29 | import fnmatch
import os
import re
from collections import Counter
from scipy import stats
pastas=["cursos_9","cursos_15","cursos_16","cursos_20","cursos_25","cursos_27","cursos_28","cursos_31","cursos_37","cursos_41","cursos_42","cursos_54",
"groups\\courses_20_25_28_54_41_31_group_1","groups\\courses_15_42_37_27_group_2","groups\\courses_9_16_group_3"]
nome=["Nutrição","Farmácia","Medicina","Matemática","Física","Engenharia Química","Química","Ciências da Computação","Engenharia Civil","Engenharia de Telecomunicações","Engenharia de Produção","Estatística",
"Cursos de Alta Taxa de Evasão","Cursos de Média Taxa de Evasão","Cursos de Baixa Taxa de Evasão"]
arquivos=["normalize_chi2_*_10_dt.txt","normalize_chi2_*_10_knn.txt","normalize_chi2_*_10_mlp.txt","normalize_chi2_*_10_svm.txt","normalize_chi2_*_10_xgboost.txt",
"normalize_pca_*_10_dt.txt","normalize_pca_*_10_knn.txt","normalize_pca_*_10_mlp.txt","normalize_pca_*_10_svm.txt","normalize_pca_*_10_xgboost.txt",
"standard_pca_*_10_dt.txt","standard_pca_*_10_knn.txt","standard_pca_*_10_mlp.txt","standard_pca_*_10_svm.txt","standard_pca_*_10_xgboost.txt"]
# Collected results: configuration file patterns whose fold scores are
# statistically indistinguishable (p > 0.05) from the fixed baseline,
# and the corresponding p-values (parallel lists).
x = []
ps = []
for i in range(len(pastas)):
    for j in range(len(arquivos) - 1):
        #for k in range((j+1),len(arquivos)):
        nomeArquivo = fnmatch.filter(os.listdir("resultados/data_2009_2010/" + pastas[i]), arquivos[j])
        file = open("resultados/data_2009_2010/" + pastas[i] + "/" + nomeArquivo[0], "r")
        #nomeArquivo2 = fnmatch.filter(os.listdir("resultados/data_2009_2010/" + pastas[i]), arquivos[k])
        # arquivos[14] (standard_pca xgboost) is the fixed baseline.
        nomeArquivo2 = fnmatch.filter(os.listdir("resultados/data_2009_2010/" + pastas[i]), arquivos[14])
        file2 = open("resultados/data_2009_2010/" + pastas[i] + "/" + nomeArquivo2[0], "r")
        lines = file.readlines()
        # Line 20 of each result file carries the numeric fold scores.
        score = re.findall(r"[-+]?[0-9]*\.?[0-9]+", lines[19], re.MULTILINE)
        score = list(map(float,score))
        lines2 = file2.readlines()
        score2 = re.findall(r"[-+]?[0-9]*\.?[0-9]+", lines2[19], re.MULTILINE)
        score2 = list(map(float, score2))
        #print(score)
        #print(score2)
        # Two-sample t-test between candidate and baseline scores.
        t, p = stats.ttest_ind(score, score2)
        if p > 0.05:
            x.append(arquivos[j])
            print("resultados/data_2009_2010/" + pastas[i] + "/" + nomeArquivo[0], "r")
            print("resultados/data_2009_2010/" + pastas[i] + "/" + nomeArquivo2[0], "r")
            print("t = " + str(t))
            print("p = " + str(p))
            ps.append(p)
            #print()
        file.close()
        file2.close()
print()
print(Counter(x))
#hardcoded
# Regroup the collected p-values per configuration name.
standard_pca__10_dt = []
standard_pca__10_svm = []
normalize_chi2__10_xgboost = []
normalize_chi2__10_dt = []
normalize_pca__10_dt = []
normalize_pca__10_xgboost = []
for i in range(len(x)):
    if x[i] == 'standard_pca_*_10_dt.txt':
        standard_pca__10_dt.append(ps[i])
    if x[i] == 'standard_pca_*_10_svm.txt':
        standard_pca__10_svm.append(ps[i])
    if x[i] == 'normalize_chi2_*_10_xgboost.txt':
        normalize_chi2__10_xgboost.append(ps[i])
    if x[i] == 'normalize_chi2_*_10_dt.txt':
        normalize_chi2__10_dt.append(ps[i])
    if x[i] == 'normalize_pca_*_10_dt.txt':
        normalize_pca__10_dt.append(ps[i])
    if x[i] == 'normalize_pca_*_10_xgboost.txt':
        normalize_pca__10_xgboost.append(ps[i])
print(standard_pca__10_dt)
print(standard_pca__10_svm)
print(normalize_chi2__10_xgboost)
print(normalize_chi2__10_dt)
print(normalize_pca__10_dt)
print(normalize_pca__10_xgboost)
|
18,015 | da9a65037482736fc7f6aba0596eb9724b64eec6 | import socket
import json
msg_token = '<m>'
def send_message(senderid, receiverid, msg, client_socket):
    """Serialise a chat message as JSON and send it over *client_socket*."""
    # Build the envelope in one literal instead of key-by-key assignment;
    # insertion order (Token, Data / SenderID, ReceiverID, Message) is the
    # same, so the serialised JSON is byte-identical.
    payload = {
        'Token': msg_token,
        'Data': {
            'SenderID': senderid,
            'ReceiverID': receiverid,
            'Message': msg,
        },
    }
    client_socket.send(json.dumps(payload).encode())
|
18,016 | 7ba42bf6177ae24d4df4750c4b8c49de8beebcdf | # Equailty operators
# equals to (==)
print(10==20)
print(1==True)
# not equals to (!=)
print(10!=20)
print(0!=False)
# Equality yields 'False' (rather than raising) when incompatible
# data types are compared.
print(10 == 'apple')
print(10 == '10')
# Chain of equality operators:
# the chain is True only if every pairwise comparison is True;
# a single False comparison makes the whole chain False.
print(10 == 20 == 40)
print(10 == 10 == 10)
# NOTE : The difference between '==' and 'is' operators :
# '==' compares content (values).
# 'is' compares identity (whether both names refer to the same object).
l1 = [10,20,30]
l2 = [10,20,30]
print(l1 is l2) # False: l1 and l2 are distinct objects at different addresses.
print(l1 == l2) # True: their contents are equal.
l3 = []
l1 = l3
print(l1 is l3) # True: l1 and l3 now refer to the same object.
|
18,017 | 1c2c7651505aed36bed15120989446e89c00ecd8 | from app import app
from mvcode.signals import mvcode_generated, mvcode_verified
from sms.gateway import Gateway as SMSGateway
from mvcode.generator import Generator
from mvcode.store import Store
from mvcode.usage import Usage
MVCODE_SMS_TEMPLATE = 'your mvcode is {}'
class MvcodeService:
    """Generates, delivers (via SMS) and verifies mobile verification codes."""

    def __init__(self, generator: Generator, sms_gateway: SMSGateway, store: Store,
                 length: int = 4, template: str = MVCODE_SMS_TEMPLATE):
        super(MvcodeService, self).__init__()
        self._generator = generator
        self._sms_gateway = sms_gateway
        self._store = store
        # Number of digits/characters in a generated code.
        self._mvcode_length = length
        # SMS body template; formatted with the generated code.
        self._template = template

    def generate_mvcode(self, mobile, used_for=Usage.REGISTRATION):
        """Create a code for *mobile*, send it by SMS, store it, and emit a signal."""
        # Generate the verification code.
        mvcode = self._generator.generate_mvcode(self._mvcode_length)
        # Send the code to the phone via the SMS gateway.
        self._sms_gateway.send(mobile, self._template.format(mvcode))
        # Keep it in temporary storage for later verification.
        self._store.save(mobile, used_for.name, mvcode)
        # Emit the "mvcode generated" signal.
        mvcode_generated.send(app, mobile=mobile, used_for=used_for, mvcode=mvcode)

    def verify_mvcode(self, mobile, mvcode, used_for=Usage.REGISTRATION):
        """Check *mvcode* against the stored code for *mobile* and emit a signal.

        Returns the store's verification result (truthy on success --
        confirm against Store.verify's contract).
        """
        # Check whether the supplied code matches the stored one.
        result = self._store.verify(mobile, used_for.name, mvcode)
        # Emit the "mvcode verified" signal with the outcome.
        mvcode_verified.send(app, mobile=mobile, used_for=used_for, mvcode=mvcode, result=result)
        return result
# mvcode_service = MvcodeService()
|
18,018 | e59844de681eaa86e82099fff04dbd999a223505 | '''
爬取淘宝手机信息 手机型号 原价 现在价格 商店 月销
反爬: 无法直接得到手机信息,动态页面
方法: js逆向 使用selenium用户模拟 爬取数据
'''
from selenium import webdriver
import time
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from 动态网页爬取.save import taobaosql
from 动态网页爬取.CONSTANT import URL
# Scrape phone listings from Taobao search-result pages with a real
# browser session (the pages are rendered by JS, so plain HTTP requests
# cannot see the data).
browser = webdriver.Chrome()
browser.maximize_window()
for url in URL:
    # url = 'https://uland.taobao.com/sem/tbsearch?refpid=...&keyword=...'  (example search URL)
    page = 0
    browser.get(url)
    browser.implicitly_wait(5)
    mobile = browser.find_elements_by_xpath('//ul[@class="pc-search-items-list"]/li')
    next_page = browser.find_element_by_xpath('//div[@id="J_pc-search-page-nav"]/span[3]')
    # Walk at most 17 result pages per search URL.
    while page < 17:
        sql_conn = taobaosql.SavePhone()
        # Continue numbering from the highest id already stored.
        id = sql_conn.maxindex()+1
        try:
            # One <li> per listing: model, price, original price,
            # shop name and monthly sales.
            for res in mobile:
                phone = res.find_element_by_xpath('./a/div[1]/span').text
                price = '¥'+res.find_element_by_xpath('./a/div[2]/span[2]').text
                original_price = res.find_element_by_xpath('./a/div[2]/span[3]').text
                shop = res.find_element_by_xpath('./a/div[3]/div').text
                monthly_sales = res.find_element_by_xpath('./a/div[4]/div[2]').text
                sql_conn.insert((id, phone, price, original_price, shop, monthly_sales))
                id += 1
            print("下载完成了%d" % id)
            # Click "next page" via JS to avoid element-interception issues.
            next_page = WebDriverWait(browser, 60).until(lambda x: x.find_element(By.XPATH, '//div[@id="J_pc-search-page-nav"]/span[3]'))
            browser.execute_script('arguments[0].click()', next_page)
            page += 1
            mobile = browser.find_elements_by_xpath('//ul[@class="pc-search-items-list"]/li')
            next_page = browser.find_element_by_xpath('//div[@id="J_pc-search-page-nav"]/span[3]')
            time.sleep(10)
        except Exception as e:
            print("停止下载, {}".format(e))
            break
        finally:
            # Always release the DB connection and report where we stopped.
            sql_conn.close_sql()
            currentPageUrl = browser.current_url
            print("当前页面的url是:", currentPageUrl)
            time.sleep(1)
browser.quit()
# 手机型号 phone //ul/li/a/div[1]/span
# 售价 ¥+price //ul/li/a/div[2]/span[2]
# 原价 original_price //ul/li/a/div[2]/span[3]
# 商店 shop //ul/li/a/div[3]/div
# 月销 monthly_sales //ul/li/a/div[4]/div[2] |
18,019 | 5cb582daf9fddfd4a81a03cbc72aee9696814604 | from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
    """ModelForm for posting a Comment; only the content field is exposed."""

    # Bootstrap-styled textarea for the comment body.
    content = forms.CharField(widget=forms.Textarea(attrs={
        'class': 'form-control',
        'placeholder': 'Type your comment',
        'id': 'usercomment',
        'rows': '4'
    }))

    class Meta:
        model = Comment
        fields = ('content', )
class ContactForm(forms.Form):
    """Plain contact form: sender name, e-mail address and message body."""

    contact_name = forms.CharField(required=True)
    contact_email = forms.EmailField(required=True)
    content = forms.CharField(
        required=True,
        widget=forms.Textarea(attrs={
            'class': 'form-control',
            'placeholder': 'What do you want to say?',
            'rows': '4'
        })
    )
    # Bootstrap classes and placeholders are attached after declaration so
    # the simple field constructors above stay uncluttered.
    contact_name.widget.attrs.update({'class': 'form-control', 'placeholder': 'Your name'})
    contact_email.widget.attrs.update({'class': 'form-control', 'placeholder': 'Your email'})
|
18,020 | 59cb29cddf9cf69683625240ad4b23427628a02e | #!/usr/bin/env python
"""
This is an EXAMPLE script of how to apply detector simulation.
There does not seem to be a collection of simulation tray segments
anywhere, so this uses a bunch of modules copied from simprod
scripts. This simulation should be valid for IC86, but don't trust
it blindly. Check the modules against simprod to make sure this
does the same thing!
"""
from __future__ import print_function
from optparse import OptionParser
import os
import string
usage = "usage: %prog [options] inputfile"
parser = OptionParser(usage)
parser.add_option("-o", "--outfile", default=None,
                  dest="OUTFILE", help="Write output to OUTFILE (.i3{.gz} format)")
parser.add_option("-i", "--infile", default="test_flashes_clsim.i3",
                  dest="INFILE", help="Read input from INFILE (.i3{.gz} format)")
parser.add_option("-r", "--runnumber", type="int", default=1,
                  dest="RUNNUMBER", help="The run number for this simulation")
parser.add_option("-s", "--seed", type="int", default=12345,
                  dest="SEED", help="Initial seed for the random number generator")
parser.add_option("--keep-mcpes", action="store_true", default=False,
                  dest="KEEPMCPES", help="Keep I3MCPEs before writing the output file")

# Parse cmd line args, bail out if anything is not understood.
(options, args) = parser.parse_args()
if len(args) != 0:
    # Join all stray positional arguments into one error message
    # (same message text as the original manual concatenation loop).
    parser.error("Got undefined options:" + " ".join(args) + " ")

########################
# Validate the input file: it must exist and be readable.
if options.INFILE:
    filename = options.INFILE
    if not os.access(filename, os.R_OK):  # idiomatic form of `== False`
        raise RuntimeError("cannot find input file!")
    infile = filename
    print('using input file %s' % infile)
else:
    print("No input file!")
    parser.print_help()
    exit(-1)

# Split off the extension, treating the compound ".i3.gz" as one unit.
infileRoot, infileExt = os.path.splitext(infile)
if infileExt == ".gz":
    infileRoot2, infileExt2 = os.path.splitext(infileRoot)
    if infileExt2 == ".i3":
        infileRoot = infileRoot2
        infileExt = ".i3.gz"
if infileExt != ".i3" and infileExt != ".i3.gz":
    raise Exception("you have to specify either a .i3 or an .i3.gz file!")
########################
# Work out the output directory and file name from --outfile (or derive
# the name from the input file when only a directory / nothing is given).
outdir = ""
outfile = None
if options.OUTFILE:
    outfile = options.OUTFILE
    # Did the user specify a directory? Then use it and auto-generate a name.
    if os.path.isdir(outfile):
        outdir = outfile
        outfile = None
    else:
        outdir, outfile = os.path.split(outfile)

# Add a trailing slash to the output directory name if not already there.
if outdir and outdir != "":
    if outdir[-1] != "/":
        outdir += "/"

if not outfile:
    # Automatically generate the output filename: <input-stem>_detsim<ext>.
    infileRootDir, infileRootFile = os.path.split(infileRoot)
    outfile = infileRootFile + "_detsim"
    outfile = outfile + infileExt

print("output dir is %s" % outdir)
# Parenthesized so %s receives the whole path; the original relied on
# `"..." % outdir + outfile`, which substitutes only outdir and then
# appends outfile — same output, but only by accident of precedence.
print("output file is %s" % (outdir + outfile))
########################
########################
from I3Tray import *
from os.path import expandvars
import os
import sys
from icecube import icetray, dataclasses, dataio, phys_services, trigger_sim, vuvuzela
from icecube import DOMLauncher
# Build the IceTray processing chain: random service -> reader -> noise ->
# PMT/DOM simulation -> triggers -> writer. Module order matters; check
# against simprod before trusting changes (see module docstring).
tray = I3Tray()
# Seeded RNG service shared by the simulation modules; streamnum separates
# statistically independent runs.
tray.Add("I3SPRNGRandomServiceFactory",
         seed = options.SEED,
         nstreams = 10000,
         streamnum = options.RUNNUMBER)
tray.Add("I3Reader", Filename = infile)
# Add noise parameters
noise_file = expandvars("$I3_SRC/vuvuzela/resources/data/parameters.dat")
tray.Add("Inject", InputNoiseFile = noise_file)
# Vuvuzela generates detector noise MCPEs into I3MCPESeriesMap
# over a [0, 25 ms] window, in-ice DOMs only.
tray.Add("Vuvuzela",
         InputHitSeriesMapName = "",
         OutputHitSeriesMapName = "I3MCPESeriesMap",
         StartWindow = 0,
         EndWindow = 25*I3Units.millisecond,
         IceTop = False,
         InIce = True,
         RandomServiceName = "I3RandomService",
         UseIndividual = True,
         DisableLowDTCutoff = True,
         )
tray.Add("PMTResponseSimulator")
tray.Add("DOMLauncher")
tray.Add(trigger_sim.TriggerSim)
# MCPEs are bulky; drop them unless the user asked to keep them.
if not options.KEEPMCPES:
    tray.Add("Delete", Keys = ["I3MCPESeriesMap"])
tray.Add("I3Writer", Filename = outdir+outfile)
tray.Execute()
|
18,021 | f6cf9d5f9322ef078441b55c4cf405230fee4003 | import sys,regexextraction,xpathextraction,automaticextraction
if(sys.argv[1] == "A"):
regexextraction.main()
elif(sys.argv[1] == "B"):
xpathextraction.main()
elif(sys.argv[1] == "C"):
automaticextraction.main() |
18,022 | 70e5c56f433a9c6f6c38b96c7ccc95aec752eac8 | from tf_agents.replay_buffers.tf_uniform_replay_buffer import TFUniformReplayBuffer
from ethical_rl.constants import *
# TODO: replay buffers should be moved out of dqn folder
class ReplayBuffer:
    """Thin wrapper that builds a TF-Agents TFUniformReplayBuffer for one agent.

    Expects in ``kwargs``:
      * ``"tf_agent"`` — the agent whose ``collect_data_spec`` describes
        the trajectories to store (KeyError if missing).
      * ``MAX_REPLAY_BUFFER_LENGTH`` (constant key) — buffer capacity per
        batch entry.
    """
    def __init__(self, **kwargs):
        # batch_size here is the number of parallel environments feeding the
        # buffer (a TFUniformReplayBuffer constructor argument), NOT the
        # training minibatch size; fixed to a single environment.
        self.batch_size = 1
        self.tf_agent = kwargs["tf_agent"]
        self.replay_buffer = TFUniformReplayBuffer(
            self.tf_agent.collect_data_spec,
            batch_size=self.batch_size,
            max_length=kwargs[MAX_REPLAY_BUFFER_LENGTH]
        )
18,023 | f784755882fd745f6fc8df0f5bc62f38feb0830a | import os
import time
import torch
from datasets.dataset_imagenet_dct import ImageFolderDCT
import datasets.cvtransforms as transforms
from datasets import train_y_mean, train_y_std, train_cb_mean, train_cb_std, \
train_cr_mean, train_cr_std
from datasets import train_y_mean_upscaled, train_y_std_upscaled, train_cb_mean_upscaled, train_cb_std_upscaled, \
train_cr_mean_upscaled, train_cr_std_upscaled
from datasets import train_dct_subset_mean, train_dct_subset_std
from datasets import train_upscaled_static_mean, train_upscaled_static_std
import datasets.dataenhance as enhance
def valloader_upscaled_static(args, model='mobilenet'):
    """Build the validation DataLoader over upscaled-DCT inputs.

    Uses the opencv backend, no shuffling. Returns ``(loader, dataset_size)``.
    Raises NotImplementedError for unknown ``model`` names.
    """
    val_root = os.path.join(args.data, 'val')
    if model == 'mobilenet':
        resize_size, crop_size = 1024, 896
    elif model == 'resnet':
        resize_size, crop_size = 512, 448
    else:
        raise NotImplementedError

    # Shared preprocessing prefix for both channel configurations.
    steps = [
        transforms.Resize(resize_size),
        transforms.CenterCrop(crop_size),
        transforms.Upscale(upscale_factor=2),
        transforms.TransformUpscaledDCT(),
        transforms.ToTensorDCT(),
    ]
    if int(args.subset) in (0, 192):
        # Full channel set: aggregate then normalize with the static stats.
        steps.append(transforms.Aggregate())
        steps.append(transforms.NormalizeDCT(
            train_upscaled_static_mean,
            train_upscaled_static_std,
        ))
    else:
        # Restrict to the requested DCT channel subset before normalizing.
        steps.append(transforms.SubsetDCT(channels=args.subset))
        steps.append(transforms.Aggregate())
        steps.append(transforms.NormalizeDCT(
            train_upscaled_static_mean,
            train_upscaled_static_std,
            channels=args.subset,
        ))

    dataset = ImageFolderDCT(val_root, transforms.Compose(steps), backend='opencv')
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.train_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
    )
    return loader, len(dataset)
def trainloader_upscaled_static(args, model='mobilenet'):
    """Build the training DataLoader over upscaled-DCT inputs.

    PIL-side augmentation (random crop, flips, rotation) runs before the
    DCT pipeline; shuffling enabled. Returns
    ``(loader, dataset_size, class_count)``.
    """
    train_root = os.path.join(args.data, 'train')
    if model == 'mobilenet':
        resize_size, crop_size = 1024, 896
    elif model == 'resnet':
        resize_size, crop_size = 512, 448
    else:
        raise NotImplementedError

    # Augmentation first, then the shared DCT preprocessing prefix.
    steps = [
        enhance.random_crop(),
        enhance.horizontal_flip(),
        enhance.vertical_flip(),
        enhance.random_rotation(),
        enhance.tocv2(),
        transforms.Resize(resize_size),
        transforms.CenterCrop(crop_size),
        transforms.Upscale(upscale_factor=2),
        transforms.TransformUpscaledDCT(),
        transforms.ToTensorDCT(),
    ]
    if int(args.subset) in (0, 192):
        steps.append(transforms.Aggregate())
        steps.append(transforms.NormalizeDCT(
            train_upscaled_static_mean,
            train_upscaled_static_std,
        ))
    else:
        steps.append(transforms.SubsetDCT(channels=args.subset))
        steps.append(transforms.Aggregate())
        steps.append(transforms.NormalizeDCT(
            train_upscaled_static_mean,
            train_upscaled_static_std,
            channels=args.subset,
        ))

    dataset = ImageFolderDCT(train_root, transforms.Compose(steps), backend='pil')
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
    )
    return loader, len(dataset), dataset.get_clsnum()
def testloader_upscaled_static(args, model='mobilenet'):
    """Build the test DataLoader over upscaled-DCT inputs.

    Identical pipeline to the validation loader but rooted at ``test``.
    Uses the opencv backend, no shuffling. Returns ``(loader, dataset_size)``.
    """
    test_root = os.path.join(args.data, 'test')
    if model == 'mobilenet':
        resize_size, crop_size = 1024, 896
    elif model == 'resnet':
        resize_size, crop_size = 512, 448
    else:
        raise NotImplementedError

    # Shared preprocessing prefix for both channel configurations.
    steps = [
        transforms.Resize(resize_size),
        transforms.CenterCrop(crop_size),
        transforms.Upscale(upscale_factor=2),
        transforms.TransformUpscaledDCT(),
        transforms.ToTensorDCT(),
    ]
    if int(args.subset) in (0, 192):
        steps.append(transforms.Aggregate())
        steps.append(transforms.NormalizeDCT(
            train_upscaled_static_mean,
            train_upscaled_static_std,
        ))
    else:
        steps.append(transforms.SubsetDCT(channels=args.subset))
        steps.append(transforms.Aggregate())
        steps.append(transforms.NormalizeDCT(
            train_upscaled_static_mean,
            train_upscaled_static_std,
            channels=args.subset,
        ))

    dataset = ImageFolderDCT(test_root, transforms.Compose(steps), backend='opencv')
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.train_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
    )
    return loader, len(dataset)
def test_transform(args, image):
    """Return the inference-time transform pipeline (resnet input sizes).

    NOTE(review): ``image`` is accepted but never used — the pipeline is
    returned, not applied. Confirm with callers before removing it.
    """
    resize_size, crop_size = 512, 448
    steps = [
        transforms.Resize(resize_size),
        transforms.CenterCrop(crop_size),
        transforms.Upscale(upscale_factor=2),
        transforms.TransformUpscaledDCT(),
        transforms.ToTensorDCT(),
    ]
    if int(args.subset) in (0, 192):
        steps.append(transforms.Aggregate())
        steps.append(transforms.NormalizeDCT(
            train_upscaled_static_mean,
            train_upscaled_static_std,
        ))
    else:
        steps.append(transforms.SubsetDCT(channels=args.subset))
        steps.append(transforms.Aggregate())
        steps.append(transforms.NormalizeDCT(
            train_upscaled_static_mean,
            train_upscaled_static_std,
            channels=args.subset,
        ))
    return transforms.Compose(steps)
|
18,024 | 1a32bddddefdf5675784816b3e33f3352f29b857 | from problem import Problem
from collections import defaultdict
import random
import string
cnt = 10
class Problem5(Problem):
    """Binary-search-tree exercise generator.

    Inserts 7 random values into a BST, reports the parent/level vectors,
    lists the nodes deletable in two ways (two children), then deletes a
    randomly chosen node. Statement and solution text are emitted in
    Romanian (user-facing strings kept verbatim).

    Fixes over the previous version:
      * values are compared with ``==`` instead of ``is`` (identity only
        "worked" via CPython's small-int cache);
      * dead commented-out debug helpers and no-op string blocks removed.
    """
    def __init__(self):
        statement = "Inserati urmatoarele valori, pe rand, intr-un arbore binar de cautare: "
        data = random.sample(range(1,9),7)
        # The node to delete is picked among the first four inserted values.
        x=[data[0],data[1],data[2],data[3]]
        statement = statement + str(data) + ".\nScrieti nodurile care se pot sterge in doua moduri. Stergeti elementul "
        k = random.choice(x)
        statement = statement + str(k) + "."
        data = [data, k]
        super().__init__(statement, data)
    def solve(self):
        data = self.data
        solution=" "
        solution=solution+"\n"
        v=data[0]  # the randomly generated insertion order
        solution=solution + "Introducem elementele: " +str(v)+" intr-un arbore binar de cautare. \n"
        solution=solution + "Radacina va fi: "+ str(v[0]) +"\n"
        nr=data[1]  # value of the node that must be deleted
        # BST node: left child, right child, value.
        class Nod:
            def __init__(self, val):
                self.fiu_stang = None
                self.fiu_drept = None
                self.val = val
        t=[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]  # parent vector, indexed by node value
        v_niv=[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]  # level vector, indexed by node value
        # Standard BST insertion; also records each node's parent and level.
        def inserare(rad, nod):
            if nod.val < rad.val:
                if rad.fiu_stang is None:
                    rad.fiu_stang = nod
                    t[nod.val]=rad.val
                    v_niv[nod.val]=v_niv[rad.val]+1
                else:
                    inserare(rad.fiu_stang, nod)
            else:
                if rad.fiu_drept is None:
                    rad.fiu_drept = nod
                    t[nod.val] = rad.val
                    v_niv[nod.val] = v_niv[rad.val] + 1
                else:
                    inserare(rad.fiu_drept, nod)
        A = Nod(v[0])  # the first inserted value becomes the root
        t[v[0]] = 0
        v_niv[v[0]]=0
        for i in range(1, len(v)):
            inserare(A, Nod(v[i]))
        solution = solution + "\n"
        solution = solution + "Vectorul de tati este: "
        solution = solution + str(t) + "\n"
        solution = solution + "Vectorul de nivel este: "
        solution = solution + str(v_niv) + "\n"
        solution = solution + "\n"
        solution = solution + "Verificam in vectorul de tati ce noduri pot fi sterse in 2 moduri (cele care au 2 fii).\n"
        solution = solution + "Nodurile care se pot sterge in doua moduri sunt: "
        solution = solution + "\n"
        # A node can be deleted in two ways iff it has two children, i.e. it
        # appears (at least) twice as a parent in t.
        for i in range(1,9):
            k = 0  # number of children of node i
            for j in range(1,9):
                # BUG FIX: value comparison, not identity (`is`).
                if i == t[j]:
                    k = k+1
            if k > 1:
                solution=solution + str(i) + " " + "deoarece are "+str(k) + " fii\n"
        # Smallest value in the subtree rooted at nod (leftmost descendant).
        def val_min(nod):
            crt = nod
            while (crt.fiu_stang is not None):
                crt = crt.fiu_stang
            return crt
        # Render the tree sideways (right subtree on top), indenting by depth.
        def afisare_imbunatatita(rad, space, sol=""):
            if (rad == None):
                return ''
            space += cnt  # widen the gap between levels
            sol += afisare_imbunatatita(rad.fiu_drept, space)
            sol += '\n'
            sol += ' ' * (space - cnt)
            sol += str(rad.val)
            sol += afisare_imbunatatita(rad.fiu_stang, space)
            return sol
        def print_imbunatatit(rad):
            s = afisare_imbunatatita(rad, 0)
            return s
        # Delete `sters` from the BST rooted at rad; returns the new root.
        def stergere(rad, sters):
            if rad is None:
                return rad
            if sters < rad.val:
                rad.fiu_stang = stergere(rad.fiu_stang, sters)
            elif (sters > rad.val):
                rad.fiu_drept = stergere(rad.fiu_drept, sters)
            else:
                # Zero or one child: splice the (possibly None) child in.
                if rad.fiu_stang is None:
                    x = rad.fiu_drept
                    rad = None
                    return x
                elif rad.fiu_drept is None:
                    x = rad.fiu_stang
                    rad = None
                    return x
                # Two children: replace with the in-order successor, then
                # delete the successor from the right subtree.
                x = val_min(rad.fiu_drept)
                rad.val = x.val
                rad.fiu_drept = stergere(rad.fiu_drept, x.val)
            return rad
        solution = solution + "\n"
        solution = solution + "Arborele initial este:\n\n"
        solution = solution + str(print_imbunatatit(A)) + "\n\n"
        A=stergere(A,nr)
        solution = solution + "Arborele final, dupa stergerea elementului "+str(nr)+" este:\n\n"
        solution = solution + str(print_imbunatatit(A)) + "\n\n"
        return solution
|
18,025 | d7c4ea2880a336b528fec71fce37af0a8b00eae4 | # -*- test-case-name: twisted.application.runner.test.test_runner -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted application runner.
"""
__all__ = [
"Runner",
"RunnerOptions",
]
from sys import stderr
from signal import SIGTERM
from os import getpid, kill
from twisted.python.constants import Names, NamedConstant
from twisted.logger import (
globalLogBeginner, textFileLogObserver,
FilteringLogObserver, LogLevelFilterPredicate,
LogLevel, Logger,
)
from twisted.internet import default as defaultReactor
from ._exit import exit, ExitStatus
class Runner(object):
    """
    Twisted application runner.

    Steps are deliberately small methods so tests can patch each one; the
    ``return`` statements after ``exit(...)`` below exist because tests
    patch ``exit`` with a non-exiting stub.
    """
    log = Logger()
    def __init__(self, options):
        """
        @param options: Configuration options for this runner.
        @type options: mapping of L{RunnerOptions} to values
        """
        self.options = options
    def run(self):
        """
        Run this command.
        Equivalent to::
            self.killIfRequested()
            self.writePIDFile()
            self.startLogging()
            self.startReactor()
            self.reactorExited()
            self.removePIDFile()
        Additional steps may be added over time, but the order won't change.
        """
        self.killIfRequested()
        self.writePIDFile()
        self.startLogging()
        self.startReactor()
        self.reactorExited()
        self.removePIDFile()
    def killIfRequested(self):
        """
        Kill a running instance of this application if L{RunnerOptions.kill} is
        specified and L{True} in C{self.options}.
        This requires that L{RunnerOptions.pidFilePath} also be specified;
        exit with L{ExitStatus.EX_USAGE} if kill is requested with no PID file.
        """
        pidFilePath = self.options.get(RunnerOptions.pidFilePath)
        if self.options.get(RunnerOptions.kill, False):
            if pidFilePath is None:
                exit(ExitStatus.EX_USAGE, "No PID file specified")
                return  # When testing, patched exit doesn't exit
            else:
                pid = ""
                try:
                    # Only the first line of the PID file is read.
                    for pid in pidFilePath.open():
                        break
                except EnvironmentError:
                    exit(ExitStatus.EX_IOERR, "Unable to read PID file.")
                    return  # When testing, patched exit doesn't exit
                try:
                    pid = int(pid)
                except ValueError:
                    exit(ExitStatus.EX_DATAERR, "Invalid PID file.")
                    return  # When testing, patched exit doesn't exit
            # Logging must be started here because run() has not reached its
            # own startLogging() step when a kill is requested.
            self.startLogging()
            self.log.info("Terminating process: {pid}", pid=pid)
            kill(pid, SIGTERM)
            exit(ExitStatus.EX_OK)
            return  # When testing, patched exit doesn't exit
    def writePIDFile(self):
        """
        Write a PID file for this application if L{RunnerOptions.pidFilePath}
        is specified in C{self.options}.
        """
        pidFilePath = self.options.get(RunnerOptions.pidFilePath)
        if pidFilePath is not None:
            pid = getpid()
            pidFilePath.setContent(u"{}\n".format(pid).encode("utf-8"))
    def removePIDFile(self):
        """
        Remove the PID file for this application if L{RunnerOptions.pidFilePath}
        is specified in C{self.options}.
        """
        pidFilePath = self.options.get(RunnerOptions.pidFilePath)
        if pidFilePath is not None:
            pidFilePath.remove()
    def startLogging(self):
        """
        Start the L{twisted.logger} logging system.

        Log output goes to L{RunnerOptions.logFile} (default: stderr) via
        L{RunnerOptions.fileLogObserverFactory}, filtered at
        L{RunnerOptions.defaultLogLevel} (default: info).
        """
        logFile = self.options.get(RunnerOptions.logFile, stderr)
        fileLogObserverFactory = self.options.get(
            RunnerOptions.fileLogObserverFactory, textFileLogObserver
        )
        fileLogObserver = fileLogObserverFactory(logFile)
        logLevelPredicate = LogLevelFilterPredicate(
            defaultLogLevel=self.options.get(
                RunnerOptions.defaultLogLevel, LogLevel.info
            )
        )
        filteringObserver = FilteringLogObserver(
            fileLogObserver, [logLevelPredicate]
        )
        globalLogBeginner.beginLoggingTo([filteringObserver])
    def startReactor(self):
        """
        Register C{self.whenRunning} with the reactor so that it is called once
        the reactor is running and start the reactor.
        If L{RunnerOptions.reactor} is specified in C{self.options}, use that
        reactor; otherwise use the default reactor.
        """
        reactor = self.options.get(RunnerOptions.reactor)
        if reactor is None:
            # NOTE(review): defaultReactor is the twisted.internet.default
            # *module*; install() installs the reactor as
            # twisted.internet.reactor, but the module object itself is what
            # gets stored and used below — confirm this is intended.
            reactor = defaultReactor
            reactor.install()
            self.options[RunnerOptions.reactor] = reactor
        reactor.callWhenRunning(self.whenRunning)
        self.log.info("Starting reactor...")
        reactor.run()
    def whenRunning(self):
        """
        If L{RunnerOptions.whenRunning} is specified in C{self.options}, call
        it.
        @note: This method is called when the reactor is running.
        """
        whenRunning = self.options.get(RunnerOptions.whenRunning)
        if whenRunning is not None:
            whenRunning(self.options)
    def reactorExited(self):
        """
        If L{RunnerOptions.reactorExited} is specified in C{self.options}, call
        it.
        @note: This method is called after the reactor has exited.
        """
        reactorExited = self.options.get(RunnerOptions.reactorExited)
        if reactorExited is not None:
            reactorExited(self.options)
class RunnerOptions(Names):
    """
    Names for options recognized by L{Runner}.
    These are meant to be used as keys in the options given to L{Runner}, with
    corresponding values as noted below.
    @cvar reactor: The reactor to start.
        Corresponding value: L{IReactorCore}.
    @type reactor: L{NamedConstant}
    @cvar pidFilePath: The path to the PID file.
        Corresponding value: L{IFilePath}.
    @type pidFilePath: L{NamedConstant}
    @cvar kill: Whether this runner should kill an existing running instance.
        Corresponding value: L{bool}.
    @type kill: L{NamedConstant}
    @cvar defaultLogLevel: The default log level to start the logging system
        with.
        Corresponding value: L{NamedConstant} from L{LogLevel}.
    @type defaultLogLevel: L{NamedConstant}
    @cvar logFile: A file stream to write logging output to.
        Corresponding value: writable file like object.
    @type logFile: L{NamedConstant}
    @cvar fileLogObserverFactory: What file log observer to use when starting
        the logging system.
        Corresponding value: callable that returns a
        L{twisted.logger.FileLogObserver}
    @type fileLogObserverFactory: L{NamedConstant}
    @cvar whenRunning: Hook to call when the reactor is running.
        This can be considered the Twisted equivalent to C{main()}.
        Corresponding value: callable that takes the options mapping given to
        the runner as an argument.
    @type whenRunning: L{NamedConstant}
    @cvar reactorExited: Hook to call when the reactor has exited.
        Corresponding value: callable that takes an empty arguments list
    @type reactorExited: L{NamedConstant}
    """
    # Each constant's expected value type is documented in the class
    # docstring above.
    reactor = NamedConstant()
    pidFilePath = NamedConstant()
    kill = NamedConstant()
    defaultLogLevel = NamedConstant()
    logFile = NamedConstant()
    fileLogObserverFactory = NamedConstant()
    whenRunning = NamedConstant()
    reactorExited = NamedConstant()
|
18,026 | 5438d1ba1099855afdd4a810a62ac8ae22128a83 | # 2520 is the smallest number that can be divided by each of
# the numbers from 1 to 10 without any remainder.
#
# What is the smallest positive number that is
# evenly divisible by all of the numbers from 1 to 20?
# SOLUTION FROM 'THE WANDERING ENGINEER'
# num = 0
# keepGoing = True
#
# # In this version, we go until the boolean keepGoing is changed to False
# while keepGoing:
# # increment num by 20 each time
# num += 20
#
# # NumMultiples keeps track of the number of multiples for num
# numMultiples = 0
#
# # Iterate from 20 to 11 going down by 1
# for i in range(20, 10, -1):
# if (num % i != 0):
# # If not evenly divisible, break out of for loop
# # because we know it isn't the LCM
# break
# if (i == 11):
# # If we reached here without breaking, then we've
# # found the number we're looking for.
# # Change keepGoing to False to break out of while loop
# keepGoing = False
# break
#
# # Print out current num to keep track (in millions)
# if num % 1000000 == 0:
# print(num / 1000000)
#
# print('The lowest common multiple is: ' + str(num))
def divider():
    """Return the smallest positive number evenly divisible by every
    integer from 1 to 20 (Project Euler #5), printing it first.

    Replaces the brute-force scan over multiples of 20 (hundreds of
    millions of iterations) with a 20-step LCM fold:
    lcm(a, b) == a * b // gcd(a, b).
    """
    from math import gcd  # local import keeps this script self-contained
    result = 1
    for i in range(1, 21):
        result = result * i // gcd(result, i)
    print(result)  # preserve the original's console output
    return result
divider()
|
18,027 | 40410d68316dfb81d593f31d075125fff3d40f74 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 31 19:40:20 2017
@author: park-wanbae
"""
#%%
import pandas as pd
import numpy as np
import datetime as dat
import scipy.optimize as sop
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
#%%
# Market inputs: 3M LIBOR fixing, Eurodollar futures, par swap rates, and
# implied vols (quoted in bp, hence the /10000).
libor = pd.read_excel('libor.xlsx')
edf = pd.read_excel('edf.xlsx')
swap = pd.read_excel('swap.xlsx')
vol = pd.read_excel('vol.xlsx', index_col = 0) / 10000
today = dat.datetime(2017, 5, 31)  # valuation date
#%%
#Data Handling: Convexity Adjustment and Day Representation
# Futures-implied rate = 100 - price.
edf['Fut'] = 100 - edf['PX_MID']
impvol = np.array(vol['1Yr'][:'2Yr'])
# The last vol point is missing; reuse the previous tenor's value.
impvol[5] = impvol[4]
edf['Vol'] = impvol
day = np.zeros(6)
day_temp = edf['FUT_CONTRACT_DT'] - today
for i in range(len(day)):
    day[i] = (day_temp[i]).days
# Accrual start/end in years (ACT/360, 90-day futures period).
edf['T1'] = day / 360
edf['T2'] = (day + 90) / 360
# Convexity adjustment: forward = futures - 0.5 * sigma^2 * T1 * T2.
edf['Fwd'] = (edf['Fut'] / 100) - (0.5 * (edf['Vol'] ** 2) \
        * edf['T1'] * edf['T2'])
edffwd = edf[['Fwd', 'T1']]['EDU7 Comdty':]
# Semi-annual fixed payment per 100 notional.
swap['PMT'] = 0.5 * swap['PX_MID']
#%%
#Short - Mid Term
ir_mat = [0.25, 0.5, 0.75, 1, 1.25, 1.5, 2]  # zero-curve pillar maturities
ir = np.zeros(7)
# Continuously-compounded 3M rate from the LIBOR fixing.
# NOTE(review): the RHS is a pandas Series — presumably libor has a single
# row; confirm against the input file.
ir[0] = 4 * np.log(1 + (libor['PX_MID'] / 100) * 0.25)
dct = np.zeros(7)  # discount factors at the pillars
dct[0] = np.exp(-ir[0] * edffwd['T1'][0])
# Extend the zero curve one futures period at a time using the
# convexity-adjusted forwards.
for i in range(len(edffwd)):
    t1 = edffwd['T1'][i]
    t2 = t1 + 0.25
    r = ((ir[i] * t1) + edffwd['Fwd'][i] * (t2 - t1)) / t2
    d = np.exp(-r * t2)
    ir[i+1] = r
    dct[i+1] = d
# Pin the 2Y pillar off the 2Y par swap (semi-annual coupons).
dct[6] = (100 - swap['PMT'][0] * (dct[1] + dct[3] + dct[5])) / (100 + swap['PMT'][0])
ir[6] = -np.log(dct[6]) / 2
#%%
irterm = pd.DataFrame(ir, index = ir_mat, columns = ['IR'])
irterm['DF'] = np.exp(-irterm['IR'] * irterm.index)
#%%
# Semi-annual grid out to 40Y for the swap-bootstrapped part of the curve.
swap_mat = [0.5 * i for i in range(1, 81)]
swapterm = pd.DataFrame(index = swap_mat, columns = ['IR', 'DF'])
# Seed the grid with the short-end pillars (0.5Y .. 2Y).
for i in range(1, 5):
    swapterm['IR'][0.5 * i] = irterm['IR'][0.5 * i]
    swapterm['DF'][0.5 * i] = irterm['DF'][0.5 * i]
#%%
#Long Term
def findr(r, startyr, endyr):
    """Root-search objective for bootstrapping the zero rate at ``endyr``.

    Given a candidate zero rate ``r`` at ``endyr``, linearly interpolates
    zero rates between ``startyr`` (already bootstrapped) and ``endyr``,
    and returns the pricing error of the ``endyr`` par swap (PV - 100).
    Reads the module-level ``swap`` and ``swapterm`` frames.
    """
    c = swap['PMT']['USSWAP%d Curncy' %endyr]
    # PV of coupons already covered by the bootstrapped curve.
    c_sum = (c * swapterm['DF'][:startyr]).sum()
    x = np.array([startyr, endyr])
    y = np.array([swapterm['IR'][startyr], r])
    k = np.arange(startyr + 0.5, endyr + 0.5, 0.5)
    f = interp1d(x, y)
    df = np.exp(-k * f(k))
    # Remaining coupons plus notional + final coupon at maturity.
    result = c_sum + (c * df[:-1]).sum() + (100 + c) * df[-1]
    return result - 100
def DfandR(r, startyr, endyr):
    """Interpolate zero rates between ``startyr`` and ``endyr``.

    Returns ``(rates, discount_factors, maturities)`` on the semi-annual
    grid ``(startyr + 0.5, ..., endyr)``. Reads the module-level
    ``swapterm`` frame for the already-bootstrapped rate at ``startyr``.

    BUG FIX: the previous version ignored its ``r`` argument and read the
    global ``r0`` instead (it also preallocated an array that was
    immediately overwritten).
    """
    x = np.array([startyr, endyr])
    y = np.array([swapterm['IR'][startyr], r])
    f = interp1d(x, y)
    k = np.arange(startyr + 0.5, endyr + 0.5, 0.5)
    rates = f(k)
    d = np.exp(-rates * k)
    return rates, d, k
# Bootstrap the long end segment by segment between quoted swap maturities.
matlist = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 20, 25, 30, 40]
for startyr, endyr in zip(matlist[:-1], matlist[1:]):
    # Zero rate at endyr that reprices the endyr par swap.
    r0 = float(sop.root(findr, 0, args = (startyr, endyr)).x)
    rates, dfs, mats = DfandR(r0, startyr, endyr)
    # .loc assignment avoids pandas chained-assignment warnings
    # (was: swapterm['IR'][mats] = ...).
    swapterm.loc[mats, 'IR'] = rates
    swapterm.loc[mats, 'DF'] = dfs
# DataFrame.append was deprecated and removed in pandas 2.0; concat is the
# equivalent replacement.
result = pd.concat([irterm, swapterm[2.5:]])
result.plot(subplots = True, figsize = (12, 5))
18,028 | ec8fe838a2c5f559b6c5264c6e2022c4ef345ada | # coding=utf-8
# @Time : 2018/10/22 下午3:24
from sqlalchemy.orm import class_mapper
from application import db
def db_session_commit():
    """Commit the current SQLAlchemy session.

    On failure, roll back (so the session stays usable) and re-raise the
    original exception. The previous version printed a duplicated,
    meaningless debug string here.
    """
    try:
        db.session.commit()
    except Exception:
        print('db_session_commit failed, rolling back')
        db.session.rollback()
        raise
class CRUDMixin(object):
    """Mixin adding create/update/save/delete/upsert helpers on top of the
    shared SQLAlchemy session (``db.session``).

    All mutators take ``commit=False`` by default; pass ``commit=True`` to
    commit via :func:`db_session_commit` immediately.
    """
    def __repr__(self):
        return "<{}>".format(self.__class__.__name__)
    def add(self, commit=False):
        # Stage this instance on the session; commit only when requested.
        db.session.add(self)
        commit and db_session_commit()
    def create(self, commit=False, **kwargs):
        """
        Fill in this model's matching attributes from parser/form fields
        and save it.
        :param commit: whether to commit, defaults to False
        :param kwargs: field values for the new record
        :return: self (flushed; committed only when ``commit`` is True)
        """
        for attr, value in kwargs.items():
            if value is not None and hasattr(self, attr):
                setattr(self, attr, value)
        return self.save(commit=commit)
    def update(self, commit=False, **kwargs):
        """
        Update the submitted fields of an existing entity (``id`` is
        always excluded from the update).
        :param commit: whether to commit, defaults to False
        :param kwargs: field values to apply
        :return: self when not committing; otherwise the result of save()
        """
        kwargs.pop("id", None)
        for attr, value in kwargs.items():
            # Flask-RESTful make everything None by default: /
            if value is not None and hasattr(self, attr):
                setattr(self, attr, value)
        return commit and self.save(commit=commit) or self
    def save(self, commit=False):
        """
        Persist this object: add it to the session and flush so generated
        keys are populated.
        :param commit: whether to commit, defaults to False
        :return: self
        """
        db.session.add(self)
        db.session.flush()
        commit and db_session_commit()
        return self
    def delete(self, commit=False):
        """
        Delete this object's record from the database.
        :param commit: whether to commit, defaults to False
        :return: self
        """
        db.session.delete(self)
        commit and db_session_commit()
        return self
    @classmethod
    def upsert(cls, where, commit=False, **kwargs):
        # Update the first record matching ``where``, or create a new one.
        record = cls.query.filter_by(**where).first()
        print('record', record, where)
        if record:
            record.update(commit=commit, **kwargs)
        else:
            record = cls(**kwargs).save(commit=commit)
        return record
    def to_json(self):
        # Serialize only mapped columns; fails loudly on unmapped objects.
        if hasattr(self, '__table__'):
            return {i.name: getattr(self, i.name) for i in self.__table__.columns}
        raise AssertionError('<%r> does not have attribute for __table__' % self)
def model_to_dict(obj, visited_children=None, back_relationships=None):
    """Serialize a model instance to a dict: its own columns plus directly
    related scalar (one-to-one / many-to-one) parents, one level deep.

    NOTE(review): serialization of list (``uselist``) relationships is
    commented out below, so collection relationships are currently skipped
    entirely — confirm this is intentional before re-enabling.

    Change log:
    ----------------------------------------
    Zuyong Du  2018-10-22
    ----------------------------------------
    Zuyong Du  2018-08-30
    ----------------------------------------
    """
    if visited_children is None:
        visited_children = set()
    if back_relationships is None:
        back_relationships = set()
    serialized_data = {c.key: getattr(obj, c.key) for c in obj.__table__.columns}
    print(serialized_data)
    relationships = class_mapper(obj.__class__).relationships
    # Skip relationships we arrived through, to avoid infinite recursion.
    visitable_relationships = [(name, rel) for name, rel in relationships.items() if name not in back_relationships]
    for name, relation in visitable_relationships:
        relationship_children = getattr(obj, name)
        if relationship_children is not None:
            if relation.uselist:
                children = []
                # for child in [c for c in relationship_children if c not in visited_children]:
                #     visited_children.add(child)
                #     children.append(model_to_dict(child, visited_children, back_relationships))
                # serialized_data[name] = children
            else:
                # Scalar relation: inline the related row's columns.
                serialized_data[name] = {c.key: getattr(relationship_children, c.key) for c in
                                         relationship_children.__table__.columns}
    return serialized_data
|
18,029 | c2ebd8b88e8acba3386a794485e563514c721d23 | """Utils for easy database selection."""
import inspect
import moabb.datasets as db
from moabb.datasets.base import BaseDataset
dataset_list = []
def _init_dataset_list():
    """Populate the module-level ``dataset_list`` with every BaseDataset
    subclass exposed by :mod:`moabb.datasets`."""
    members = inspect.getmembers(db, inspect.isclass)
    dataset_list.extend(cls for _, cls in members if issubclass(cls, BaseDataset))
def dataset_search(  # noqa: C901
    paradigm=None,
    multi_session=False,
    events=None,
    has_all_events=False,
    interval=None,
    min_subjects=1,
    channels=(),
):
    """Returns a list of datasets that match a given criteria.
    Parameters
    ----------
    paradigm: str | None
        'imagery', 'p300', 'ssvep', None
    multi_session: bool
        if True only returns datasets with more than one session per subject.
        If False return all
    events: list of strings
        events to select
    has_all_events: bool
        skip datasets that don't have all events in events
    interval:
        Length of motor imagery interval, in seconds. Only used in imagery
        paradigm
    min_subjects: int,
        minimum subjects in dataset
    channels: list of str
        list or set of channels
    """
    # Lazily populate the dataset registry on first use.
    if len(dataset_list) == 0:
        _init_dataset_list()
    channels = set(channels)
    out_data = []
    # When every requested event must be present, the number of requested
    # events caps how many we collect per dataset.
    if events is not None and has_all_events:
        n_classes = len(events)
    else:
        n_classes = None
    assert paradigm in ["imagery", "p300", "ssvep", None]
    for type_d in dataset_list:
        # Instantiate to read per-dataset metadata (sessions, subjects, ...).
        d = type_d()
        skip_dataset = False
        if multi_session and d.n_sessions < 2:
            continue
        if len(d.subject_list) < min_subjects:
            continue
        if paradigm is not None and paradigm != d.paradigm:
            continue
        if interval is not None and d.interval[1] - d.interval[0] < interval:
            continue
        # Collect the subset of requested events this dataset provides.
        keep_event_dict = {}
        if events is None:
            keep_event_dict = d.event_id.copy()
        else:
            n_events = 0
            for e in events:
                if n_classes is not None:
                    if n_events == n_classes:
                        break
                if e in d.event_id.keys():
                    keep_event_dict[e] = d.event_id[e]
                    n_events += 1
                else:
                    if has_all_events:
                        # A required event is missing: reject the dataset.
                        skip_dataset = True
        if keep_event_dict and not skip_dataset:
            if len(channels) > 0:
                # Channel filter: load subject 1's first session/run and
                # check the requested channels are a subset of its EEG picks.
                s1 = d.get_data([1])[1]
                sess1 = s1[list(s1.keys())[0]]
                raw = sess1[list(sess1.keys())[0]]
                raw.pick_types(eeg=True)
                if channels <= set(raw.info["ch_names"]):
                    out_data.append(d)
            else:
                out_data.append(d)
    return out_data
def find_intersecting_channels(datasets, verbose=False):
    """Given a list of dataset instances return a list of channels shared by
    all datasets. Skip datasets which have 0 overlap with the others.

    returns: list of common channels (upper-cased, then midline 'Z'
    lowered to 'z'), list of datasets with valid channels
    """
    allchans = set()
    dset_chans = []
    keep_datasets = []
    for d in datasets:
        print("Searching dataset: {:s}".format(type(d).__name__))
        # Inspect the first session/run of subject 1 to read the montage.
        s1 = d.get_data([1])[1]
        sess1 = s1[list(s1.keys())[0]]
        raw = sess1[list(sess1.keys())[0]]
        raw.pick_types(eeg=True)
        processed = []
        for ch in raw.info["ch_names"]:
            ch = ch.upper()
            # Channels literally named "EEG..." are generic placeholders,
            # not electrode positions, so they are dropped.
            if ch.find("EEG") == -1:
                # TODO: less hacky way of finding poorly labeled datasets
                processed.append(ch)
        allchans.update(processed)
        if len(processed) > 0:
            if verbose:
                print("Found EEG channels: {}".format(processed))
            dset_chans.append(processed)
            keep_datasets.append(d)
        else:
            print(
                "Dataset {:s} has no recognizable EEG channels".format(type(d).__name__)
            )  # noqa
    # Intersect across all per-dataset channel lists in one call.
    allchans.intersection_update(*dset_chans)
    # Restore conventional midline naming (e.g. CZ -> Cz, FPZ -> FPz).
    allchans = [s.replace("Z", "z") for s in allchans]
    return allchans, keep_datasets
def _download_all(update_path=True, verbose=None):
    """Download all data.

    This function is mainly used to generate the data cache.

    Parameters
    ----------
    update_path : bool
        Forwarded to each dataset's ``download``. Previously this argument
        was accepted but silently ignored (``update_path=True`` was
        hard-coded in the call).
    verbose : bool | str | int | None
        Forwarded to each dataset's ``download``.
    """
    # Make sure the registry is populated, consistent with dataset_search.
    if len(dataset_list) == 0:
        _init_dataset_list()
    # iterate over dataset
    for ds in dataset_list:
        # call download, honoring the caller's update_path choice
        ds().download(update_path=update_path, verbose=verbose, accept=True)
def block_rep(block: int, rep: int):
    """Return the canonical session name for a (block, repetition) pair."""
    return "block_{}-repetition_{}".format(block, rep)
def blocks_reps(blocks: list, reps: list):
    """Return the session name for every (block, repetition) pair.

    Blocks vary in the outer loop and repetitions in the inner one,
    matching the ordering of a blocks-major nested iteration.
    """
    names = []
    for blk in blocks:
        for rep in reps:
            names.append(block_rep(blk, rep))
    return names
|
18,030 | 54ac74b8360ab3c45b6f6a67a92aae343706e018 | # Pytane 38 - do czego służą dekoratory @staticmethod i @classmethod?
class Matematyka:
    """Toy class illustrating ``@staticmethod`` versus ``@classmethod``.

    A static method is unaware of being part of the class, so it cannot
    reach the class's other methods.  A class method receives the class
    itself as ``cls`` (instead of ``self``) and can therefore call other
    methods defined on the class.
    """

    def __init__(self):
        # Approximate value of pi used by the instance method below.
        self.pi = 3.14

    def policz_obwod_okregu(self, r):
        """Return the circumference of a circle with radius ``r``."""
        return 2 * self.pi * r

    @staticmethod
    def dodaj(a, b):
        """Add two values; static, so it sees nothing of the class."""
        return a + b

    # A @staticmethod version of dodaj_i_pomnoz would fail: it could not
    # reference dodaj() without qualifying it, because static methods have
    # no handle on the class.

    @classmethod
    def dodaj_i_pomnoz(cls, a, b):
        """Add ``a`` and ``b`` via :meth:`dodaj`, then double the sum."""
        return cls.dodaj(a, b) * 2
# Demonstrate the different call styles: instance method, static method via
# the class, class method via the class, and static method via an instance.
m = Matematyka()
print(m.policz_obwod_okregu(5))
print(Matematyka.dodaj(2,3))
print(Matematyka.dodaj_i_pomnoz(2,3))
print(m.dodaj(2,3))
|
18,031 | 0039b3d219febaae867d4761f1654a8eac5b58cb | # Python combination
from itertools import combinations
def solution(n_buns, n_req):
    """Distribute numbered keys among ``n_buns`` bunnies so that any
    ``n_req`` of them together hold every key.

    Each key is given to a distinct combination of
    ``n_buns - n_req + 1`` bunnies; returns one sorted key list per bunny.
    """
    copies_per_key = n_buns - n_req + 1
    per_bunny = [[] for _ in range(n_buns)]
    holder_sets = enumerate(combinations(range(n_buns), copies_per_key))
    for key_id, holders in holder_sets:
        for holder in holders:
            per_bunny[holder].append(key_id)
    return per_bunny
# Sanity checks: compare solution() against known-good key distributions
# for several (bunnies, required) configurations; each line prints the
# result alongside whether it matches the expected output.
a = solution(2, 1)
print(a, a == [[0], [0]])
b = solution(4, 4)
print(b, b == [[0], [1], [2], [3]])
c = solution(5, 3)
print(c, c == [
    [0,1,2,3,4,5],
    [0,1,2,6,7,8],
    [0,3,4,6,7,9],
    [1,3,5,6,8,9],
    [2,4,5,7,8,9]
])
d = solution(3, 2)
print(d, d == [[0,1], [0,2], [1,2]])
e = solution(2, 2)
print(e, e == [[0], [1]])
|
18,032 | 45d5faecbdb69c1e16da9ca01abe0a768b3ef2a0 | # coding=utf-8
from .consts import STATUS_MAPS, UNKNOWN
def generate_status_fields(status_code, message=None):
    """Build the standard ``status``/``message`` response fields.

    An unrecognized ``status_code`` is coerced to ``UNKNOWN`` together with
    its canonical message (any caller-supplied message is discarded).  For
    a known code, a missing message falls back to the code's default text.
    """
    if status_code not in STATUS_MAPS:
        status_code = UNKNOWN
        message = STATUS_MAPS[UNKNOWN]
    elif message is None:
        message = STATUS_MAPS[status_code]
    return {'status': status_code, 'message': message}
|
18,033 | 52f55c2048c55dbdd3b708bfcb4637b9398fb136 | with open('input.txt') as f:
    # Read the puzzle input; only the first line is used.
    lines = f.read().splitlines()
# lines = ['dabAcCaCBAcCcaDA']
# Repeatedly delete adjacent character pairs that are the same letter in
# opposite cases (e.g. 'cC' or 'Aa') until no such pair remains, then
# report the fully reduced string and its length.
original = lines[0]
# Upper-cased copy kept in lockstep so case-insensitive comparison is cheap.
upper_case = original.upper()
print('Length of string =', len(original))
positions = []
delete_no = []
iter = 0  # NOTE(review): shadows the builtin `iter`; rename when refactoring.
while True:
    len_start = len(original)
    positions.clear()
    delete_no.clear()
    if len(original) < 2:
        break
    # Collect every index i where s[i] and s[i+1] are the same letter with
    # differing case.
    for num in range(len(original)-1):
        if original[num] != original[num + 1] and upper_case[num] == upper_case[num + 1]:
            positions.append(num) # Mind that triples are added!
    # Drop overlapping matches: consecutive indices share a character, so
    # only the later of each adjacent pair of indices is kept.
    for num_2 in range(len(positions) - 1):
        if positions[num_2] == positions[num_2 + 1] - 1:
            delete_no.append(num_2)
    delete_no.reverse()
    for num_3 in delete_no:
        del positions[num_3]
    # Delete from the end of the string first so earlier indices stay valid.
    positions.reverse()
    for x in positions:
        if len(original) == 2:
            # NOTE(review): this rebinds the strings to empty *lists*; the
            # later len()/print calls still work, but the type changes.
            original = []
            upper_case = []
        elif len(original) - 1 == x:
            # NOTE(review): slicing off three characters for an end-of-string
            # pair looks suspicious (a pair is two chars) — verify intent.
            original = original[:-3]
            upper_case = upper_case[:-3]
        elif x == 0:
            original = original[2:]
            upper_case = upper_case[2:]
        else:
            # Debug trace of each interior deletion.
            print('delete number', x)
            print('-' * 10)
            print(original)
            original = original[:x] + original[x + 2:]
            upper_case = upper_case[:x] + upper_case[x + 2:]
            print('-' * 10)
            print(original)
            print('END')
    iter += 1
    print(original)
    print('Length of string =', len(original), 'after', iter, 'iterations')
    if len_start == len(original):
        break # Break if no letters are deleted
print(original)
print(upper_case)
print(len(original))
18,034 | 5f90c9b74b10a616dee3fc1ac647f391d33dfe45 | import numpy as np
from qsim.codes import qubit, rydberg
from qsim.codes.quantum_state import State
from qsim import tools
from scipy.linalg import expm
import scipy.sparse as sparse
from scipy.sparse.linalg import expm_multiply
from qsim.graph_algorithms.graph import Graph, enumerate_independent_sets
class HamiltonianDriver(object):
    """Driver (mixing) Hamiltonian: a single-qudit Pauli-like operator acting
    on the two ``transition`` levels, summed over every qudit.

    When ``IS_subspace`` is True the operator is instead built directly in the
    independent-set subspace of ``graph`` as a sparse matrix.
    """
    def __init__(self, transition: tuple = (0, 1), energies: tuple = (1,), pauli='X', code=qubit, graph=None,
                 IS_subspace=False):
        """Build the driver Hamiltonian.

        Parameters
        ----------
        transition: tuple
            Pair of single-qudit levels coupled by the operator (for 'Z',
            the levels weighted +1/-1). Default is that the first element
            in transition is the higher energy state.
        energies: tuple
            Overall energy prefactor; only ``energies[0]`` is used.
        pauli: str
            One of 'X', 'Y', 'Z'.
        code:
            Qudit code module (default :mod:`qsim.codes.qubit`).
        graph: Graph | None
            Required when ``IS_subspace`` is True, and for generating the
            full-space ``hamiltonian`` property.
        IS_subspace: bool
            If True, restrict the operator to the independent-set subspace.
        """
        self.transition = transition
        self.energies = energies
        assert pauli in ['X', 'Y', 'Z']
        self.pauli = pauli
        self.code = code
        self.graph = graph
        # Single-qudit operator acting on the `transition` levels.
        # NOTE(review): np.zeros defaults to float dtype, so the complex
        # assignments in the 'Y' branch may fail or truncate — dtype=complex
        # looks intended; verify against NumPy version in use.
        if self.pauli == 'X' and not self.code.logical_code:
            self._operator = np.zeros((self.code.d, self.code.d))
            self._operator[self.transition[1], self.transition[0]] = 1
            self._operator[self.transition[0], self.transition[1]] = 1
        elif self.pauli == 'Y' and not self.code.logical_code:
            self._operator = np.zeros((self.code.d, self.code.d))
            self._operator[self.transition[1], self.transition[0]] = 1j
            self._operator[self.transition[0], self.transition[1]] = -1j
        elif self.pauli == 'Z' and not self.code.logical_code:
            self._operator = np.zeros((self.code.d, self.code.d))
            self._operator[self.transition[0], self.transition[0]] = 1
            self._operator[self.transition[1], self.transition[1]] = -1
        # If a logical code, we should use the normal qubit operators because we assume the code is a qubit
        elif self.pauli == 'X' and self.code.logical_code:
            self._operator = self.code.X
        elif self.pauli == 'Y' and self.code.logical_code:
            self._operator = self.code.Y
        elif self.pauli == 'Z' and self.code.logical_code:
            self._operator = self.code.Z
        self.IS_subspace = IS_subspace
        if self.IS_subspace:
            # Generate sparse mixing Hamiltonian
            assert isinstance(graph, Graph)
            if code is not qubit:
                # Qudit path: enumerate independent sets explicitly as rows
                # of an array, one entry per node.
                IS, num_IS = graph.generate_independent_sets_qudit(self.code)
                if self.pauli == 'Z':
                    # Diagonal entry = (#nodes in level transition[0]) -
                    # (#nodes in level transition[1]) per independent set.
                    self._diagonal_hamiltonian = np.zeros((num_IS, 1))
                    for k in range(num_IS):
                        self._diagonal_hamiltonian[k, 0] = np.sum(IS[k, ...] == self.transition[0]) - np.sum(
                            IS[k, ...] == self.transition[1])
                    self._csr_hamiltonian = sparse.csr_matrix((self._diagonal_hamiltonian.T[0], (np.arange(num_IS),
                                                                                                 np.arange(num_IS))))
                    self._hamiltonian = self._csr_hamiltonian
                else:
                    # For each IS, look at spin flips generated by the laser
                    # Over-allocate space
                    rows = np.zeros(graph.n * num_IS, dtype=int)
                    columns = np.zeros(graph.n * num_IS, dtype=int)
                    # NOTE(review): dtype=int cannot hold the 1j assigned in
                    # the 'Y' branch below — verify that path is exercised.
                    entries = np.zeros(graph.n * num_IS, dtype=int)
                    num_terms = 0
                    for i in range(num_IS):
                        for j in range(graph.n):
                            if IS[i, j] == self.transition[0]:
                                # Flip spin at this location
                                # Get binary representation
                                temp = IS[i, ...].copy()
                                temp[j] = self.transition[1]
                                where_matched = (np.argwhere(np.sum(np.abs(IS - temp), axis=1) == 0).flatten())
                                if len(where_matched) > 0:
                                    # This is a valid spin flip by removing a node
                                    rows[num_terms] = where_matched[0]
                                    columns[num_terms] = i
                                    if self.pauli == 'X':
                                        entries[num_terms] = 1
                                    elif self.pauli == 'Y':
                                        # entries[num_terms] = -1j
                                        entries[num_terms] = 1j
                                    num_terms += 1
                    """else:
                        for (i, key) in enumerate(self.graph.independent_sets_dict):
                            num_neighbors = len(self.graph.independent_sets_dict[key][1])
                            rows[num_terms:num_terms + num_neighbors] = self.graph.independent_sets_dict[key][1]
                            columns[num_terms:num_terms + num_neighbors] = np.ones(num_neighbors) * \
                                                                           self.graph.independent_sets_dict[key][0]
                            if self.pauli == 'X':
                                entries[num_terms:num_terms + num_neighbors] = 1
                            elif self.pauli == 'Y':
                                entries[num_terms:num_terms + num_neighbors] = -1j
                            num_terms += num_neighbors"""
                    # Cut off the excess in the arrays
                    columns = columns[:2 * num_terms]
                    rows = rows[:2 * num_terms]
                    entries = entries[:2 * num_terms]
                    # Populate the second half of the entries according to self.pauli
                    # (mirror each term to make the matrix Hermitian).
                    if self.pauli == 'X':
                        columns[num_terms:2 * num_terms] = rows[:num_terms]
                        rows[num_terms:2 * num_terms] = columns[:num_terms]
                        entries[num_terms:2 * num_terms] = entries[:num_terms]
                    elif self.pauli == 'Y':
                        columns[num_terms:2 * num_terms] = rows[:num_terms]
                        rows[num_terms:2 * num_terms] = columns[:num_terms]
                        entries[num_terms:2 * num_terms] = -1 * entries[:num_terms]
                    # Now, construct the Hamiltonian
                    self._csr_hamiltonian = sparse.csr_matrix((entries, (rows, columns)), shape=(num_IS, num_IS))
                    self._hamiltonian = self._csr_hamiltonian
            else:
                # Use graph generator functions
                if self.pauli == 'Z':
                    independent_sets = enumerate_independent_sets(graph.graph)
                    # Generate a list of integers corresponding to the independent sets in binary
                    # All ones
                    # NOTE(review): this branch sets only _hamiltonian; methods
                    # below that read _diagonal_hamiltonian/_csr_hamiltonian
                    # appear to assume the non-qubit path — verify.
                    k = self.graph.num_independent_sets - 2
                    self.mis_size = 0
                    hamiltonian = np.zeros(self.graph.num_independent_sets, dtype=float)
                    hamiltonian[-1] = -1 * self.graph.n
                    for i in independent_sets:
                        hamiltonian[k] = len(i) - (self.graph.n - len(i))
                        k -= 1
                    self._hamiltonian = sparse.csr_matrix(
                        (hamiltonian,
                         (np.arange(self.graph.num_independent_sets), np.arange(self.graph.num_independent_sets))),
                        shape=(self.graph.num_independent_sets, self.graph.num_independent_sets))
                else:
                    independent_sets = enumerate_independent_sets(graph.graph)
                    # Generate a list of integers corresponding to the independent sets in binary
                    previous_size = 0
                    self.mis_size = 0
                    # Maps a (tuple of nodes) independent set to its row index;
                    # the empty set occupies the last index.
                    independent_sets_dict = {(): self.graph.num_independent_sets - 1}
                    rows = []
                    columns = []
                    entries = []
                    k = self.graph.num_independent_sets - 2
                    for i in independent_sets:
                        current_size = len(i)
                        if current_size - previous_size > 1:
                            previous_size = current_size - 1
                            # Clear out the dictionary with terms we can't connect to
                            for key in list(independent_sets_dict):
                                if len(key) != previous_size:
                                    independent_sets_dict.pop(key)
                        independent_sets_dict[tuple(i)] = k
                        for (j, node) in enumerate(i):
                            i_removed = i.copy()
                            i_removed.pop(j)
                            index = independent_sets_dict[tuple(i_removed)]
                            # Index is the current independent set with a single node removed
                            rows.append(k)
                            columns.append(index)
                            rows.append(index)
                            columns.append(k)
                            if self.pauli == 'Y':
                                entries.append(-1j)
                                entries.append(1j)
                            else:
                                entries.append(1)
                                entries.append(1)
                        k -= 1
                    # Now, construct the Hamiltonian
                    self._csr_hamiltonian = sparse.csr_matrix((entries, (rows, columns)),
                                                              shape=(self.graph.num_independent_sets,
                                                                     self.graph.num_independent_sets))
                    self._hamiltonian = self._csr_hamiltonian
        else:
            # Built lazily by the `hamiltonian` property.
            self._hamiltonian = None
        # Liouville-space operators, built lazily by `evolution_operator`.
        self._left_acting_hamiltonian = None
        self._right_acting_hamiltonian = None

    @property
    def hamiltonian(self):
        """Sparse Hamiltonian scaled by ``energies[0]``; the full-space
        matrix is constructed lazily on first access and cached."""
        if self._hamiltonian is None:
            assert not self.IS_subspace
            try:
                assert self.graph is not None
            except AssertionError:
                # NOTE(review): this only prints and then proceeds; the
                # subsequent access to self.graph.n will still fail.
                print('self.graph must be not None to generate the Hamiltonian property.')
            # NOTE(review): `self.code.d * self.code.n` as the per-qudit
            # dimension looks suspicious (elsewhere d ** n is used); it is
            # equal only for n == 1 codes — verify.
            self._hamiltonian = sparse.csr_matrix(((self.code.d * self.code.n) ** self.graph.n,
                                                   (self.code.d * self.code.n) ** self.graph.n))
            for i in range(self.graph.n):
                self._hamiltonian = self._hamiltonian + tools.tensor_product(
                    [sparse.identity((self.code.d * self.code.n) ** i),
                     self._operator,
                     sparse.identity((self.code.d * self.code.n) ** (self.graph.n - i - 1))],
                    sparse=True)
        return self.energies[0] * self._hamiltonian

    @property
    def evolution_operator(self, vector_space='hilbert'):
        """Generator of time evolution: -i*H (Hilbert) or the left/right
        superoperator combination (Liouville).

        NOTE(review): as a property this can never be called with an
        argument, so ``vector_space`` is always 'hilbert' — verify intent.
        """
        if vector_space != 'hilbert' and vector_space != 'liouville':
            raise Exception('Attribute vector_space must be hilbert or liouville')
        if vector_space == 'liouville':
            if self._left_acting_hamiltonian is None:
                self._left_acting_hamiltonian = sparse.kron(sparse.identity(self._hamiltonian.shape[0]),
                                                            self._hamiltonian)
                self._right_acting_hamiltonian = sparse.kron(self._hamiltonian.T,
                                                             sparse.identity(self._hamiltonian.shape[0]))
            return -1j * self.energies[0] * self._left_acting_hamiltonian + 1j * self.energies[0] * \
                   self._right_acting_hamiltonian
        else:
            return -1j * self.hamiltonian

    def left_multiply(self, state: State):
        """Return ``energies[0] * H @ state`` as a :class:`State`, using
        Fortran-order reshapes to apply the single-qudit operator in place of
        building the full matrix."""
        if not self.IS_subspace:
            temp = np.zeros_like(state)
            # For each logical qubit
            state_shape = state.shape
            for i in range(state.number_logical_qudits):
                if self.code.logical_code:
                    # Delegate to the code's own logical-operator application.
                    if self.pauli == 'X':
                        temp = temp + self.code.left_multiply(state, [i], ['X'])
                    elif self.pauli == 'Y':
                        temp = temp + self.code.left_multiply(state, [i], ['Y'])
                    elif self.pauli == 'Z':
                        temp = temp + self.code.left_multiply(state, [i], ['Z'])
                elif not self.code.logical_code:
                    ind = self.code.d ** i
                    out = np.zeros_like(state, dtype=np.complex128)
                    if state.is_ket:
                        # Expose qudit i as the middle axis of a 3-D view.
                        state = state.reshape((-1, self.code.d, ind), order='F')
                        # Note index start from the right (sN,...,s3,s2,s1)
                        out = out.reshape((-1, self.code.d, ind), order='F')
                        if self.pauli == 'X':  # Sigma_X
                            # We want to exchange two indices
                            out[:, [self.transition[0], self.transition[1]], :] = \
                                state[:, [self.transition[1], self.transition[0]], :]
                        elif self.pauli == 'Y':  # Sigma_Y
                            out[:, [self.transition[0], self.transition[1]], :] = \
                                state[:, [self.transition[1], self.transition[0]], :]
                            out[:, self.transition[0], :] = -1j * out[:, self.transition[0], :]
                            out[:, self.transition[1], :] = 1j * out[:, self.transition[1], :]
                        elif self.pauli == 'Z':  # Sigma_Z
                            out[:, [self.transition[0], self.transition[1]], :] = \
                                state[:, [self.transition[0], self.transition[1]], :]
                            out[:, self.transition[1], :] = -1 * out[:, self.transition[1], :]
                        state = state.reshape(state_shape, order='F')
                        out = out.reshape(state_shape, order='F')
                    else:
                        # Density matrix: a 5-D view exposing qudit i on the
                        # row side (axis 1).
                        out = out.reshape((-1, self.code.d, self.code.d ** (state.number_physical_qudits - 1),
                                           self.code.d, ind), order='F')
                        state = state.reshape((-1, self.code.d, self.code.d ** (state.number_physical_qudits - 1),
                                               self.code.d, ind), order='F')
                        if self.pauli == 'X':  # Sigma_X
                            out[:, [self.transition[0], self.transition[1]], :, :, :] = \
                                state[:, [self.transition[1], self.transition[0]], :, :, :]
                        elif self.pauli == 'Y':  # Sigma_Y
                            out[:, [self.transition[0], self.transition[1]], :, :, :] = \
                                state[:, [self.transition[1], self.transition[0]], :, :, :]
                            out[:, self.transition[0], :, :, :] = -1j * out[:, self.transition[0], :, :, :]
                            out[:, self.transition[1], :, :, :] = 1j * out[:, self.transition[1], :, :, :]
                        elif self.pauli == 'Z':  # Sigma_Z
                            out[:, [self.transition[0], self.transition[1]], :, :, :] = \
                                state[:, [self.transition[0], self.transition[1]], :, :, :]
                            out[:, self.transition[1], :, :, :] = -1 * out[:, self.transition[1], :, :, :]
                        state = state.reshape(state_shape, order='F')
                        out = out.reshape(state_shape, order='F')
                    temp = temp + out
            return State(self.energies[0] * temp, is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code,
                         graph=self.graph)
        else:
            # Handle dimensions
            if self.pauli == 'Z' and not self.code.logical_code:  # In this case, the Hamiltonian is diagonal
                return State(self.energies[0] * self._diagonal_hamiltonian * state, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
            else:
                return State(self.energies[0] * self._csr_hamiltonian @ state, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)

    def right_multiply(self, state: State):
        """Return ``state @ H`` for density matrices; for kets, applies the
        operator and daggers the result (see the printed warning)."""
        if state.is_ket:
            print('Warning: right multiply functionality currently applies the operator and daggers the state.')
            return self.left_multiply(state).conj().T
        if not self.IS_subspace:
            temp = np.zeros_like(state)
            # For each physical qubit
            state_shape = state.shape
            for i in range(state.number_logical_qudits):
                if self.code.logical_code:
                    if self.pauli == 'X':
                        temp = temp + self.code.right_multiply(state, [i], ['X'])
                    elif self.pauli == 'Y':
                        temp = temp + self.code.right_multiply(state, [i], ['Y'])
                    elif self.pauli == 'Z':
                        temp = temp + self.code.right_multiply(state, [i], ['Z'])
                elif not self.code.logical_code:
                    ind = self.code.d ** i
                    out = np.zeros_like(state)
                    # 5-D view exposing qudit i on the column side (axis 3).
                    out = out.reshape(
                        (-1, self.code.d, self.code.d ** (state.number_physical_qudits - 1), self.code.d, ind),
                        order='F')
                    state = state.reshape(
                        (-1, self.code.d, self.code.d ** (state.number_physical_qudits - 1), self.code.d, ind),
                        order='F')
                    if self.pauli == 'X' and not self.code.logical_code:  # Sigma_X
                        out[:, :, :, [self.transition[0], self.transition[1]], :] = state[:, :, :, [self.transition[1],
                                                                                                    self.transition[0]],
                                                                                          :]
                    elif self.pauli == 'Y' and not self.code.logical_code:  # Sigma_Y
                        out[:, :, :, [self.transition[0], self.transition[1]], :] = state[:, :, :, [self.transition[1],
                                                                                                    self.transition[0]],
                                                                                          :]
                        out[:, :, :, self.transition[0], :] = -1j * out[:, :, :, self.transition[0], :]
                        out[:, :, :, self.transition[1], :] = 1j * out[:, :, :, self.transition[1], :]
                    elif self.pauli == 'Z' and not self.code.logical_code:  # Sigma_Z
                        out[:, :, :, [self.transition[0], self.transition[1]], :] = state[:, :, :, [self.transition[0],
                                                                                                    self.transition[1]],
                                                                                          :]
                        out[:, :, :, self.transition[1], :] = -1 * state[:, :, :, self.transition[1], :]
                    # NOTE(review): the next two lines are duplicated; the
                    # second reshape is a harmless no-op.
                    state = state.reshape(state_shape, order='F')
                    state = state.reshape(state_shape, order='F')
                    out = out.reshape(state_shape, order='F')
                    temp = temp + out
            return State(self.energies[0] * temp, is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code,
                         graph=self.graph)
        else:
            if self.pauli == 'Z' and not self.code.logical_code:  # In this case, the Hamiltonian is diagonal
                return State(state * self.hamiltonian.T, is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                             code=state.code, graph=self.graph)
            else:
                return State(state @ self.hamiltonian.T.conj(), is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                             code=state.code, graph=self.graph)

    def evolve(self, state: State, time):
        r"""
        Use reshape to efficiently implement evolution under :math:`H_B=\\sum_i X_i`

        Applies ``exp(-i * energies[0] * time * H)`` to ``state`` and returns
        the evolved :class:`State`.
        """
        if not self.IS_subspace:
            # We don't want to modify the original s
            out = state.copy()
            for i in range(state.number_logical_qudits):
                # Note that self._operator is not necessarily involutary
                if self.pauli == 'X':
                    out = self.code.rotation(out, [i], self.energies[0] * time, self._operator)
                elif self.pauli == 'Y':
                    out = self.code.rotation(out, [i], self.energies[0] * time, self._operator)
                elif self.pauli == 'Z':
                    out = self.code.rotation(out, [i], self.energies[0] * time, self._operator)
            return out
        else:
            if state.is_ket:
                # Handle dimensions
                if self.hamiltonian.shape[1] == 1:
                    # Diagonal stored as a column vector: exponentiate elementwise.
                    return State(np.exp(-1j * time * self.hamiltonian) * state, is_ket=state.is_ket,
                                 IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
                else:
                    return State(expm_multiply(-1j * time * self.hamiltonian, state),
                                 is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
            else:
                if self.hamiltonian.shape[1] == 1:
                    exp_hamiltonian = np.exp(-1j * time * self.hamiltonian)
                    return State(exp_hamiltonian * state * exp_hamiltonian.conj().T,
                                 is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
                else:
                    exp_hamiltonian = expm(-1j * time * self.hamiltonian)
                    return State(exp_hamiltonian @ state @ exp_hamiltonian.conj().T,
                                 is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
class HamiltonianMaxCut(object):
    """MaxCut cost Hamiltonian built from Z x Z terms over the edges of a
    weighted graph, stored as a diagonal vector when the code's Z operator is
    diagonal and as a sparse matrix otherwise."""
    def __init__(self, graph: Graph, code=qubit, energies=(1,), cost_function=True, use_Z2_symmetry=False):
        # If MIS is true, create an MIS Hamiltonian. Otherwise, make a MaxCut Hamiltonian
        r"""
        Generate a vector corresponding to the diagonal of the MaxCut Hamiltonian.

        Parameters
        ----------
        graph: Graph
            Weighted graph whose edges define the Z x Z terms.
        code:
            Qudit code module (default qubit).
        energies: tuple
            Overall prefactor; only ``energies[0]`` is used.
        cost_function: bool
            If True build the (shifted) cost-function form
            ``-1/2 * w * (Z_a Z_b - I)``; otherwise the bare ``w * Z_a Z_b``.
        use_Z2_symmetry: bool
            If True, fix the first node's spin and work in the halved space.
        """
        self.code = code
        self.energies = energies
        # Make sure all edges have weight attribute; default to 1
        self.graph = graph
        self.optimization = 'max'
        self.n = self.graph.n
        if use_Z2_symmetry:
            c = np.zeros([self.code.d ** (self.code.n * (self.n - 1)), 1])
        else:
            c = np.zeros([self.code.d ** (self.code.n * self.n), 1])
        if tools.is_diagonal(self.code.Z):
            self._is_diagonal = True
            # Diagonal of Z as a column vector; my_eye produces the matching
            # all-ones column so tensor products stay diagonal vectors.
            z = np.expand_dims(np.diagonal(self.code.Z), axis=0).T
            def my_eye(n):
                return np.ones((np.asarray(self.code.d ** self.code.n) ** n, 1))
        else:
            self._is_diagonal = False
            # Compute the optimum first. We don't care that this takes extra time, since it only needs to run once
            z = np.expand_dims(np.diagonal(qubit.Z), axis=0).T
            def my_eye(n):
                return np.ones((np.asarray(qubit.d) ** n, 1))
        # First pass: accumulate the diagonal cost vector (used below to
        # record the optimum value).
        for a, b in self.graph.edges:
            if b < a:
                a, b = b, a
            if use_Z2_symmetry:
                if cost_function:
                    # The first (minimum-index) node is gauge-fixed, so edges
                    # touching it carry a single Z factor.
                    if a == min(self.graph.nodes):
                        c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(
                            [my_eye(b - 1), z, my_eye(self.n - b - 1)]) - my_eye(self.n - 1))
                    else:
                        c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(
                            [my_eye(a - 1), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)]) - my_eye(self.n - 1))
                else:
                    if a == min(self.graph.nodes):
                        c = c + graph.graph[a][b]['weight'] * (tools.tensor_product(
                            [my_eye(b - 1), z, my_eye(self.n - b - 1)]))
                    else:
                        c = c + graph.graph[a][b]['weight'] * (tools.tensor_product(
                            [my_eye(a - 1), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)]))
            else:
                if cost_function:
                    c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(
                        [my_eye(a), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)]) - my_eye(self.n))
                else:
                    c = c + graph.graph[a][b]['weight'] * tools.tensor_product(
                        [my_eye(a), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)])
        self._optimum = np.max(c).real
        # Second pass: rebuild c with the code's actual Z operator.
        c = sparse.csr_matrix((self.code.d ** (self.code.n * self.n), self.code.d ** (self.code.n * self.n)))
        z = sparse.csr_matrix(self.code.Z)
        # NOTE(review): this my_eye shadows the earlier definition, and the
        # csr_matrix(data, shape) call below passes a 1-D ones array with a
        # square shape — this looks malformed for n > 0; verify.
        def my_eye(n):
            return sparse.csr_matrix(np.ones(np.asarray(z.shape[0]) ** n),
                                     (np.asarray(z.shape[0]) ** n, np.asarray(z.shape[0]) ** n))
        for a, b in self.graph.edges:
            if b < a:
                a, b = b, a
            if cost_function:
                if use_Z2_symmetry:
                    if a == min(self.graph.nodes):
                        c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(
                            [my_eye(b - 1), z, my_eye(self.n - b - 1)]) - my_eye(self.n - 1))
                    else:
                        c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(
                            [my_eye(a - 1), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)]) - my_eye(self.n - 1))
                else:
                    c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(
                        [my_eye(a), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)],
                        sparse=(not self._is_diagonal)) - my_eye(
                        self.n))
            else:
                if use_Z2_symmetry:
                    if a == min(self.graph.nodes):
                        c = c + graph.graph[a][b]['weight'] * (tools.tensor_product(
                            [my_eye(b - 1), z, my_eye(self.n - b - 1)]))
                    else:
                        c = c + graph.graph[a][b]['weight'] * (tools.tensor_product(
                            [my_eye(a - 1), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)]))
                else:
                    c = c + graph.graph[a][b]['weight'] * (tools.tensor_product(
                        [my_eye(a), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)],
                        sparse=(not self._is_diagonal)))
        if self._is_diagonal:
            # Keep the diagonal vector and also build the sparse diagonal matrix.
            self._diagonal_hamiltonian = c
            self._optimum = np.max(c).real
            if use_Z2_symmetry:
                c = sparse.csr_matrix((c.flatten(), (np.arange(self.code.d ** (self.code.n * (self.n - 1))),
                                                     np.arange(self.code.d ** (self.code.n * (self.n - 1))))),
                                      shape=(self.code.d ** (self.code.n * (self.n - 1)),
                                             self.code.d ** (self.code.n * (self.n - 1))))
            else:
                c = sparse.csr_matrix((c.flatten(), (np.arange(self.code.d ** (self.code.n * self.n)),
                                                     np.arange(self.code.d ** (self.code.n * self.n)))),
                                      shape=(self.code.d ** (self.code.n * self.n),
                                             self.code.d ** (self.code.n * self.n)))
        else:
            # c is already the right shape, just convert it to a csr matrix
            c = sparse.csr_matrix(c)
        self._hamiltonian = c
        # Liouville-space operators, built lazily by `evolution_operator`.
        self._left_acting_hamiltonian = None
        self._right_acting_hamiltonian = None

    @property
    def hamiltonian(self):
        """Sparse Hamiltonian scaled by ``energies[0]``."""
        return self.energies[0] * self._hamiltonian

    @property
    def evolution_operator(self, vector_space='hilbert'):
        """Generator of time evolution: -i*H (Hilbert) or the left/right
        superoperator combination (Liouville).

        NOTE(review): as a property this can never be called with an
        argument, so ``vector_space`` is always 'hilbert' — verify intent.
        """
        if vector_space != 'hilbert' and vector_space != 'liouville':
            raise Exception('Attribute vector_space must be hilbert or liouville')
        if vector_space == 'liouville':
            if self._left_acting_hamiltonian is None:
                self._left_acting_hamiltonian = sparse.kron(sparse.identity(self._hamiltonian.shape[0]),
                                                            self._hamiltonian)
                self._right_acting_hamiltonian = sparse.kron(self._hamiltonian.T,
                                                             sparse.identity(self._hamiltonian.shape[0]))
            return -1j * self.energies[0] * self._left_acting_hamiltonian + 1j * self.energies[0] * \
                   self._right_acting_hamiltonian
        else:
            return -1j * self.hamiltonian

    @property
    def optimum(self):
        # Optimum for non-diagonal Hamiltonians can be found by computing the optimum in the standard basis,
        # which is done in self.__init__()
        return self.energies[0] * self._optimum

    def evolve(self, state: State, time):
        """Apply ``exp(-i * time * H)`` to ``state`` and return the evolved
        :class:`State`."""
        if state.is_ket:
            if self._is_diagonal:
                # It's quicker to exponentiate a diagonal array than use expm_multiply
                return State(np.exp(-1j * time * self._diagonal_hamiltonian) * state, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
            else:
                return State(expm_multiply(-1j * time * self.hamiltonian, state), is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
        else:
            if self._is_diagonal:
                return State(np.exp(-1j * time * self._diagonal_hamiltonian) * state * np.exp(
                    1j * time * self._diagonal_hamiltonian).T, is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                             code=state.code, graph=self.graph)
            else:
                temp = expm(-1j * time * self.hamiltonian)
                return State(temp @ state @ temp.conj().T, is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                             code=state.code, graph=self.graph)

    def left_multiply(self, state: State):
        """Return ``H @ state`` (elementwise via the diagonal when possible).

        NOTE(review): the diagonal branch uses the unscaled
        ``_diagonal_hamiltonian`` while the sparse branch uses the scaled
        ``hamiltonian`` property — the ``energies[0]`` factor differs; verify.
        """
        if self._is_diagonal:
            return State(self._diagonal_hamiltonian * state, is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                         code=state.code, graph=self.graph)
        else:
            return State(self.hamiltonian @ state, is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                         code=state.code, graph=self.graph)

    def right_multiply(self, state: State):
        """Return ``state @ H`` for density matrices (for kets, the daggered
        product)."""
        # Already real, so you don't need to conjugate
        if state.is_ket:
            if self._is_diagonal:
                return State(state.conj().T * self._diagonal_hamiltonian.T, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
            else:
                return State((state @ self.hamiltonian.T).conj().T, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
        else:
            if self._is_diagonal:
                # Density matrices are already Hermitian, so you don't need to dagger
                return State(state * self._diagonal_hamiltonian.T, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
            else:
                return State(state @ self.hamiltonian.T, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)

    def cost_function(self, state: State):
        # Need to project into the IS subspace
        # Returns <s|C|s>
        if state.is_ket:
            if self._is_diagonal:
                return np.real(np.vdot(state, self._diagonal_hamiltonian * state))
            else:
                return np.real(np.vdot(state, self.hamiltonian @ state))
        else:
            # Density matrix
            if self._is_diagonal:
                return np.real(np.squeeze(tools.trace(self._diagonal_hamiltonian * state)))
            else:
                return np.real(np.squeeze(tools.trace(self.hamiltonian @ state)))

    def optimum_overlap(self, state: State):
        # Returns \sum_i <s|opt_i><opt_i|s>
        if self._is_diagonal:
            # Indices of basis states achieving the optimum cost.
            # NOTE(review): _diagonal_hamiltonian is unscaled while
            # self.optimum carries energies[0]; for energies != (1,) this
            # comparison may match nothing — verify.
            optimum_indices = np.argwhere(self._diagonal_hamiltonian == self.optimum).T[0]
            # Construct an operator that is zero everywhere except at the optimum
            optimum = np.zeros(self._diagonal_hamiltonian.shape)
            optimum[optimum_indices] = 1
        else:
            # The plan for this is to basically use the code for _is_diagonal to identify the logical qubit subspaces
            # which encode the optimum. Then, make an operator that's the identity in those subspaces
            raise NotImplementedError('Optimum overlap not implemented for non-diagonal Hamiltonians')
        if state.is_ket:
            return np.real(np.vdot(state, optimum * state))
        else:
            # Density matrix
            return np.real(np.squeeze(tools.trace(optimum * state)))

    def approximation_ratio(self, state: State):
        # Returns <s|C|s>/optimum
        return self.cost_function(state) / self.optimum
class HamiltonianMIS(object):
def __init__(self, graph: Graph, energies=(1, 1), code=qubit, IS_subspace=False):
r"""
Generate a vector corresponding to the diagonal of the MIS Hamiltonian.
"""
if energies == (1, 1) and IS_subspace:
energies = (1,)
self.code = code
self.graph = graph
self.n = self.graph.n
self.energies = energies
self.IS_subspace = IS_subspace
self.optimization = 'max'
if not self.IS_subspace:
# Store node and edge terms separately so the Hamiltonian can be dynamically updated when energies
# are changed
if tools.is_diagonal(self.code.Q):
self._hamiltonian_edge_terms = np.zeros([1, (self.code.d ** self.code.n) ** self.n])
self._hamiltonian_node_terms = np.zeros([1, (self.code.d ** self.code.n) ** self.n])
self._is_diagonal = True
Q = np.expand_dims(np.diagonal(self.code.Q), axis=0)
def my_eye(n):
return np.ones(np.asarray(self.code.d ** self.code.n) ** n)
else:
# TODO: generate a sparse matrix instead
self._hamiltonian_edge_terms = np.zeros([(self.code.d ** self.code.n) ** self.n,
(self.code.d ** self.code.n) ** self.n])
self._hamiltonian_node_terms = np.zeros([(self.code.d ** self.code.n) ** self.n,
(self.code.d ** self.code.n) ** self.n])
Q = np.expand_dims(np.diagonal(qubit.Q), axis=0)
def my_eye(n):
return np.ones(np.asarray(qubit.d ** qubit.n) ** n)
self._optimum_edge_terms = np.zeros([(qubit.d ** qubit.n) ** self.n,
(qubit.d ** qubit.n) ** self.n])
self._optimum_node_terms = np.zeros([(qubit.d ** qubit.n) ** self.n,
(qubit.d ** qubit.n) ** self.n])
for i, j in graph.graph.edges:
if j < i:
i, j = j, i
self._optimum_edge_terms = self._optimum_edge_terms + graph.graph.edges[(i, j)]['weight'] * \
tools.tensor_product(
[my_eye(i), Q, my_eye(j - i - 1), Q, my_eye(self.n - j - 1)])
for i in graph.graph.nodes:
self._optimum_node_terms = self._optimum_node_terms + graph.graph.nodes[i]['weight'] * \
tools.tensor_product([my_eye(i), Q, my_eye(self.n - i - 1)])
self._is_diagonal = False
Q = self.code.Q
def my_eye(n):
return np.identity(np.asarray(self.code.d ** self.code.n) ** n)
for i, j in graph.graph.edges:
if j < i:
i, j = j, i
self._hamiltonian_edge_terms = self._hamiltonian_edge_terms + graph.graph.edges[(i, j)]['weight'] * \
tools.tensor_product(
[my_eye(i), Q, my_eye(j - i - 1), Q, my_eye(self.n - j - 1)])
for i in graph.graph.nodes:
self._hamiltonian_node_terms = self._hamiltonian_node_terms + graph.graph.nodes[i]['weight'] * \
tools.tensor_product([my_eye(i), Q, my_eye(self.n - i - 1)])
self._hamiltonian_node_terms = self._hamiltonian_node_terms.T
self._hamiltonian_edge_terms = self._hamiltonian_edge_terms.T
if self._is_diagonal:
self._optimum_edge_terms = self._hamiltonian_edge_terms
self._optimum_node_terms = self._hamiltonian_node_terms
self._diagonal_hamiltonian_edge_terms = self._hamiltonian_edge_terms.copy()
self._diagonal_hamiltonian_node_terms = self._hamiltonian_node_terms.copy()
self._hamiltonian_node_terms = sparse.csr_matrix(
(self._hamiltonian_node_terms.flatten(), (np.arange(self.code.d ** (self.code.n * self.n)),
np.arange(self.code.d ** (self.code.n * self.n)))),
shape=(self.code.d ** (self.code.n * self.n),
self.code.d ** (self.code.n * self.n)))
self._hamiltonian_edge_terms = sparse.csr_matrix(
(self._hamiltonian_edge_terms.flatten(), (np.arange(self.code.d ** (self.code.n * self.n)),
np.arange(self.code.d ** (self.code.n * self.n)))),
shape=(self.code.d ** (self.code.n * self.n),
self.code.d ** (self.code.n * self.n)))
# TODO: what happens to _optimum_node_terms if not _is_diagonal
else:
self._diagonal_hamiltonian_edge_terms = self._hamiltonian_edge_terms.copy()
self._diagonal_hamiltonian_node_terms = self._hamiltonian_node_terms.copy()
self._hamiltonian_edge_terms = sparse.csr_matrix(self._hamiltonian_edge_terms)
self._hamiltonian_node_terms = sparse.csr_matrix(self._hamiltonian_node_terms)
self._left_acting_hamiltonian_edge_terms = None
self._right_acting_hamiltonian_edge_terms = None
else:
self._is_diagonal = True
if not (self.code == qubit or self.code == rydberg):
raise NotImplementedError("IS subspace only implemented for qubit and Rydberg codes.")
# Don't generate anything that depends on the entire Hilbert space as to save space
# These are your independent sets of the original graphs, ordered by node and size
if self.code == qubit:
node_weights = np.asarray([self.graph.graph.nodes[i]['weight'] for i in range(self.graph.n)])
independent_sets = enumerate_independent_sets(self.graph.graph)
# Generate a list of integers corresponding to the independent sets in binary
# All ones
k = self.graph.num_independent_sets - 2
self.mis_size = 0
C = np.zeros(self.graph.num_independent_sets, dtype=float)
C[-1] = 0
for i in independent_sets:
C[k] = np.sum([node_weights[j] for j in i])
k -= 1
self._hamiltonian = sparse.csr_matrix((C, (np.arange(self.graph.num_independent_sets),
np.arange(self.graph.num_independent_sets))))
C = np.expand_dims(C, axis=0).T
# Otherwise, we need to include the possibility that we are in one of many ground space states
elif self.code == rydberg:
# TODO: fix this to reflect the new way of notating independent sets!
# Count the number of elements in the ground space and map to their representation in ternary
# Determine how large to make the array
independent_sets, num_IS = self.graph.generate_independent_sets_qudit(self.code)
# Generate Hamiltonian from independent sets
node_weights = np.asarray([self.graph.graph.nodes[i]['weight'] for i in range(self.graph.n)])
C = np.zeros((num_IS, 1), dtype=np.complex128)
for k in independent_sets:
C[k, 0] = np.sum((independent_sets[k][2] == 0) * node_weights)
self._diagonal_hamiltonian_node_terms = C
C = C.flatten()
self._hamiltonian_node_terms = sparse.csr_matrix((
C, (np.arange(len(C)), np.arange(len(C)))), shape=(len(C), len(C)))
self._left_acting_hamiltonian_node_terms = None
self._right_acting_hamiltonian_node_terms = None
@property
def hamiltonian(self):
if not self.IS_subspace:
return self.energies[0] * self._hamiltonian_node_terms - self.energies[1] * self._hamiltonian_edge_terms
else:
return self.energies[0] * self._hamiltonian_node_terms
    @property
    def evolution_operator(self, vector_space='hilbert'):
        """Return the generator of time evolution.

        For 'hilbert' this is -1j * H acting on kets; for 'liouville' it is the
        superoperator -1j (I x H) + 1j (H^T x I) acting on vectorized density
        matrices (built lazily and cached on self).

        NOTE(review): a @property getter is only ever called with `self`, so a
        caller can never pass vector_space through normal attribute access —
        the 'liouville' branch is reachable only via
        type(self).evolution_operator.fget(self, 'liouville'). Confirm intent.
        """
        if vector_space != 'hilbert' and vector_space != 'liouville':
            raise Exception('Attribute vector_space must be hilbert or liouville')
        if vector_space == 'liouville':
            # Lazily build and cache the left-/right-acting superoperator pieces.
            if self._left_acting_hamiltonian_node_terms is None and not self.IS_subspace:
                self._left_acting_hamiltonian_edge_terms = sparse.kron(sparse.identity(
                    self._hamiltonian_node_terms.shape[0]), self._hamiltonian_edge_terms)
                self._right_acting_hamiltonian_edge_terms = sparse.kron(
                    self._hamiltonian_edge_terms.T, sparse.identity(self._hamiltonian_edge_terms.shape[0]))
                self._left_acting_hamiltonian_node_terms = sparse.kron(sparse.identity(
                    self._hamiltonian_node_terms.shape[0]), self._hamiltonian_node_terms)
                self._right_acting_hamiltonian_node_terms = sparse.kron(
                    self._hamiltonian_node_terms.T, sparse.identity(self._hamiltonian_node_terms.shape[0]))
            elif self._left_acting_hamiltonian_node_terms is None and self.IS_subspace:
                # In the IS subspace there are no edge terms to build.
                self._left_acting_hamiltonian_node_terms = sparse.kron(sparse.identity(
                    self._hamiltonian_node_terms.shape[0]), self._hamiltonian_node_terms)
                self._right_acting_hamiltonian_node_terms = sparse.kron(
                    self._hamiltonian_node_terms.T, sparse.identity(self._hamiltonian_node_terms.shape[0]))
            if not self.IS_subspace:
                return -1j * (self.energies[0] * self._left_acting_hamiltonian_node_terms - self.energies[1] *
                              self._left_acting_hamiltonian_edge_terms) + 1j * \
                       (self.energies[0] * self._right_acting_hamiltonian_node_terms - self.energies[1] *
                        self._right_acting_hamiltonian_edge_terms)
            elif self.IS_subspace:
                return -1j * (self.energies[0] * self._left_acting_hamiltonian_node_terms) + 1j * \
                       (self.energies[0] * self._right_acting_hamiltonian_node_terms)
        else:
            # Hilbert-space generator: plain -i H.
            return -1j * self.hamiltonian
@property
def _diagonal_hamiltonian(self):
if not self.IS_subspace:
return self.energies[0] * self._diagonal_hamiltonian_node_terms - self.energies[
1] * self._diagonal_hamiltonian_edge_terms
else:
return self.energies[0] * self._diagonal_hamiltonian_node_terms
@property
def optimum(self):
# This needs to be recomputed because the optimum depends on the energies
# TODO: figure out what to compute if not _is_diagonal
if self._is_diagonal:
return np.max(self._diagonal_hamiltonian).real
else:
raise NotImplementedError('Optimum unknown for non-diagonal Hamiltonians')
    def evolve(self, state: State, time):
        """Evolve `state` under this Hamiltonian for duration `time`.

        Kets become e^{-iHt}|s>; density matrices become e^{-iHt} rho e^{+iHt}.
        When the Hamiltonian is diagonal, the exponential is applied elementwise
        instead of via a matrix exponential.
        """
        if state.is_ket:
            if self._is_diagonal:
                # Elementwise phase using the stored diagonal of H.
                return State(np.exp(-1j * time * self._diagonal_hamiltonian) * state, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
            else:
                # Sparse-friendly action of the matrix exponential on the ket.
                return State(expm_multiply(-1j * time * self.hamiltonian, state), is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
        else:
            if self._is_diagonal:
                # rho -> e^{-iHt} rho e^{+iHt}, phases broadcast along rows/columns.
                return State(np.exp(-1j * time * self._diagonal_hamiltonian) * state * np.exp(
                    1j * time * self._diagonal_hamiltonian).T, is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                             code=state.code, graph=self.graph)
            else:
                temp = expm(-1j * time * self.hamiltonian)
                return State(temp @ state @ temp.conj().T, is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                             code=state.code, graph=self.graph)
def left_multiply(self, state: State):
if self._is_diagonal:
return State(self._diagonal_hamiltonian * state, is_ket=state.is_ket, IS_subspace=state.IS_subspace,
code=state.code, graph=self.graph)
else:
return State(self.hamiltonian @ state, is_ket=state.is_ket, IS_subspace=state.IS_subspace,
code=state.code, graph=self.graph)
    def right_multiply(self, state: State):
        """Apply the Hamiltonian from the right: state @ H.

        The Hamiltonian is real, so no conjugation of H is needed; density
        matrices are Hermitian, so no dagger is taken on the state either.
        """
        # Already real, so you don't need to conjugate
        if state.is_ket:
            if self._is_diagonal:
                # <s| H as a row vector: dagger the ket, then scale elementwise.
                return State(state.conj().T * self._diagonal_hamiltonian.T, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
            else:
                return State((state @ self.hamiltonian.T).conj().T, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
        else:
            if self._is_diagonal:
                # Density matrices are already Hermitian, so you don't need to dagger
                return State(state * self._diagonal_hamiltonian.T, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
            else:
                return State(state @ self.hamiltonian.T, is_ket=state.is_ket,
                             IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
def cost_function(self, state: State):
# Returns <s|C|s>
if state.is_ket:
if self._is_diagonal:
return np.real(np.vdot(state, self._diagonal_hamiltonian * state))
else:
return np.real(np.vdot(state, self.hamiltonian @ state))
else:
# Density matrix
if self._is_diagonal:
return np.real(np.squeeze(tools.trace(self._diagonal_hamiltonian * state)))
else:
return np.real(np.squeeze(tools.trace(self.hamiltonian @ state)))
    def optimum_overlap(self, state: State):
        """Return the total weight of `state` on the optimal-cost states.

        Computes sum_i <s|opt_i><opt_i|s> for a ket, or the corresponding trace
        for a density matrix, using a diagonal projector onto the optimum.
        """
        # Returns \sum_i <s|opt_i><opt_i|s>
        if self._is_diagonal:
            optimum_indices = np.argwhere(self._diagonal_hamiltonian == self.optimum).T[0]
            # Construct an operator that is zero everywhere except at the optimum
            optimum = np.zeros(self._diagonal_hamiltonian.shape)
            optimum[optimum_indices] = 1
        else:
            raise NotImplementedError('Optimum overlap not implemented for non-diagonal Hamiltonians')
        # NOTE(review): past this point self._is_diagonal is always True (the
        # non-diagonal case raised above), so the inner `else` branches below
        # are unreachable dead code.
        if state.is_ket:
            if self._is_diagonal:
                return np.real(np.vdot(state, optimum * state))
            else:
                return np.real(np.vdot(state, self.hamiltonian @ state))
        else:
            # Density matrix
            if self._is_diagonal:
                return np.real(np.squeeze(tools.trace(optimum * state)))
            else:
                return np.real(np.squeeze(tools.trace(self.hamiltonian @ state)))
def approximation_ratio(self, state: State):
# Returns <s|C|s>/optimum
return self.cost_function(state) / self.optimum
class HamiltonianGlobalPauli(object):
    """Hamiltonian that applies the same Pauli ('X', 'Y' or 'Z') to every qudit."""

    def __init__(self, pauli: str = 'X', code=qubit):
        """
        :param pauli: one of 'X', 'Y', 'Z'. NOTE(review): any other value
            leaves self._operator unset, deferring the failure to first use.
        :param code: qudit code supplying the single-qudit Pauli matrices.
        """
        self.code = code
        self.pauli = pauli
        if self.pauli == 'X':
            self._operator = self.code.X
        elif self.pauli == 'Y':
            self._operator = self.code.Y
        elif self.pauli == 'Z':
            self._operator = self.code.Z
        # Full tensor-product operator, built lazily on first evolve().
        self.hamiltonian = None

    def evolve(self, state: State, alpha):
        """Apply exp(-i alpha P x ... x P).

        NOTE(review): the final expression never passes `state` to
        self.code.multiply — it looks like `state` should be the first
        argument; confirm against the code API before relying on this.
        """
        if self.hamiltonian is None:
            """Initialize the Hamiltonian only once, as it is costly."""
            # TODO: make this a sparse matrix!
            self.hamiltonian = tools.tensor_product([self._operator] * state.number_logical_qudits)
        return self.code.multiply(np.cos(alpha) * np.identity(state.dimension) - 1j * np.sin(alpha) * self.hamiltonian)

    def left_multiply(self, state: State):
        """Apply the global Pauli string to `state` from the left."""
        all_qubits = list(range(state.number_logical_qudits))
        return self.code.left_multiply(state, all_qubits, [self.pauli] * state.number_logical_qudits)

    def right_multiply(self, state: State):
        """Apply the global Pauli string to `state` from the right."""
        all_qubits = list(range(state.number_logical_qudits))
        return self.code.right_multiply(state, all_qubits, [self.pauli] * state.number_logical_qudits)
class HamiltonianBookatzPenalty(object):
    """Penalty Hamiltonian projecting out of the code space.

    Applies energies[0] * (I - P_code) independently on each logical qudit.
    """

    def __init__(self, code=qubit, energies=(1,), graph=None):
        """
        :param code: qudit code providing d, n and code_space_projector.
        :param energies: one-element tuple; energies[0] scales the penalty.
        :param graph: optional Graph forwarded to the State objects built by
            left_multiply/right_multiply. Bug fix: the original never set
            self.graph but read it in those methods, raising AttributeError.
        """
        self.code = code
        # Projector onto the orthogonal complement of the code space.
        self.projector = np.identity(self.code.d ** self.code.n) - self.code.code_space_projector
        self.energies = energies
        self.graph = graph

    def evolve(self, state: State, time):
        """Rotate each logical qudit by the (idempotent) penalty projector for `time`."""
        # Term for a single qubit
        for i in range(state.number_logical_qudits):
            state = self.code.rotation(state, [i], self.energies[0] * time, self.projector, is_idempotent=True)
        return state

    def left_multiply(self, state: State):
        """Return energies[0] * sum_i (I - P_code)_i @ state as a State."""
        out = np.zeros_like(state, dtype=np.complex128)
        for i in range(state.number_logical_qudits):
            out = out + self.code.left_multiply(state, [i], self.projector)
        return State(self.energies[0] * out, is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code,
                     graph=self.graph)

    def right_multiply(self, state: State):
        """Return energies[0] * state @ sum_i (I - P_code)_i as a State."""
        out = np.zeros_like(state, dtype=np.complex128)
        for i in range(state.number_logical_qudits):
            out = out + self.code.right_multiply(state, [i], self.projector)
        return State(self.energies[0] * out, is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code,
                     graph=self.graph)
class HamiltonianMarvianPenalty(object):
    """Dense penalty Hamiltonian of ZZ/XX gauge couplings on an Nx-by-Ny grid
    of 3-qubit logical sites. The full 2**n x 2**n matrix is built eagerly in
    __init__ and stored in self.hamiltonian."""

    def __init__(self, Nx, Ny):
        super().__init__()
        self.Nx = Nx
        self.Ny = Ny
        # Three physical qubits per logical site.
        self.n = 3 * Nx * Ny
        # Generate Hamiltonian
        # Two by two geometry (can be generalized in the future)
        hp = np.zeros([2 ** self.n, 2 ** self.n])
        for i in range(int(self.Nx * self.Ny)):
            # Add gauge interactions within a single logical qubit
            hp = hp + tools.tensor_product(
                [tools.identity(i * 3), tools.Z(), tools.Z(),
                 tools.identity(self.n - i * 3 - 2)]) + tools.tensor_product(
                [tools.identity(i * 3 + 1), tools.X(), tools.X(), tools.identity(self.n - i * 3 - 3)])
        # Between rows
        for j in range(self.Ny):
            # j is the number of rows
            for k in range(self.Nx):
                # k is the number of columns
                # Need to deal with edge effects
                # Add gauge interactions within a single logical qubit
                if k != self.Nx - 1:
                    # Along the same row
                    hp = hp + tools.tensor_product(
                        [tools.identity(j * self.Nx * 3 + k * 3), tools.X(), tools.identity(2), tools.X(),
                         tools.identity(self.n - (j * self.Nx * 3 + k * 3) - 4)]) + \
                         tools.tensor_product(
                             [tools.identity(j * self.Nx * 3 + k * 3 + 2), tools.Z(), tools.identity(2), tools.Z(),
                              tools.identity(self.n - (j * self.Nx * 3 + k * 3 + 2) - 4)])
                # Along the same column
                if j != self.Ny - 1:
                    hp = hp + tools.tensor_product(
                        [tools.identity(j * self.Nx * 3 + k * 3), tools.X(), tools.identity(3 * self.Nx - 1),
                         tools.X(),
                         tools.identity(self.n - (j * self.Nx * 3 + k * 3) - 3 * self.Nx - 1)]) + \
                         tools.tensor_product(
                             [tools.identity(j * self.Nx * 3 + k * 3 + 2), tools.Z(),
                              tools.identity(3 * self.Nx - 1),
                              tools.Z(), tools.identity(self.n - (j * self.Nx * 3 + k * 3 + 2) - 3 * self.Nx - 1)])
        # The penalty enters with an overall minus sign.
        self.hamiltonian = -1 * hp
class HamiltonianHeisenberg(object):
    """Heisenberg-type Hamiltonian on a graph: energies[0] * ZZ + energies[1] * XY.

    `subspace` is either 'all' (full Hilbert space; the ZZ/XY pieces are built
    lazily in the `hamiltonian` property) or an integer total-magnetization
    sector, in which case both pieces are built eagerly in that fixed sector.
    """

    def __init__(self, graph: Graph, energies=(1, 1), subspace='all', code=qubit, IS_subspace=False):
        self.code = code
        self.graph = graph
        self.n = self.graph.n
        self.energies = energies
        self._is_diagonal = True
        # Only the bare qubit code is supported here.
        assert code is qubit
        if IS_subspace:
            raise NotImplementedError
        self.IS_subspace = IS_subspace
        self._hamiltonian = None
        self.subspace = subspace
        if self.subspace == 'all':
            """Initialize the Hamiltonian."""
            self._hamiltonian_zz = None
            self._hamiltonian_xy = None
        else:
            # For each IS, look at spin flips generated by the laser
            # Over-allocate space
            from scipy.special import comb
            # Dimension of the fixed-magnetization sector.
            dim = comb(graph.n, (graph.n + self.subspace) / 2)
            if dim % 1 != 0:
                raise Exception('Invalid subspace for number of spins')
            dim = int(dim)
            hamiltonian_zz = np.zeros([dim, 1])
            rows = np.zeros(graph.n * dim, dtype=int)
            columns = np.zeros(graph.n * dim, dtype=int)
            entries = np.zeros(graph.n * dim, dtype=int)
            num_terms = 0
            states = np.zeros((dim, graph.n))
            # Enumerate all bitstrings and keep those in the requested sector.
            for i in range(self.code.d ** (self.code.n * self.n)):
                nary = tools.int_to_nary(i, size=graph.n)
                if graph.n - np.sum(nary) == (self.subspace + graph.n) / 2:
                    states[num_terms, ...] = nary
                    num_terms += 1
            num_terms = 0
            for i in range(dim):
                zz = 0
                for a, b in self.graph.edges:
                    if b < a:
                        a, b = b, a
                    # ZZ contribution of this edge: -1 if spins differ, +1 if aligned.
                    if np.abs(states[i, a] - states[i, b]) == 1:
                        zz -= 1
                    else:
                        zz += 1
                    if states[i, a] == 0 and states[i, b] == 1:
                        # Flip spin at this location
                        # Get binary representation
                        temp = states[i, ...].copy()
                        temp[a] = 1
                        temp[b] = 0
                        where_matched_temp = (np.argwhere(np.sum(np.abs(states - temp), axis=1) == 0).flatten())
                        # Record the XY (hopping) matrix element and its transpose.
                        entries[num_terms] = 1
                        rows[num_terms] = where_matched_temp
                        columns[num_terms] = i
                        entries[num_terms + 1] = 1
                        rows[num_terms + 1] = i
                        columns[num_terms + 1] = where_matched_temp
                        num_terms += 2
                hamiltonian_zz[i, 0] = zz
            # Now, construct the Hamiltonian
            self._hamiltonian_zz = sparse.csr_matrix(
                (hamiltonian_zz.flatten(), (np.arange(dim),
                                            np.arange(dim))), shape=(dim, dim))
            self._hamiltonian_xy = sparse.csr_matrix((entries, (rows, columns)), shape=(dim, dim))
            self.states = states

    @property
    def hamiltonian(self):
        # NOTE(review): `and` binds tighter than `or`, so this guard reads as
        # (zz is None) or (xy is None and subspace == 'all'); confirm the
        # intended grouping was not (zz is None or xy is None) and subspace == 'all'.
        if self._hamiltonian_zz is None or self._hamiltonian_xy is None and self.subspace == 'all':
            z = np.expand_dims(np.diagonal(self.code.Z), axis=0).T

            def my_eye(n):
                # Column of ones: the identity in the diagonal (vector) representation.
                return np.ones((np.asarray(self.code.d ** self.code.n) ** n, 1))

            hamiltonian_zz = np.zeros([self.code.d ** (self.code.n * self.n), 1])
            for a, b in self.graph.edges:
                if b < a:
                    a, b = b, a
                hamiltonian_zz = hamiltonian_zz + self.graph.graph[a][b]['weight'] * (tools.tensor_product(
                    [my_eye(a), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)],
                    sparse=(not self._is_diagonal)))
            self._hamiltonian_zz = sparse.csr_matrix(
                (hamiltonian_zz.flatten(), (np.arange(self.code.d ** (self.code.n * self.n)),
                                            np.arange(self.code.d ** (self.code.n * self.n)))),
                shape=(self.code.d ** (self.code.n * self.n),
                       self.code.d ** (self.code.n * self.n)))
            # For each IS, look at spin flips generated by the laser
            # Over-allocate space
            rows = np.zeros(self.graph.n * self.code.d ** (self.code.n * self.n), dtype=int)
            columns = np.zeros(self.graph.n * self.code.d ** (self.code.n * self.n), dtype=int)
            entries = np.zeros(self.graph.n * self.code.d ** (self.code.n * self.n), dtype=int)
            num_terms = 0
            for i in range(self.code.d ** (self.code.n * self.n)):
                nary = tools.int_to_nary(i, size=self.graph.n)
                for a, b in self.graph.edges:
                    if b < a:
                        a, b = b, a
                    if nary[a] == 0 and nary[b] == 1:
                        # Flip spin at this location
                        # Get binary representation
                        temp = nary.copy()
                        temp[a] = 1
                        temp[b] = 0
                        temp = tools.nary_to_int(temp)
                        entries[num_terms] = 1
                        rows[num_terms] = temp
                        columns[num_terms] = i
                        entries[num_terms + 1] = 1
                        rows[num_terms + 1] = i
                        columns[num_terms + 1] = temp
                        num_terms += 2
            # Now, construct the Hamiltonian
            self._hamiltonian_xy = sparse.csr_matrix((entries, (rows, columns)),
                                                     shape=(self.code.d ** (self.code.n * self.n),
                                                            self.code.d ** (self.code.n * self.n)))
        return self.energies[0] * self._hamiltonian_zz + self.energies[1] * self._hamiltonian_xy

    def left_multiply(self, state: State):
        """Apply H to `state` from the left, edge term by edge term.

        NOTE(review): here energies[0] multiplies the XX/YY terms and
        energies[1] the ZZ term — the opposite pairing from the `hamiltonian`
        property above (energies[0] * ZZ + energies[1] * XY). Confirm which
        ordering is intended.
        """
        temp = np.zeros(state.shape, dtype=np.complex128)
        for edge in self.graph.edges:
            if self.energies[0] != 0:
                term = self.code.left_multiply(state, [edge[0], edge[1]], ['X', 'X'])
                temp = temp + self.energies[0] * term
                term = self.code.left_multiply(state, [edge[0], edge[1]], ['Y', 'Y'])
                temp = temp + self.energies[0] * term
            if self.energies[1] != 0:
                term = self.code.left_multiply(state, [edge[0], edge[1]], ['Z', 'Z'])
                temp = temp + self.energies[1] * term
        return State(temp, is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)

    def right_multiply(self, state: State):
        """Apply H to `state` from the right (same energies pairing as left_multiply)."""
        temp = np.zeros(state.shape, dtype=np.complex128)
        for edge in self.graph.edges:
            if self.energies[0] != 0:
                term = self.code.right_multiply(state, [edge[0], edge[1]], ['X', 'X'])
                temp = temp + self.energies[0] * term
                term = self.code.right_multiply(state, [edge[0], edge[1]], ['Y', 'Y'])
                temp = temp + self.energies[0] * term
            if self.energies[1] != 0:
                term = self.code.right_multiply(state, [edge[0], edge[1]], ['Z', 'Z'])
                temp = temp + self.energies[1] * term
        return State(temp, is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)

    def evolve(self, state: State, time):
        """Evolve `state` for `time` via the dense/sparse matrix exponential of H."""
        if not state.is_ket:
            exp_hamiltonian = expm(-1j * time * self.hamiltonian)
            return State(exp_hamiltonian @ state @ exp_hamiltonian.conj().T,
                         is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
        if state.is_ket:
            return State(expm_multiply(-1j * time * self.hamiltonian, state), is_ket=state.is_ket,
                         IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)

    def cost_function(self, state: State):
        """Return <s|H|s> for a ket, or Tr(H rho) for a density matrix."""
        # Need to project into the IS subspace
        # Returns <s|C|s>
        if state.is_ket:
            return np.real(np.vdot(state, self.hamiltonian * state))
        else:
            # Density matrix
            return np.real(np.squeeze(tools.trace(self.hamiltonian * state)))
class HamiltonianEnergyShift(object):
    """Diagonal Hamiltonian counting how many qudits sit in level `index`:
    H = energies[0] * sum_i |index><index|_i, optionally restricted to the
    independent-set (IS) subspace of a graph."""

    def __init__(self, index: int = 0, energies=(1,), code=qubit, IS_subspace=False, graph=None):
        """Default is that the first element in transition is the higher energy s."""
        self.index = index
        self.graph = graph
        self.energies = energies
        self.code = code
        if not self.code.logical_code:
            if not 0 <= self.index < self.code.d:
                raise Exception('Index exceeds qudit dimension.')
            # Single-qudit projector |index><index|.
            self._operator = np.zeros((self.code.d, self.code.d))
            self._operator[self.index, self.index] = 1
        else:
            if self.index != 0 and self.index != 1:
                raise Exception('Logical codes are qubits, so index must be 0 or 1.')
            if self.index == 0:
                self._operator = self.code.Q
            elif self.index == 1:
                self._operator = self.code.P
        self.IS_subspace = IS_subspace
        if self.IS_subspace:
            # Generate sparse mixing Hamiltonian
            assert graph is not None
            assert isinstance(graph, Graph)
            if code is not qubit:
                IS, num_IS = graph.independent_sets_qudit(self.code)
                self._diagonal_hamiltonian = np.zeros((num_IS, 1), dtype=float)
                for k in range(num_IS):
                    # Count qudits of configuration k sitting in level `index`.
                    self._diagonal_hamiltonian[k, 0] = np.sum(IS[k, ...] == self.index)
                self._hamiltonian = sparse.csr_matrix(
                    (self._diagonal_hamiltonian.T[0], (np.arange(num_IS), np.arange(num_IS))),
                    shape=(num_IS, num_IS))
            else:
                # We have already solved for this information
                # NOTE(review): this branch never sets self._diagonal_hamiltonian,
                # which left_multiply/right_multiply/evolve read in the IS case —
                # confirm they are not reached with code == qubit.
                independent_sets = enumerate_independent_sets(graph.graph)
                # Generate a list of integers corresponding to the independent sets in binary
                # All ones
                k = self.graph.num_independent_sets - 2
                self.mis_size = 0
                hamiltonian = np.zeros(self.graph.num_independent_sets, dtype=float)
                hamiltonian[-1] = 0
                for i in independent_sets:
                    hamiltonian[k] = len(i)
                    k -= 1
                self._hamiltonian = sparse.csr_matrix(
                    (hamiltonian,
                     (np.arange(self.graph.num_independent_sets), np.arange(self.graph.num_independent_sets))),
                    shape=(self.graph.num_independent_sets, self.graph.num_independent_sets))
        else:
            # Use full Hilbert space
            self._hamiltonian = None

    @property
    def hamiltonian(self):
        if self._hamiltonian is None:
            assert not self.IS_subspace
            try:
                assert self.graph is not None
            except AssertionError:
                print('self.graph must be not None to generate the Hamiltonian property.')
            # NOTE(review): the base (self.code.d * self.code.n) differs from
            # the d ** (code.n * n) dimension used elsewhere in this module —
            # confirm the intended Hilbert-space dimension.
            self._hamiltonian = sparse.csr_matrix(((self.code.d * self.code.n) ** self.graph.n,
                                                   (self.code.d * self.code.n) ** self.graph.n))
            for i in range(self.graph.n):
                self._hamiltonian = self._hamiltonian + tools.tensor_product(
                    [sparse.identity((self.code.d * self.code.n) ** i),
                     self._operator,
                     sparse.identity((self.code.d * self.code.n) ** (self.graph.n - i - 1))],
                    sparse=True)
        return self.energies[0] * self._hamiltonian

    def left_multiply(self, state: State):
        """Apply H to `state` from the left via Fortran-order reshapes
        (full Hilbert space) or elementwise (IS subspace)."""
        if not self.IS_subspace:
            temp = np.zeros_like(state)
            # For each logical qubit
            state_shape = state.shape
            for i in range(state.number_logical_qudits):
                if self.code.logical_code:
                    temp = temp + self.code.left_multiply(state, [i], self._operator)
                elif not self.code.logical_code:
                    ind = self.code.d ** i
                    out = np.zeros_like(state, dtype=np.complex128)
                    if state.is_ket:
                        state = state.reshape((-1, self.code.d, ind), order='F')
                        # Note index start from the right (sN,...,s3,s2,s1)
                        out = out.reshape((-1, self.code.d, ind), order='F')
                        # Projector: copy only the `index` slice of qudit i.
                        out[:, self.index, :] = state[:, self.index, :]
                        state = state.reshape(state_shape, order='F')
                        out = out.reshape(state_shape, order='F')
                    else:
                        out = out.reshape((-1, self.code.d, self.code.d ** (state.number_physical_qudits - 1),
                                           self.code.d, ind), order='F')
                        state = state.reshape((-1, self.code.d, self.code.d ** (state.number_physical_qudits - 1),
                                               self.code.d, ind), order='F')
                        out[:, self.index, :, :, :] = state[:, self.index, :, :, :]
                        state = state.reshape(state_shape, order='F')
                        out = out.reshape(state_shape, order='F')
                    temp = temp + out
            return State(self.energies[0] * temp, is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code,
                         graph=self.graph)
        else:
            # Handle dimensions
            return State(self.energies[0] * self._diagonal_hamiltonian * state, is_ket=state.is_ket,
                         IS_subspace=state.IS_subspace,
                         code=state.code, graph=self.graph)

    def right_multiply(self, state: State):
        """Apply H to `state` from the right; kets are deferred to left_multiply."""
        if state.is_ket:
            print('Warning: right multiply functionality currently applies the operator and daggers the state.')
            return self.left_multiply(state).conj().T
        elif not self.IS_subspace:
            temp = np.zeros_like(state)
            # For each physical qubit
            state_shape = state.shape
            for i in range(state.number_logical_qudits):
                if self.code.logical_code:
                    temp = temp + self.code.right_multiply(state, [i], self._operator)
                else:
                    ind = self.code.d ** i
                    out = np.zeros_like(state)
                    out = out.reshape(
                        (-1, self.code.d, self.code.d ** (state.number_physical_qudits - 1), self.code.d, ind),
                        order='F')
                    state = state.reshape(
                        (-1, self.code.d, self.code.d ** (state.number_physical_qudits - 1), self.code.d, ind),
                        order='F')
                    out[:, :, :, self.index, :] = state[:, :, :, self.index, :]
                    state = state.reshape(state_shape, order='F')
                    out = out.reshape(state_shape, order='F')
                    temp = temp + out
            return State(self.energies[0] * temp, is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code,
                         graph=self.graph)
        else:
            return State(self.energies[0] * state * self._diagonal_hamiltonian.T, is_ket=state.is_ket,
                         IS_subspace=state.IS_subspace,
                         code=state.code, graph=self.graph)

    def evolve(self, state: State, time):
        r"""
        Use reshape to efficiently implement evolution under :math:`H_B=\\sum_i X_i`
        """
        if not self.IS_subspace:
            # We don't want to modify the original s
            out = state.copy()
            for i in range(state.number_logical_qudits):
                # Note that self._operator is not necessarily involutary
                out = self.code.rotation(out, [i], self.energies[0] * time, self._operator)
            return out
        else:
            if state.is_ket:
                # Handle dimensions
                return State(np.exp(-1j * time * self.energies[0] * self._diagonal_hamiltonian) * state,
                             is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
            else:
                exp_hamiltonian = np.exp(-1j * time * self.energies[0] * self._diagonal_hamiltonian)
                return State(exp_hamiltonian * state * exp_hamiltonian.conj().T,
                             is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code, graph=self.graph)
class HamiltonianRydberg(object):
    """Diagonal Hamiltonian of long-range Rydberg "tail" interactions.

    Interaction weights come from `tails_graph`; the allowed configurations are
    the independent sets of `hard_constraint_graph` when IS_subspace is True.

    NOTE(review): the non-IS branches of right_multiply/evolve reference
    self._operator and self.index, but __init__ never sets _operator — those
    paths would raise AttributeError; confirm they are unreachable.
    """

    def __init__(self, tails_graph: Graph, hard_constraint_graph=None, index: int = 0,
                 energies=(1,), code=qubit, IS_subspace=False):
        """Default is that the first element in transition is the higher energy s."""
        self.index = index
        self.energies = energies
        self.code = code
        self.tails_graph = tails_graph
        self.hard_constraint_graph = hard_constraint_graph
        if self.code.logical_code:
            raise Exception('Logical codes not supported.')
        self.IS_subspace = IS_subspace
        try:
            if self.tails_graph.periodic or self.hard_constraint_graph.periodic:
                raise Exception('Periodic graphs not yet supported.')
        except:
            # NOTE(review): this bare except swallows the Exception raised just
            # above (and any AttributeError when .periodic is missing), making
            # the periodicity check a no-op — confirm intent.
            pass
        # Generate tails matrix
        if self.IS_subspace:
            # Generate sparse mixing Hamiltonian
            assert hard_constraint_graph is not None
            assert isinstance(hard_constraint_graph, Graph)
            if code is not qubit:
                IS, num_IS = hard_constraint_graph.generate_independent_sets_qudit(self.code)
                self._diagonal_hamiltonian = np.zeros((num_IS, 1), dtype=float)
                for k in range(num_IS):
                    # Get state as bit string
                    # Multiply by weights matrix
                    # Sum result
                    weight = 0
                    where_rydberg = np.argwhere(self.hard_constraint_graph.independent_sets[k] == 0)
                    if len(where_rydberg) > 0:
                        where_rydberg = where_rydberg.T[0]
                    # Sum pairwise tail weights over all excited (Rydberg) atoms.
                    for (i, node1) in enumerate(where_rydberg):
                        for (j, node2) in enumerate(where_rydberg[i + 1:]):
                            weight += self.tails_graph.graph[node1][node2]['weight']
                    self._diagonal_hamiltonian[k, 0] = weight
                self._hamiltonian = sparse.csr_matrix(
                    (self._diagonal_hamiltonian.T[0], (np.arange(num_IS), np.arange(num_IS))),
                    shape=(num_IS, num_IS))
            else:
                # We have already solved for this information
                independent_sets = enumerate_independent_sets(self.hard_constraint_graph.graph)
                # Generate a list of integers corresponding to the independent sets in binary
                # All ones
                k = self.hard_constraint_graph.num_independent_sets - 2
                self.mis_size = 0
                self._diagonal_hamiltonian = np.zeros(self.hard_constraint_graph.num_independent_sets, dtype=float)
                self._diagonal_hamiltonian[-1] = 0
                for i in independent_sets:
                    weight = 0
                    where_rydberg = np.array(i)
                    if len(where_rydberg) > 0:
                        # NOTE(review): the inner loop variable shadows the outer
                        # `i`; harmless here since `i` is not used afterwards.
                        for (i, node1) in enumerate(where_rydberg):
                            for (j, node2) in enumerate(where_rydberg[i + 1:]):
                                weight += self.tails_graph.graph[node1][node2]['weight']
                    self._diagonal_hamiltonian[k] = weight
                    k -= 1
                self._hamiltonian = sparse.csr_matrix(
                    (self._diagonal_hamiltonian,
                     (np.arange(self.hard_constraint_graph.num_independent_sets), np.arange(self.hard_constraint_graph.num_independent_sets))),
                    shape=(self.hard_constraint_graph.num_independent_sets, self.hard_constraint_graph.num_independent_sets))
                self._diagonal_hamiltonian = np.expand_dims(self._diagonal_hamiltonian, axis=1)
        else:
            # Use full Hilbert space
            self._hamiltonian = None

    @property
    def hamiltonian(self):
        # Only available in the IS subspace; elsewhere nothing was built.
        if self._hamiltonian is None:
            raise Exception
        return self.energies[0] * self._hamiltonian

    def left_multiply(self, state: State):
        """Apply the diagonal tail Hamiltonian elementwise from the left."""
        return State(self.energies[0] * self._diagonal_hamiltonian * state, is_ket=state.is_ket,
                     IS_subspace=state.IS_subspace,
                     code=state.code, graph=self.hard_constraint_graph)

    def right_multiply(self, state: State):
        """Apply H from the right; kets are deferred to left_multiply."""
        if state.is_ket:
            print('Warning: right multiply functionality currently applies the operator and daggers the state.')
            return self.left_multiply(state).conj().T
        elif not self.IS_subspace:
            temp = np.zeros_like(state)
            # For each physical qubit
            state_shape = state.shape
            for i in range(state.number_logical_qudits):
                if self.code.logical_code:
                    temp = temp + self.code.right_multiply(state, [i], self._operator)
                else:
                    ind = self.code.d ** i
                    out = np.zeros_like(state)
                    out = out.reshape(
                        (-1, self.code.d, self.code.d ** (state.number_physical_qudits - 1), self.code.d, ind),
                        order='F')
                    state = state.reshape(
                        (-1, self.code.d, self.code.d ** (state.number_physical_qudits - 1), self.code.d, ind),
                        order='F')
                    out[:, :, :, self.index, :] = state[:, :, :, self.index, :]
                    state = state.reshape(state_shape, order='F')
                    out = out.reshape(state_shape, order='F')
                    temp = temp + out
            return State(self.energies[0] * temp, is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code,
                         graph=self.hard_constraint_graph)
        else:
            return State(self.energies[0] * state * self._diagonal_hamiltonian.T, is_ket=state.is_ket,
                         IS_subspace=state.IS_subspace,
                         code=state.code, graph=self.hard_constraint_graph)

    def evolve(self, state: State, time):
        r"""
        Use reshape to efficiently implement evolution under :math:`H_B=\\sum_i X_i`
        """
        if not self.IS_subspace:
            # We don't want to modify the original s
            out = state.copy()
            for i in range(state.number_logical_qudits):
                # Note that self._operator is not necessarily involutary
                out = self.code.rotation(out, [i], self.energies[0] * time, self._operator)
            return out
        else:
            if state.is_ket:
                # Handle dimensions
                return State(np.exp(-1j * time * self.energies[0] * self._diagonal_hamiltonian) * state,
                             is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code,
                             graph=self.hard_constraint_graph)
            else:
                exp_hamiltonian = np.exp(-1j * time * self.energies[0] * self._diagonal_hamiltonian)
                return State(exp_hamiltonian * state * exp_hamiltonian.conj().T,
                             is_ket=state.is_ket, IS_subspace=state.IS_subspace, code=state.code,
                             graph=self.hard_constraint_graph)
|
18,035 | f9e56d83b09cff3eeb3dc74012dcfe4befb25a74 | # Generated by Django 2.2.11 on 2020-03-19 00:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Restaurant.email optional (blank=True) with max_length=255."""

    dependencies = [
        ('core', '0003_restaurant_date_modified'),
    ]
    operations = [
        migrations.AlterField(
            model_name='restaurant',
            name='email',
            field=models.EmailField(blank=True, max_length=255),
        ),
    ]
|
18,036 | cb71addcf19ae8580745b2943d023eccd7360683 | """
samples price data with timestamps close enough get summarized
"""
import luigi
import config
from btc.preprocess.DecompressPrices import DecompressPrices
class GroupByTimeStamp(luigi.Task):
    """Summarize trades sharing a timestamp into one volume-weighted row.

    Reads sorted (timestamp, price, volume) CSV rows and writes one
    (timestamp, volume-weighted average price, total volume) row per
    distinct timestamp.
    """

    def requires(self):
        return [DecompressPrices()]

    def output(self):
        return luigi.LocalTarget(config.data_dir + "preprocess/coinbaseUSD_grouped.csv")

    def run(self):
        with self.input()[0].open() as fin, self.output().open('w') as fout:
            # Assumes input timestamps are sorted ascending.
            weighted_sum = 0.0  # sum(price * volume) for the current group
            volume_sum = 0.0    # sum(volume) for the current group
            last_stamp = -1     # timestamp of the group being accumulated

            def flush():
                # Emit the accumulated group; skip groups with zero volume.
                if volume_sum != 0:
                    fout.write("{},{},{}\n".format(last_stamp, weighted_sum / volume_sum, volume_sum))

            for line in fin:
                timestamp, price, volume = [x.strip() for x in line.split(',')]
                timestamp = int(timestamp)
                price = float(price)
                volume = float(volume)
                if last_stamp < 0:
                    # First row: start the first group.
                    last_stamp = timestamp
                if timestamp > last_stamp:
                    # Bug fix: label the summary with the group's own timestamp
                    # (the original wrote the *next* group's timestamp).
                    flush()
                    last_stamp = timestamp
                    weighted_sum = 0.0
                    volume_sum = 0.0
                weighted_sum += price * volume
                volume_sum += volume
            # Bug fix: the original silently dropped the final group.
            flush()
|
18,037 | 6bd5ba5af9e75a1a0edee14891c5263f1a63a2dc | user_name = "Selfieking"
# Prompt for a username and compare it against the stored account name.
name = input("Enter the Username :")
matched = (name == user_name)
if matched:
    print("Welcome to worstmail:", name)
else:
    print("Invalid login")
|
18,038 | 64a3fd37e07318bf54139d4417a032cd59a7d477 | import logging
import time
from common.common_fun import Common,NoSuchElementException
from common.desired_caps_app import appium_desired
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
#########注册###########
class RegisterView(Common):
# '//android.widget.EditText[@text="注册手机号"]'
registerBtn = (By.XPATH,'//android.widget.Button[@text="注 册"]')
phoneNumber = (By.XPATH,'//android.widget.EditText[@text="注册手机号"]')
passwd1 = (By.XPATH,'//android.widget.EditText[@text="初始密码"]')
passwd2 = (By.XPATH,'//android.widget.EditText[@text="确认密码"]')
subjectType = (By.XPATH,'//android.widget.TextView[@text="主体类型"]')
subjectType2 = (By.XPATH,'//android.widget.TextView[@text="农户"]')
name = (By.XPATH,'//android.widget.EditText[@text="姓名"]')
documentType = (By.XPATH,'//android.widget.TextView[@text="证件类型"]')
documentType2 = (By.XPATH,'//android.widget.TextView[@text="军人证"]')
documentNumber = (By.XPATH,'//android.widget.EditText[@text="证件号码"]')
region = (By.XPATH,'//android.widget.TextView[@text="所在区域"]')
region2 = (By.XPATH,'//android.widget.TextView[@text="山东省"]')
region3 = (By.XPATH,'//android.widget.TextView[@text="德州市"]')
region4 = (By.XPATH,'//android.widget.Button[@text="确定"]')
address = (By.XPATH,'//android.widget.EditText[@text="常用服务地址"]')
agreement = (By.XPATH,'//android.widget.TextView[@text="阅读并同意协议"]')
submissionBtn = (By.XPATH,'//android.widget.Button[@text="提交注册"]')
check = (By.XPATH,'//android.widget.Button[@text="进圈看看"]')
def register_action(self,phone,password1,password2,name,documentType,documentNumber,region,address):
logging.info("========select region=========")
self.driver.find_elements(By.CLASS_NAME,"android.widget.ImageView")[4].click()
self.driver.implicitly_wait(20)
logging.info('============register_action==============')
self.driver.find_element_by_id("android:id/button2").click()
time.sleep(2)
self.driver.find_element_by_id("android:id/button2").click()
self.driver.find_elements_by_android_uiautomator("new UiSelector().text(\"我的\")")[0].click()
self.driver.find_element(*self.registerBtn).click()
self.driver.find_element(*self.phoneNumber).send_keys(phone)
time.sleep(3)
self.driver.find_element(*self.passwd1).send_keys(password1)
self.driver.find_element(*self.passwd2).send_keys(password2)
self.driver.find_element(*self.subjectType).click()
self.driver.find_element(*self.subjectType2).click()
self.driver.find_element(*self.name).send_keys(name)
self.driver.find_element(*self.documentType).click()
self.driver.find_element_by_xpath('//android.widget.TextView[@text="%s"]'%documentType).click()
self.driver.find_element(*self.documentNumber).send_keys(documentNumber)
li1 = str(region).split("-")
logging.info("------region is %s------"%region)
if len(li1) <2:
logging.error("区域不正确")
elif len(li1) ==2:
self.driver.find_element(*self.region).click()
self.driver.find_element(*self.region2).click()
self.driver.find_element(*self.region3).click()
self.driver.find_element(*self.region4).click()
elif len(li1) ==3:
self.driver.find_element(*self.region).click()
self.driver.find_element(*self.region2).click()
self.driver.find_element(*self.region3).click()
self.driver.find_element_by_xpath('//android.widget.TextView[@text="%s"]'%li1[2]).click()
self.driver.find_element(*self.region4).click()
elif len(li1) ==4:
self.driver.find_element(*self.region).click()
self.driver.find_element(*self.region2).click()
self.driver.find_element(*self.region3).click()
self.driver.find_element_by_xpath('//android.widget.TextView[@text="%s"]'%li1[2]).click()
self.driver.find_element_by_xpath('//android.widget.TextView[@text="%s"]'%li1[3]).click()
self.driver.find_element(*self.region4).click()
elif len(li1) ==5:
self.driver.find_element(*self.region).click()
self.driver.find_element(*self.region2).click()
self.driver.find_element(*self.region3).click()
self.driver.find_element_by_xpath('//android.widget.TextView[@text="%s"]'%li1[2]).click()
self.driver.find_element_by_xpath('//android.widget.TextView[@text="%s"]'%li1[3]).click()
self.driver.find_element_by_xpath('//android.widget.TextView[@text="%s"]'%li1[4]).click()
self.driver.find_element(*self.region4).click()
else:
self.driver.find_element(*self.region).click()
self.driver.find_element(*self.region2).click()
self.driver.find_element(*self.region3).click()
self.driver.find_element_by_xpath('//android.widget.TextView[@text="%s"]'%li1[2]).click()
self.driver.find_element_by_xpath('//android.widget.TextView[@text="%s"]'%li1[3]).click()
self.driver.find_element_by_xpath('//android.widget.TextView[@text="%s"]'%li1[4]).click()
self.driver.find_element_by_xpath('//android.widget.TextView[@text="%s"]'%li1[5]).click()
self.driver.find_element(*self.address).send_keys(address)
self.driver.find_element(*self.agreement).click()
self.driver.find_element(*self.submissionBtn).click()
# logging.info('username is:%s' %username)
# self.driver.find_element(*self.username_type).send_keys(username)
#
# logging.info('password is:%s'%password)
# self.driver.find_elements(*self.password_type)[1].send_keys(password)
#
# logging.info('click loginBtn')
# self.driver.find_element(*self.loginBtn).click()
# logging.info('login finished!')
    def check_registerStatus(self):
        """Return True when the post-registration marker element is present.

        Waits a fixed 4 s for the result screen to settle, then probes for
        ``self.check``; on failure, logs the error and captures a screenshot
        named 'regist fail' for debugging.
        """
        logging.info('====check_register_Status======')
        try:
            time.sleep(4)  # fixed wait for the registration result screen to load
            element = self.driver.find_element(*self.check)
        except NoSuchElementException:
            logging.error('regist Fail!')
            self.getScreenShot('regist fail')
            return False
        else:
            logging.info('regist success!')
            return True
# Manual smoke test: start an Appium session and exercise the registration
# status check on its own.
if __name__ == '__main__':
    driver=appium_desired()
    l=RegisterView(driver)
    # l.register_action('17708179510',123456)
l.check_registerStatus() |
18,039 | b42a7ac2c3399c41635819193614aee0171048c6 | import pandas as pd
import numpy as np
from read_file import read_file
class user(object):
    """
    Read and preprocess the Yelp Challenge user dataset.

    Attributes:
        df      : DataFrame with all user information.
        friends : DataFrame mapping user_id -> [friend1, friend2, ...].
    """
    def __init__(self):
        self.df = pd.DataFrame()       # all user information
        self.friends = pd.DataFrame()  # user_id, [friend1, friend2, ...]
    def load(self):
        """
        Load user records from disk into self.df.
        INPUT: None.
        OUTPUT: None.
        """
        #self.df = read_file("../data/yelp_academic_dataset_user.json") #Full Data.
        self.df = read_file("../data/user300.json")  # small sample for local machine
        #self.get_friend_list()
        #self.save_friend_nodes()
    def preprocess(self):
        """
        Pre-process data: dummify dict/list columns, drop nominal columns,
        and fill the remaining NaNs with zero.
        INPUT: None.
        OUTPUT: None.
        """
        self.get_dummies_dict()
        self.get_dummies_list()
        self.drop_columns()
        self.df.fillna(0,inplace=True)
    def get_friend_list(self):
        """
        Keep an adjacency list (user_id, friends) in self.friends.
        INPUT: None.
        OUTPUT: None.
        """
        self.friends = self.df[['user_id','friends']]
    def save_friend_nodes(self):
        """
        Turn the adjacency list into an edge list written as a .tsv file.

        Bug fix: the original iterated user ids and friend lists in two
        independent nested loops, which paired EVERY user with EVERY friend
        list (a Cartesian product). Each friend list belongs to exactly one
        user, so they must be iterated in lockstep with zip().
        """
        print("Exporting to file tsv ...")
        count_edge = 0
        count_node = 0
        with open('../data/yelp.tsv','w') as f:
            for user_id, friends in zip(self.df['user_id'], self.df['friends']):
                count_node += 1
                for friend in friends:
                    f.write("%s\t%s\n" % (user_id, friend))
                    count_edge += 1
        print("Graph Summary: %d nodes, %d edges." % (count_node, count_edge))
    def get_dummies_dict(self, \
                         cols=('votes',\
                               'compliments'),\
                         drop_=True):
        """
        Expand dict-valued columns into one numeric column per key.
        INPUT:
           - cols : (iterable) column names holding dicts.
           - drop_: (boolean) drop the original column after dummifying.
        OUTPUT: None
        """
        # NOTE: defaults are tuples (immutable) to avoid the shared
        # mutable-default-argument pitfall; iteration behavior is unchanged.
        for col in cols:
            print("Pre-processing " + col + "...")
            temp = pd.DataFrame(self.df[col].tolist())
            temp.columns = col + "_" + temp.columns
            if drop_:
                self.df.drop(col,axis = 1, inplace=True)
            self.df = pd.concat([self.df, temp],axis=1)
    def get_dummies_list(self,
                         cols=('elite',),\
                         drop_=True):
        """
        One-hot encode list-valued columns (one indicator per list element).
        INPUT:
           - cols : (iterable) column names holding lists.
           - drop_: (boolean) drop the original column after dummifying.
        OUTPUT: None
        """
        for col in cols:
            print("Pre-processing " + col + "...")
            temp = pd.get_dummies(self.df[col].apply(pd.Series).stack(),drop_first=True)\
                    .astype(int).sum(level=0).astype(int)
            if drop_:
                self.df.drop(col,axis = 1, inplace=True)
            self.df = pd.concat([self.df, temp],axis=1)
    def drop_columns(self, \
                     cols=('_id',\
                           'friends',\
                           'type',\
                           'yelping_since')):
        """
        Remove nominal/identifier columns that carry no numeric signal.
        INPUT: None.
        OUTPUT: None.
        """
        for col in cols:
            del self.df[col]
class tip(object):
    '''
    Thin loader for the Yelp tip dataset.
    '''
    def __init__(self):
        # Start with an empty frame until load() is called.
        self.df = pd.DataFrame()
    def load(self):
        path = "../data/yelp_academic_dataset_tip.json"
        self.df = read_file(path)
class business(object):
    '''
    Thin loader for the Yelp business dataset.
    '''
    def __init__(self):
        # Start with an empty frame until load() is called.
        self.df = pd.DataFrame()
    def load(self):
        path = "../data/yelp_academic_dataset_business.json"
        self.df = read_file(path)
class checkin(object):
    '''
    Thin loader for the Yelp checkin dataset.
    '''
    def __init__(self):
        # Start with an empty frame until load() is called.
        self.df = pd.DataFrame()
    def load(self):
        path = "../data/yelp_academic_dataset_checkin.json"
        self.df = read_file(path)
class review(object):
    '''
    Thin loader for the Yelp review dataset.
    '''
    def __init__(self):
        # Start with an empty frame until load() is called.
        self.df = pd.DataFrame()
    def load(self):
        path = "../data/yelp_academic_dataset_review.json"
        self.df = read_file(path)
|
18,040 | 586350b19f44240114d7d9923d5b6fee9f63e69c | msg3 = "This sentence has both \"double quotes\" and 'singe quotes'."
print(msg3)  # msg3 mixes escaped double quotes with plain single quotes
print('-'*20)  # short separator line
print('-'*50)  # long separator line
|
18,041 | ab789ed0f7e61f9e58593caf15b338b67adddd27 | import argparse
import grpc
import timestamp_service_pb2
import timestamp_service_pb2_grpc
def create_arg_parser():
    """Build the command-line parser for the Timestamp client."""
    parser = argparse.ArgumentParser(
        description="Send a Timestamp request to the Timestamp server")
    parser.add_argument(
        "--server", "-a", default="localhost:50051",
        help="The address of the Timestamp server")
    return parser
def main():
    """Connect to the Timestamp server, send one request, print the reply."""
    arg_parser = create_arg_parser()
    args = arg_parser.parse_args()

    print("Connecting to Timestamp server:")
    print(args.server)
    print("")

    channel = grpc.insecure_channel(args.server)
    stub = timestamp_service_pb2_grpc.TimestampServiceStub(channel)

    request = timestamp_service_pb2.TimestampRequest()
    print("Sending Timestamp request:")
    # Bug fix: the original printed the literal string "{}\n" because the
    # .format() call was missing; show the actual request message instead.
    print("{}\n".format(request))

    reply = stub.Timestamp(request)
    print("Received reply:")
    print(reply)
if __name__ == "__main__":
    main()  # run the client only when executed as a script
|
18,042 | ba53aa79eb7a74858880ab4533dba3b9cffdd6ae | # Generated by Django 2.1.2 on 2018-11-25 18:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the `Carttyy` model (with a
    # fresh auto-increment primary key `item_no`) and deletes the old
    # `Cartty` model.
    # NOTE(review): DeleteModel drops the old table outright -- no data is
    # copied into the new model; confirm that is intended.

    dependencies = [
        ('products', '0014_auto_20181125_1850'),
    ]

    operations = [
        migrations.CreateModel(
            name='Carttyy',
            fields=[
                ('item_no', models.AutoField(primary_key=True, serialize=False)),
            ],
        ),
        migrations.DeleteModel(
            name='Cartty',
        ),
    ]
|
18,043 | 1c062aae1271e6451fb6fc691aa31d7d6fc5b9a2 | #
# @lc app=leetcode id=1293 lang=python3
#
# [1293] Shortest Path in a Grid with Obstacles Elimination
#
# https://leetcode.com/problems/shortest-path-in-a-grid-with-obstacles-elimination/description/
#
# algorithms
# Hard (42.53%)
# Total Accepted: 13.9K
# Total Submissions: 32.6K
# Testcase Example: '[[0,0,0],[1,1,0],[0,0,0],[0,1,1],[0,0,0]]\n1'
#
# Given a m * n grid, where each cell is either 0 (empty) or 1 (obstacle). In
# one step, you can move up, down, left or right from and to an empty cell.
#
# Return the minimum number of steps to walk from the upper left corner (0, 0)
# to the lower right corner (m-1, n-1) given that you can eliminate at most k
# obstacles. If it is not possible to find such walk return -1.
#
#
# Example 1:
#
#
# Input:
# grid =
# [[0,0,0],
# [1,1,0],
# [0,0,0],
# [0,1,1],
# [0,0,0]],
# k = 1
# Output: 6
# Explanation:
# The shortest path without eliminating any obstacle is 10.
# The shortest path with one obstacle elimination at position (3,2) is 6. Such
# path is (0,0) -> (0,1) -> (0,2) -> (1,2) -> (2,2) -> (3,2) -> (4,2).
#
#
#
#
# Example 2:
#
#
# Input:
# grid =
# [[0,1,1],
# [1,1,1],
# [1,0,0]],
# k = 1
# Output: -1
# Explanation:
# We need to eliminate at least two obstacles to find such a walk.
#
#
#
# Constraints:
#
#
# grid.length == m
# grid[0].length == n
# 1 <= m, n <= 40
# 1 <= k <= m*n
# grid[i][j] == 0 or 1
# grid[0][0] == grid[m-1][n-1] == 0
#
#
#
class Solution:
    def shortestPath(self, grid: 'List[List[int]]', k: int) -> int:
        """Minimum steps from (0,0) to (m-1,n-1) eliminating at most k obstacles.

        BFS over states (row, col, eliminations remaining); a state is only
        re-queued when it arrives with strictly more eliminations left than
        any previous visit, which keeps the frontier small. Returns -1 when
        the goal is unreachable.

        The original file left this method body empty; this implements it.
        """
        from collections import deque  # local import: file has no import block
        m, n = len(grid), len(grid[0])
        # With enough eliminations to clear any path, the answer is just the
        # Manhattan distance.
        if k >= m + n - 2:
            return m + n - 2
        # best[r][c] = most eliminations remaining on any arrival at (r, c)
        best = [[-1] * n for _ in range(m)]
        best[0][0] = k
        queue = deque([(0, 0, k)])
        steps = 0
        while queue:
            for _ in range(len(queue)):
                r, c, rem = queue.popleft()
                if r == m - 1 and c == n - 1:
                    return steps
                for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < m and 0 <= nc < n:
                        nrem = rem - grid[nr][nc]  # spend one elimination on an obstacle
                        if nrem > best[nr][nc]:
                            best[nr][nc] = nrem
                            queue.append((nr, nc, nrem))
            steps += 1  # one BFS level == one step
        return -1
18,044 | a36779def51d900573592f6a8b799cb73307d65f | # Python Standard Library Imports
import json
# Third Party (PyPI) Imports
import requests
from requests_oauthlib import OAuth1
# Django Imports
from django.conf import settings
# HTK Imports
from htk.lib.linkedin.constants import *
def get_profile_data(resource_owner_key, resource_owner_secret):
    """Fetch the LinkedIn profile for the given OAuth1 token pair.

    Builds an OAuth1 signer from the app credentials in Django settings,
    calls the LinkedIn profile API with the configured field list, and
    returns the decoded JSON payload.
    """
    auth = OAuth1(
        settings.SOCIAL_AUTH_LINKEDIN_KEY,
        client_secret=settings.SOCIAL_AUTH_LINKEDIN_SECRET,
        resource_owner_key=resource_owner_key,
        resource_owner_secret=resource_owner_secret
    )
    url = LINKEDIN_PROFILE_API_BASE_URL % ','.join(LINKEDIN_PROFILE_FIELDS)
    response = requests.get(url=url, auth=auth)
    # TODO: uncomment to purposely raise Exception and see format
    #raise Exception(response.text)
    return json.loads(response.text)
|
18,045 | 932cd3dc524430bf18993a90134881662048ea8a | import random
import numpy as np
import tensorflow as tf
import copy
def tensor_to_array(tf_array):
    """Evaluate a TF tensor and return its value as a nested Python list.

    Bug fix: the original created a tf.Session and never closed it, leaking
    the session's resources on every call; use it as a context manager so
    it is always released.
    """
    with tf.Session() as sess:
        evaluated = sess.run(tf_array)
    return evaluated.tolist()
def only_show_next_board(board, num, player):
    """Return the board after `player` sows house `num`, without mutating `board`.

    board : list of 14 ints -- houses 0-5 and store 6 belong to player 0,
            houses 7-12 and store 13 to player 1
    num   : house index relative to the player (0-5); out-of-range values
            return an unchanged copy
    player: 0 or 1

    Fix: removed the unused `stone_num` local, which was also evaluated
    before the range check and could raise IndexError for out-of-range num.
    """
    cur = copy.deepcopy(board)
    if num < 0 or num > 5:
        return cur
    idx = 7*player + num
    stones = cur[idx]
    cur[idx] = 0
    nxt = (idx+1) % 14
    while stones > 0:
        if stones > 1:
            cur[nxt] += 1
            stones -= 1
            nxt = (nxt+1) % 14
        else:
            # Last stone: own store, capture, or plain drop.
            if nxt == 7*player + 6:
                cur[nxt] += 1
                stones -= 1
            elif 7*player <= nxt <= 7*player + 5 and cur[nxt] == 0 and cur[12-nxt] != 0:
                # Capture: last stone lands in an empty own house whose
                # opposite house is non-empty -- both go to the store.
                cur[7*player + 6] += (1 + cur[12-nxt])
                cur[12-nxt] = 0
                stones -= 1
            else:
                cur[nxt] += 1
                stones -= 1
    return cur
class Kalah_Board():
    """A 6-house Kalah board.

    Layout: indices 0-5 are player 0's houses and 6 is player 0's store;
    indices 7-12 are player 1's houses and 13 is player 1's store.
    """
    def __init__(self):
        # 4 stones in every house, both stores empty.
        L = []
        for i in range(6*2+2):
            L.append(4)
        L[6] = 0
        L[13] = 0
        self.board = L
    def move_stones(self, num, player):
        """Sow house `num` (0-5, relative to `player`) and mutate the board.

        Return codes:
          0 -- the game ended (a side emptied; remaining stones swept to
               the other store)
          1 -- last stone landed in the player's store: same player moves again
          2 -- capture (last stone in an empty own house, opposite non-empty)
          3 -- plain move; turn passes
        """
        moveable = self.list_houses_of_next_possible_move(player)
        opponent = 0 if player == 1 else 1
        if len(moveable) == 0:
            # Current player has no stones: sweep the opponent's houses into
            # the opponent's store and end the game.
            for i in range(6):
                self.board[7*opponent+6] += self.board[7*opponent+i]
                self.board[7*opponent+i] = 0
            return 0 # pass
        else:
            id = 7*player+num
            temp = self.board[id]
            self.board[id] = 0
            next_house = (id+1)%14
            # check_flag: does the opponent still own any stones?
            check_flag = 0
            for i in range(6):
                if self.board[7*opponent+i] != 0:
                    check_flag = 1
            if check_flag == 1:
                while(temp > 0):
                    if temp > 1:
                        # Not the last stone: drop one and advance.
                        self.board[next_house] += 1
                        temp -= 1
                        next_house = (next_house+1)%14
                    else:
                        # Last stone: decide between store / capture / plain drop.
                        if next_house == 7*player+6:
                            self.board[next_house] += 1
                            temp -= 1
                            # check_flag_dash: does the mover still have stones?
                            check_flag_dash = 0
                            for i in range(6):
                                if self.board[7*player+i] != 0:
                                    check_flag_dash = 1
                            if check_flag_dash == 1:
                                return 1 # continue player's turn
                            else:
                                # Mover is empty: sweep opponent's side, game over.
                                for i in range(6):
                                    self.board[7*opponent+6] += self.board[7*opponent+i]
                                    self.board[7*opponent+i] = 0
                                return 0
                        elif 7*player <= next_house <= 7*player+5 and self.board[next_house] == 0 and self.board[12-next_house] != 0:
                            # Capture: own empty house with a non-empty opposite.
                            self.board[7*player+6] += (1+self.board[12-next_house])
                            self.board[12-next_house] = 0
                            temp -= 1
                            # check_flag_dash: does the opponent still have stones?
                            check_flag_dash = 0
                            for i in range(6):
                                if self.board[7*opponent+i] != 0:
                                    check_flag_dash = 1
                            if check_flag_dash == 1:
                                return 2 # capture ("ryoudori")
                            else:
                                # Opponent emptied: sweep mover's side, game over.
                                for i in range(6):
                                    self.board[7*player+6] += self.board[7*player+i]
                                    self.board[7*player+i] = 0
                                return 0
                        else:
                            self.board[next_house] += 1
                            temp -= 1
                            return 3 # normal
            else:
                # Opponent already empty: sweep the mover's houses into the
                # mover's store and end the game.
                for i in range(6):
                    self.board[7*player+6] += self.board[7*player+i]
                    self.board[7*player+i] = 0
                return 0 # pass
    def show_board(self):
        """Print the board with player 1's houses on top (right-to-left)."""
        print(" ", end = "")
        for i in range(6):
            print(self.board[12-i], end = " ")
        print(" ", end = "")
        print("\n", end = "")
        print(self.board[13], end = " ")
        for i in range(6):
            print(" ", end = "")
        print(self.board[6], end = "")
        print("\n", end = "")
        print(" ", end = "")
        for i in range(6):
            print(self.board[i], end = " ")
        print(" ", end = "")
        print("\n", end = "")
    def list_houses_of_next_possible_move(self, player): # player is 0 or 1
        """Return the relative indices (0-5) of `player`'s non-empty houses."""
        non_null_houses = []
        for i in range(6):
            if self.board[7*player+i] != 0:
                non_null_houses.append(i)
        return non_null_houses
class Playout_Kalah(Kalah_Board):
    """A playable Kalah game tracking the turn counter, current player and winner."""
    def __init__(self, turn = 0, start_player = 0):
        super().__init__()
        self.player = start_player
        self.turn = turn
        self.winner = None
    def is_finished(self):
        """True once a winner (or a draw) has been recorded."""
        return self.winner is not None
    def list_houses_of_next_possible_move(self, player):
        # The `player` argument is intentionally ignored in favour of the
        # tracked current player, matching the original behaviour.
        return super().list_houses_of_next_possible_move(self.player)
    def get_current_player(self):
        return self.player
    def get_next_player(self):
        return 1 if self.player == 0 else 0
    def shift_player(self):
        self.player = self.get_next_player()
    def move_stones(self, num, player):
        """Delegate to the board move; end the game or advance the turn."""
        outcome = super().move_stones(num, player)
        if outcome == 0:
            # Board reported game over.
            return self.finish_game()
        if self.board[6] > 24 or self.board[13] > 24:
            # Majority of the 48 stones banked: result is decided.
            return self.finish_game()
        if outcome != 1:
            # Anything but a store landing passes the turn.
            self.player = self.get_next_player()
        self.turn += 1
    def show_score(self):
        print("player 0", self.board[6])
        print("player 1", self.board[13])
    def finish_game(self):
        """Record and return the winner: 0, 1, or -1 for a draw."""
        score_0 = self.board[6]
        score_1 = self.board[13]
        if score_0 > score_1:
            self.winner = 0
        elif score_0 < score_1:
            self.winner = 1
        else:
            self.winner = -1
        return self.winner
# Input vector (1 row x 14 columns) representing the current board state.
X = tf.placeholder(tf.float32, shape = (1,14))
# Weights and biases.
W_0 = tf.Variable(tf.truncated_normal([14, 32], dtype=tf.float32))
W_1 = tf.Variable(tf.truncated_normal([32, 14], dtype=tf.float32))
b_0 = tf.Variable(tf.zeros([32], dtype=tf.float32))
b_1 = tf.Variable(tf.zeros([14], dtype=tf.float32))
# Q-network (4 layers): 14 inputs -> 32 hidden -> 14 outputs (one Q per house).
hidden_1 = tf.sigmoid(tf.nn.softmax(tf.matmul(X, W_0)+b_0))
y = tf.nn.softmax(tf.matmul(hidden_1, W_1)+b_1)
epsilon = tf.Variable(0.5) # exploration rate for the epsilon-greedy policy
with tf.compat.v1.Session() as sess:
    # Restore the latest checkpoint if one exists, else start fresh.
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state('./')
    if ckpt:
        last_model = ckpt.model_checkpoint_path
        saver.restore(sess, last_model)
        print("successfully restored")
    else:
        sess.run(tf.global_variables_initializer())
    gamma = 0.9 # discount factor
    episode_num = 13 # number of training episodes
    trial_num = 80 # how many moves to attempt per episode
    for i in range(episode_num):
        print("episode", i)
        playout = Playout_Kalah()
        playouts = [] # buffer the transitions collected this episode
        for j in range(trial_num):
            if playout.is_finished():
                break
            player_name = playout.get_current_player()
            before_player_name = 1 if player_name == 1 else 0
            old_board = copy.deepcopy(playout.board)
            possible_choices = playout.list_houses_of_next_possible_move(player_name) # values in 0..5
            possible_choices = list(map(lambda x: 7*player_name+x, possible_choices)) # values in 0..13
            a = np.random.rand()
            next_action = 0 # which house to pick (absolute index 0..13)
            print(player_name)
            print(old_board)
            if a > sess.run(epsilon): # act greedily on the Q-values
                [tensor_of_q] = sess.run(y, feed_dict = {X : [playout.board]})
                list_of_q = tensor_of_q.tolist()
                for k in range(14):
                    list_of_q[k] = [list_of_q[k], k]
                list_of_q.sort() # yields [Q-value, house] pairs in ascending order
                for k in range(14):
                    if list_of_q[13-k][1] in possible_choices:
                        next_action = list_of_q[13-k][1]
                        break
                #high_q = sess.run(tf.argmax(sess.run(y, feed_dict = {X : [pos]}), axis = 1))[0] # index of the action with the highest Q-value
            else: # explore (move randomly)
                rand_move = np.random.randint(0, len(possible_choices))
                next_action = possible_choices[rand_move]
            new_board = only_show_next_board(playout.board, next_action-7*player_name, player_name)
            playout.move_stones(next_action-7*player_name, player_name)
            # max_next_q = gamma * highest Q-value in the next state.
            # It must be computed with the network BEFORE the update, so it is
            # evaluated here (Fixed Target Q-Network).
            max_next_q = tf.multiply(tf.constant(gamma), tf.reduce_max(sess.run(y, feed_dict = {X : [new_board]})))
            if playout.is_finished():
                # Reward clipping (+1 on a win, 0.5 on a draw, -1 on a loss).
                if before_player_name == playout.winner:
                    playouts.append([old_board, next_action, 1, new_board, max_next_q, 1]) # trailing 1/0 is the finished flag
                elif playout.winner == -1:
                    playouts.append([old_board, next_action, 0.5, new_board, max_next_q, 1])
                else:
                    playouts.append([old_board, next_action, -1, new_board, max_next_q, 1])
                playout = Playout_Kalah()
            else:
                if before_player_name == playout.get_current_player(): # if the turn passed, negate the next-state value
                    playouts.append([old_board, next_action, 0, new_board, max_next_q, 0]) # (s, a, r, s', max_next_q, done)
                else:
                    playouts.append([old_board, next_action, 0, new_board, tf.multiply(tf.constant(-1, tf.float32), max_next_q), 0])
        epsilon = tf.math.maximum(tf.constant(0.1), tf.constant(0.99)*epsilon)
        random_index_list = [] # replay the transitions in random order
        L = [j for j in range(len(playouts))]
        for j in range(len(playouts)): # decide the replay order
            cur = np.random.randint(0, len(playouts)-j)
            num = L[cur]
            random_index_list.append(num)
            L.pop(cur)
        for j in range(len(playouts)):
            print(j)
            cur = random_index_list[j]
            current_data = playouts[cur] # fetch one transition
            print(current_data)
            action_num = current_data[1] # which house the move came from
            max_next_q = tf.constant(0, dtype = tf.float32)
            if current_data[5] == 0: # if s' is not terminal, use the precomputed max_next_q
                max_next_q = current_data[4]
            # Minimize (r + max_next_q - Q(s,a))^2. current_data[2] must be cast
            # to tf.float32 here or TF complains about mismatched dtypes.
            train_step = tf.compat.v1.train.GradientDescentOptimizer(0.3).minimize(tf.reduce_sum(tf.square(tf.constant(current_data[2], dtype = tf.float32)+max_next_q-y[0][action_num])))
            sess.run(train_step, feed_dict={X : [current_data[0]]})
        # Debug dump of the learned parameters after each episode.
        print(sess.run(W_0))
        print(sess.run(W_1))
        print(sess.run(b_0))
        print(sess.run(b_1))
        if (i+1) % 5 == 0:
            saver = tf.train.Saver()
            saver.save(sess, "model.ckpt")
        if (i+1) % 5 == 0:
            # Evaluation: 100 games of DQN vs a random player, alternating sides.
            DQN_0 = 0
            DQN_1 = 0
            RANDOM_0 = 0
            RANDOM_1 = 0
            DRAW = 0
            for j in range(100):
                playout = Playout_Kalah()
                while(True):
                    if playout.is_finished():
                        playout.show_board()
                        playout.show_score()
                        if playout.winner == -1:
                            print("DRAW")
                        else:
                            print("The winner is player", playout.winner)
                        if j%2 == 0:
                            if playout.winner == 0:
                                DQN_0 += 1
                            elif playout.winner == 1:
                                RANDOM_1 += 1
                            else:
                                DRAW += 1
                        else:
                            if playout.winner == 0:
                                RANDOM_0 += 1
                            elif playout.winner == 1:
                                DQN_1 += 1
                            else:
                                DRAW += 1
                        break
                    #playout.show_board()
                    player_name = playout.get_current_player()
                    possible_choices = playout.list_houses_of_next_possible_move(player_name)
                    #print("player", player_name)
                    if possible_choices == []:
                        playout.move_stones(-1, player_name)
                    else:
                        if j%2 == 0:
                            if player_name == 0:
                                possible_choices = list(map(lambda x: 7*player_name+x, possible_choices))
                                next_action = 0
                                [tensor_of_q] = sess.run(y, feed_dict = {X : [playout.board]})
                                list_of_q = tensor_of_q.tolist()
                                for k in range(14):
                                    list_of_q[k] = [list_of_q[k], k]
                                list_of_q.sort() # yields [Q-value, house] pairs in ascending order
                                for k in range(14):
                                    if list_of_q[13-k][1] in possible_choices:
                                        next_action = list_of_q[13-k][1]
                                        break
                                playout.move_stones(next_action, 0)
                            else:
                                cur = random.randint(0, len(possible_choices)-1)
                                playout.move_stones(possible_choices[cur], 1)
                        else:
                            if player_name == 0:
                                # NOTE(review): this passes player=1 even though it is
                                # player 0's turn -- looks like a bug; confirm intent.
                                cur = random.randint(0, len(possible_choices)-1)
                                playout.move_stones(possible_choices[cur], 1)
                            else:
                                possible_choices = list(map(lambda x: 7*player_name+x, possible_choices))
                                next_action = 7
                                [tensor_of_q] = sess.run(y, feed_dict = {X : [playout.board]})
                                list_of_q = tensor_of_q.tolist()
                                for k in range(14):
                                    list_of_q[k] = [list_of_q[k], k]
                                list_of_q.sort() # yields [Q-value, house] pairs in ascending order
                                for k in range(14):
                                    if list_of_q[13-k][1] in possible_choices:
                                        next_action = list_of_q[13-k][1]
                                        break
                                playout.move_stones(next_action-7, 1)
            print("episode", i, "WIN:", DQN_0+DQN_1, [DQN_0, DQN_1], "LOSE:", RANDOM_1+RANDOM_0, [RANDOM_1, RANDOM_0], "DRAW:", DRAW)
    saver = tf.train.Saver()
    saver.save(sess, "model.ckpt")
|
18,046 | 51ea24c62b5f435ee42dc85211c11e2f3c40fc4f | import forca
import adivinhacaovFinal
# Simple text menu that dispatches to one of the two bundled games
# (hangman or guessing game).
print("***********************************")
print("********Escolha o seu jogo!********")
print("***********************************")
print("(1) Forca (2) Adivinhação")
jogo = int(input("Qual o jogo?"))  # raises ValueError on non-numeric input
if(jogo == 1):
    print("Jogando forca!!")
    forca.jogar_forca()
elif(jogo == 2):
    print("Jogando adivinhação!!")
    adivinhacaovFinal.jogar_adivinhacao()
|
18,047 | a2c1f24c84acbbb93d0031e664273cb951a45a0c | from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from .models import *
from .forms import *
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, user_passes_test
@login_required(login_url='/login/')
def view_company_list(request):
    """Render every ProductCompany row on the company list page."""
    companies = ProductCompany.objects.all()
    return render(request, 'company_list.html', {'lists': companies, })
@login_required(login_url='/login/')
@user_passes_test(lambda u: u.is_superuser, login_url='/login/')
def add_company_view(request):
    """Show the add-company form; create the company on a valid POST."""
    if request.method == "POST":
        form = AddCompanyForm(request.POST)
        if form.is_valid():
            company = form.save(commit=False)
            company.save()
            return redirect('/company_list/')
    else:
        form = AddCompanyForm()
    # On an invalid POST the bound form (with errors) is re-rendered.
    return render(request, 'add_company.html', {'form': form})
@login_required(login_url='/login/')
def view_group_list(request):
    """Render every ProductGroup row on the group list page."""
    groups = ProductGroup.objects.all()
    return render(request, 'group_list.html', {'lists': groups, })
@login_required(login_url='/login/')
@user_passes_test(lambda u: u.is_superuser, login_url='/login/')
def add_group_view(request):
    """Display the add-group form; persist the group on a valid POST."""
    form = AddGroupForm(request.POST) if request.method == "POST" else AddGroupForm()
    if request.method == "POST" and form.is_valid():
        form.save()
        return redirect('/group_list/')
    # Invalid POSTs fall through and re-render the bound form with errors.
    return render(request, 'add_group.html', {'form': form})
@login_required(login_url='/login/')
@user_passes_test(lambda u: u.is_superuser, login_url='/login/')
def add_product_view(request):
    """Show the add-product form with company/group choices; save on valid POST."""
    company_name = ProductCompany.objects.all()
    group_name = ProductGroup.objects.all()
    if request.method == "POST":
        form = AddProductForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('/product_list/')
        messages.error(request, 'All fields required')
    else:
        form = AddProductForm()
    return render(request, 'add_product.html',
                  {'form': form, 'company_name': company_name, 'group_name': group_name})
@login_required(login_url='/login/')
def view_product_list(request):
    """Render every Product row on the product list page."""
    products = Product.objects.all()
    return render(request, 'product_list.html', {'lists': products, })
@login_required(login_url='/login/')
@user_passes_test(lambda u: u.is_superuser, login_url='/login/')
def add_supplier_view(request):
    """Show the supplier form; save on valid POST, flash an error otherwise."""
    if request.method == "POST":
        form = AddSupplierForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('/supplier_list/')
        messages.error(request, 'All fields required')
    else:
        form = AddSupplierForm()
    return render(request, 'add_supplier.html', {'form': form})
@login_required(login_url='/login/')
def view_supplier_list(request):
    """Render every Supplier row on the supplier list page."""
    suppliers = Supplier.objects.all()
    return render(request, 'supplier_list.html', {'lists': suppliers, })
@login_required(login_url='/login/')
def stock_in_view(request):
    """Record incoming stock: update price and add units to the Stocks row,
    creating the row on first receipt of a product."""
    product_info = Product.objects.all()
    supplier_info = Supplier.objects.all()
    if request.method == "POST":
        stock_in_form = AddStockInForm(request.POST)
        if stock_in_form.is_valid():
            # NOTE(review): this sets an attribute on the FORM object, not on
            # the model instance -- it is never saved; confirm intent.
            # `timezone` presumably comes in via the models wildcard import.
            stock_in_form.stockin_date = timezone.now()
            p_name = stock_in_form.cleaned_data['product_info']
            p_price = stock_in_form.cleaned_data['product_price']
            p_unit = stock_in_form.cleaned_data['product_unit']
            try:
                # Existing stock row: accumulate units, overwrite price.
                ob = Stocks.objects.get(product_name=p_name)
                p_unit = p_unit + ob.product_unit
                Stocks.objects.filter(product_name=p_name).update(product_price=p_price, product_unit=p_unit)
            except Stocks.DoesNotExist:
                # First receipt of this product: create the stock row.
                ob = Stocks(product_name=p_name, product_price=p_price, product_unit=p_unit)
                ob.save()
            stock_in_form.save()
            return redirect('/stock_in_list/')
        else:
            messages.error(request, 'All fields required')
    else:
        stock_in_form = AddStockInForm()
    # NOTE(review): 'prduct_info' looks like a typo for 'product_info' --
    # the template must use the same (misspelled) key for this to work.
    return render(request, 'stock_in.html', {'form': stock_in_form, 'prduct_info':product_info, 'supplier_info':supplier_info})
@login_required(login_url='/login/')
def stock_in_list(request):
    """Render every StockIn record on the stock-in history page."""
    entries = StockIn.objects.all()
    return render(request, 'stock_in_list.html', {'lists': entries, })
@login_required(login_url='/login/')
def stock_out_view(request):
    """Record outgoing stock: subtract the sold units from the Stocks row."""
    product_info = Stocks.objects.all()
    if request.method=="POST":
        stock_out_form = AddStockOutForm(request.POST)
        if stock_out_form.is_valid():
            # NOTE(review): sets an attribute on the FORM, not the model;
            # see the matching note in stock_in_view.
            stock_out_form.stockout_date = timezone.now()
            p_name = stock_out_form.cleaned_data['product_info']
            p_price = stock_out_form.cleaned_data['product_sell_price']
            p_unit = stock_out_form.cleaned_data['product_sell_unit']
            try:
                ob = Stocks.objects.get(product_name=p_name)
                # NOTE(review): no check for sufficient stock -- product_unit
                # can go negative here; confirm whether that is acceptable.
                p_unit = ob.product_unit - p_unit
                Stocks.objects.filter(product_name=p_name).update(product_price=ob.product_price, product_unit=p_unit)
            except Stocks.DoesNotExist:
                print('Something error!')
            stock_out_form.save()
            return redirect('/stock_out_list/')
        else:
            messages.error(request, 'All fields required')
    else:
        stock_out_form = AddStockOutForm()
    return render(request, 'stock_out.html',{'form': stock_out_form, 'product_info': product_info})
@login_required(login_url='/login/')
def stock_out_list(request):
    """Render every StockOut record on the stock-out history page."""
    entries = StockOut.objects.all()
    return render(request, 'stock_out_list.html', {'lists': entries, })
@login_required(login_url='/login/')
def stock_view(request):
    """Render the current Stocks levels for every product."""
    stock_rows = Stocks.objects.all()
    return render(request, 'stocks.html', {'lists': stock_rows})
def mod_login_view(request):
    """Authenticate a user from raw POST credentials and log them in.

    NOTE(review): the form is instantiated but `is_valid()` is never called;
    credentials are read straight from request.POST. Also, inactive users are
    silently redirected back to the login page with no message.
    """
    title = "Login"
    if request.method == 'POST':
        form = ModLoginForm(request.POST)
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect('/')
            else:
                return HttpResponseRedirect('/login/')
        else:
            messages.error(request, "Username or password invalid")
    else:
        form = ModLoginForm()
    return render(request, 'login.html', {'form': form, 'title': title})
@login_required(login_url='/login/')
def mod_logout_view(request):
    """Log the current user out and send them back to the login page."""
    logout(request)
    return HttpResponseRedirect('/login/')
18,048 | d635151dedb0df9989aa4d4d08a50c1966007190 |
from abc import ABC, abstractmethod
class Strategy(ABC):
    """Abstract base class for strategies that consume candles one at a time."""

    def __init__(self):
        # Most recently attached data payload (None until set_data is called).
        self.data = None

    @abstractmethod
    def process_next_candle(self):
        """Handle the next candle; subclasses must implement this."""

    def set_data(self, data):
        self.data = data

    def get_data(self):
        return self.data
|
18,049 | f8901ba794e643e8be58c2e38a96744e25608c44 | #!/usr/bin/env python2
import argparse
import fsutils
# Set up command line argument parsing
parser = argparse.ArgumentParser()
# Accept zero or more directory names (nargs='*' makes them optional)
parser.add_argument("directory", nargs='*')
parser.add_argument("-p", "--parents",
                    action='store_true',
                    default=False,
                    help="create parent directories if they don't exist")
# Parse arguments and delegate the actual creation to the fsutils helper
args = parser.parse_args()
fsutils.create_dirs(args.parents, args.directory)
|
18,050 | 6e2b96102ad65c99163c4e34f774b1f0d6c730fb | print 'loading modules'
import pickle
import argparse
import numpy as np
import pandas as pd
import time
import os
import scipy.optimize as spo
from sklearn.metrics import roc_auc_score
from sklearn.metrics import silhouette_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.cluster import KMeans
# parse command-line arguments: which brick to process, and whether this is
# the training or the test dataset
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--brick',help='which brick n', default=1)
parser.add_argument('-t', '--train', help='training or test set', action='store_true', default=False)
args = parser.parse_args()
class electron:
    """A straight-line electron track anchored at (x1, y1, z1).

    tx and ty are the track angles; positions along z extrapolate linearly
    using the sine of each angle.
    """

    def __init__(self, x1, y1, z1, tx, ty):
        self.x1 = x1
        self.y1 = y1
        self.z1 = z1
        self.tx = tx
        self.ty = ty

    def get_x_pos(self, z):
        """Extrapolated x coordinate at depth z."""
        return self.x1 + z * np.sin(self.tx)

    def get_y_pos(self, z):
        """Extrapolated y coordinate at depth z."""
        return self.y1 + z * np.sin(self.ty)

    def get_distance_from(self, x, y, z):
        """Transverse distance from point (x, y) to the track at depth z."""
        dx = x - self.get_x_pos(z)
        dy = y - self.get_y_pos(z)
        return (dx ** 2 + dy ** 2) ** 0.5

    def get_dTX(self, tx):
        """Angle difference in x between a track and this electron."""
        return tx - self.tx

    def get_dTY(self, ty):
        """Angle difference in y between a track and this electron."""
        return ty - self.ty
def linear(x, intercept, slope):
    """Straight-line model used by the curve fits below."""
    return slope * x + intercept
def write_out(output):
    """Write the per-track electron-path features to CSV and exit the process."""
    # write out the results (train vs test picks the output filename)
    if args.train:
        output.to_csv('/data/atlas/atlasdata/zgubic/MLHEP17/ElecPathAdded/DS_2_train_brick'+str(args.brick)+'_ElecPathAdded.csv')
    else:
        output.to_csv('/data/atlas/atlasdata/zgubic/MLHEP17/ElecPathAdded/DS_2_test_brick'+str(args.brick)+'_ElecPathAdded.csv')
    exit()  # NOTE(review): unconditionally terminates the interpreter here
###############################
print 'loading the datasets'
###############################
# import the training set and choose what the test set is
# (Python 2 script: note the statement-form prints throughout)
if args.train:
    D_train = pd.read_hdf('/data/atlas/atlasdata/zgubic/MLHEP17/DS_2_train_extended_float32.hdf')
    D_test = D_train[D_train.brick_number == np.int8(args.brick)]
else:
    D_test = pd.read_hdf('/data/atlas/atlasdata/zgubic/MLHEP17/DS_2_test_extended_float32.hdf')
    D_test = D_test[D_test.brick_number == np.int8(args.brick)]
# Feature matrix: drop bookkeeping columns (train data also carries labels).
if args.train:
    X_test = D_test.drop('index', axis=1).drop('event_id', axis=1).drop('signal', axis=1).drop('brick_number', axis=1)
else:
    X_test = D_test.drop('index', axis=1).drop('brick_number', axis=1)
##############################
print 'determine the number of showers'
###############################
# Score every track with the pre-trained level-1 model; tracks with
# probability > 0.30 are treated as shower tracks.
filename = 'level1_model.pkl'
model = pickle.load(open(filename, 'rb'))
pred_test = model.predict_proba(X_test)[:,1]
# predict tracks belonging to the shower:
D_test['prediction'] = pred_test
showers = D_test[D_test.prediction > 0.30]
# in case of no showers, save these sentinel values and stop early
print 'showers.shape', showers.shape
if showers.shape[0] < 2:
    print 'outputting early'
    output = D_test[['index', 'brick_number']]
    output['e_dR'] = 40000*np.ones(len(D_test['X']))
    output['e_dT'] = 0.5*np.ones(len(D_test['X']))
    output['e_dZ'] = 30000*np.ones(len(D_test['X']))
    if args.train:
        output['signal'] = D_test['signal']
    write_out(output)
    exit()
# now guess the number of showers by clustering the shower tracks in (X, Y)
shower_coords = showers[['X', 'Y']]
shower_pred = list(range(5))
shower_pred[0] = np.zeros(showers.shape[0])
sil_scores = np.array(range(5), dtype=float)
# compute the kmeans for 2..5 clusters and save each silhouette score
max_clusters = min(6, showers.shape[0])
for n_clus in range(2, max_clusters):
    print 'n_clust', n_clus
    kmeans = KMeans(n_clusters=n_clus)
    shower_pred[n_clus-1] = kmeans.fit_predict(shower_coords)
    silhouette_avg = silhouette_score(shower_coords, shower_pred[n_clus-1])
    sil_scores[n_clus-1] = silhouette_avg
    print "Silhouette score avg is:", sil_scores[n_clus-1]
# determine the number of clusters (index of the best silhouette score)
pred_n_showers = np.argmax(sil_scores)+1
x_sd = np.std(showers['X'])
y_sd = np.std(showers['Y'])
sdev = np.sqrt(x_sd**2+y_sd**2)
# correct for very small number of tracks:
if showers.shape[0] < 80:
    pred_n_showers = 1
# correct for the 2/1 cluster case: a compact cloud is one shower
if pred_n_showers == 2 and sdev < 4500:
    pred_n_showers = 1
# let us know the outcome
print
print sil_scores
print 'x_sd', x_sd
print 'y_sd', y_sd
print 'standard deviation is:', sdev
print
print '###############################'
print 'Predicted number of clusters is:', pred_n_showers
print '###############################'
shower_coords['shower_id'] = shower_pred[pred_n_showers-1]
shower_coords['Z'] = showers['Z']
###############################
print 'fit the electron tracks'
###############################
# For each predicted shower, fit straight lines X(Z) and Y(Z) through its
# tracks and build an `electron` anchored at the shower's minimum Z.
electrons = []
for sh_i in range(pred_n_showers):
    print 'computing shower', sh_i
    # define and plot the shower
    current_shower = shower_coords[shower_coords.shower_id == sh_i]
    print 'showers', shower_coords
    print 'current shower', current_shower
    # fit a line for electron position
    #print 'Z'
    print current_shower['Z']
    #print 'Y'
    print current_shower['Y']
    popt, pcov = spo.curve_fit(linear, current_shower['Z'], current_shower['Y'], p0=[np.mean(current_shower['Y']), 0])
    y0 = popt[0]
    yk = popt[1]
    popt, pcov = spo.curve_fit(linear, current_shower['Z'], current_shower['X'], p0=[np.mean(current_shower['X']), 0])
    x0 = popt[0]
    xk = popt[1]
    zmin = np.min(current_shower['Z'])
    electrons.append(electron(x0, y0, zmin, xk, yk))
###########################
print 'Now take all the tracks in the set and compute the distance to the electron path'
###########################
tracks_xs = D_test['X']
tracks_ys = D_test['Y']
tracks_zs = D_test['Z']
# compute the distance of each track to each electron (tracks x electrons)
distances = np.zeros(shape=(len(tracks_xs), len(electrons)))
for i, elec in enumerate(electrons):
    distances[:,i] = elec.get_distance_from(tracks_xs, tracks_ys, tracks_zs)
print 'distances', distances
# get the index of the closest electron for each track
closest_e = distances.argsort()[:, 0]
print closest_e
# compute the dTheta from the closest electron track, and dZ (distance along Z from the start of the shower)
dR = np.zeros(len(tracks_xs))
dT = np.zeros(len(tracks_xs))
dZ = np.zeros(len(tracks_xs))
for tr_i in range(len(D_test)):
    e_i = closest_e[tr_i]
    dTX = electrons[e_i].get_dTX(D_test.iloc[tr_i]['TX'])
    dTY = electrons[e_i].get_dTY(D_test.iloc[tr_i]['TY'])
    dT[tr_i] = np.sqrt(dTX**2 + dTY**2)
    dZ[tr_i] = electrons[e_i].z1 - D_test.iloc[tr_i]['Z']
# and save the distance to the closest electron track
dR = np.min(distances, axis=1)
###########################
print 'and finally add to the dataset'
###########################
# Assemble the output frame (dR/dT/dZ features per track) and write it out;
# write_out() terminates the process.
output = D_test[['index', 'brick_number']]
output['e_dR'] = dR
output['e_dT'] = dT
output['e_dZ'] = dZ
if args.train:
    output['signal'] = D_test['signal']
write_out(output)
|
18,051 | 2960b3af996a9514af13a99f693308da01168e4e | from utils import euler_utils
import math
import sympy
def is_sqube(num):
    """Return True if num == p**2 * q**3 for its exactly-two prime factors.

    NOTE(review): `factors()` is CALLED here, so get_factors apparently
    returns a callable/memoized accessor rather than a list -- confirm
    against utils.euler_utils before changing this.
    """
    factors = euler_utils.get_factors(num)
    if len(factors()) != 2:
        return False
    a = factors()[0]
    b = factors()[1]
    # Either assignment of the two factors to the square/cube may match.
    if (math.pow(a,2) * math.pow(b,3) == num) or (math.pow(b,2) * math.pow(a,3) == num):
        return True
    return False
def is_prime_proof(num):
    """True if changing any single decimal digit of num never yields a prime.

    Python 2 code (xrange). The digit is restored after each position is
    tested so later positions see the original number.
    """
    ll = euler_utils.get_digits(num)
    for i in xrange(0, len(ll)):
        x = ll[i]
        for y in xrange(0,10):
            if y == x:
                continue  # the unchanged number itself is not tested
            ll[i] = y
            new_num = euler_utils.create_num_from_list(ll)
            if sympy.ntheory.isprime(new_num):
                return False
        ll[i] = x  # restore the original digit
    return True
def contains_200(num):
    """Return True if the decimal representation of num contains "200"."""
    # Direct substring test replaces the verbose if/return True/return False.
    return "200" in str(num)
found = 0
num = 1
while found < 200:
if contains_200(num):
if is_sqube(num):
if is_prime_proof(num):
print num
found += 1
num += 1
|
18,052 | 0f4eec7eb81b528559980a24227ed70e17e83f7d | # You are given a DNA sequence: a string consisting of characters A, C, G, and T.
# Your task is to find the longest repetition in the sequence.
# This is a maximum-length substring containing only one type of character.
# https://cses.fi/problemset/task/1069/
# Read the sequence; the appended sentinel guarantees the final run of
# equal characters is flushed by the comparison loop below.
string = input() + " "
run = 1
longest = 1  # renamed from `max`, which shadowed the builtin max()
# Walk adjacent character pairs instead of indexing via range(len(...)).
for prev, nxt in zip(string, string[1:]):
    if prev == nxt:
        run += 1
    else:
        longest = max(longest, run)
        run = 1
# Length of the longest substring made of a single repeated character.
print(longest)
# author : PrernaBabber
# https://cses.fi/user/32267 |
18,053 | b0cb0f3a0aa7e0ed29c3e8a48103f10d4386c487 | import land.land
static import global allegro5.allegro_color
static import global allegro5.allegro_native_dialog if !defined(ANDROID)
def platform_color_hsv(float hue, sat, val) -> LandColor:
    # Convert an HSV triple to a LandColor via Allegro's color addon;
    # alpha is always set fully opaque.
    LandColor c
    al_color_hsv_to_rgb(hue, sat, val, &c.r, &c.g, &c.b)
    c.a = 1
    return c
def platform_color_name(char const *name) -> LandColor:
    # Look up a color by name. A few custom names are resolved to hex
    # values here; everything else goes through al_color_name_to_rgb,
    # with embedded spaces stripped first ("light blue" -> "lightblue").
    LandColor c
    if land_equals(name, "sequoia"): return land_color_name("#905060")
    if land_equals(name, "banana"): return land_color_name("#ffeb00")
    if land_equals(name, "citron"): return land_color_name("#d9f51f")
    # if there's spaces ignore them
    int i = 0
    while name[i]:
        if name[i] == ' ':
            # "light blue" [i = 5]
            # NOTE(review): name2 is sized strlen(name) with no +1 for the
            # terminator; this only fits because at least one space is
            # dropped -- confirm.
            char name2[strlen(name)]
            strncpy(name2, name, i) # "light"
            int j = i
            i++ # "light" [i = 6, J = 5]
            # Copy the rest of the name, skipping any further spaces.
            while name[i]:
                if name[i] == ' ':
                    i++
                    continue
                name2[j++] = name[i++]
            name2[j] = 0
            al_color_name_to_rgb(name2, &c.r, &c.g, &c.b)
            c.a = 1
            return c
        i++
    # No spaces found: direct lookup.
    al_color_name_to_rgb(name, &c.r, &c.g, &c.b)
    c.a = 1
    return c
def platform_popup(str title, str text):
    # Show a native error message box with the given title and text.
    # On Android, or when native dialogs are compiled out, this is a no-op.
    *** "if" defined(ANDROID) or defined(NO_NATIVE_DIALOG)
    *** "else"
    al_show_native_message_box(al_get_current_display(), title,
        title, text, None, ALLEGRO_MESSAGEBOX_ERROR)
    *** "endif"
def platform_get(str what) -> char*:
    # Query backend information as a newly allocated string (allocated via
    # land_strdup -- presumably the caller owns it; confirm). Only "opengl"
    # is understood; any other query returns None.
    if land_equals(what, "opengl"):
        char *s = land_strdup("")
        if al_get_opengl_variant() == ALLEGRO_DESKTOP_OPENGL:
            land_append(&s, "OpenGL")
        if al_get_opengl_variant() == ALLEGRO_OPENGL_ES:
            land_append(&s, "OpenGL ES")
        uint32_t v = al_get_opengl_version()
        # Version is packed as major.minor.patch in the top three bytes.
        land_append(&s," %d.%d.%d", v >> 24, (v >> 16) & 255, (v >> 8) & 255)
        return s
    return None
|
18,054 | 8c4e72829c51f016da6888dd5715b93f996800fa | # coding: utf-8
# flake8: noqa
"""
LogicMonitor API-Ingest Rest API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. API-Ingest provides the entry point in the form of public rest APIs for ingesting metrics into LogicMonitor. For using this application users have to create LMAuth token using access id and key from santaba. # noqa: E501
OpenAPI spec version: 3.0.0
"""
from __future__ import absolute_import
# import models into model package
from lmingest.models.list_rest_data_point_v1 import ListRestDataPointV1
from lmingest.models.list_rest_data_source_instance_v1 import \
ListRestDataSourceInstanceV1
from lmingest.models.map_string_string import MapStringString
from lmingest.models.push_metric_api_response import PushMetricAPIResponse
from lmingest.models.rest_data_point_v1 import RestDataPointV1
from lmingest.models.rest_data_source_instance_v1 import \
RestDataSourceInstanceV1
from lmingest.models.rest_instance_properties_v1 import RestInstancePropertiesV1
from lmingest.models.rest_metrics_v1 import RestMetricsV1
from lmingest.models.rest_resource_properties_v1 import RestResourcePropertiesV1
|
18,055 | 5ce87fa27a45977698f745d7f854a276061eeac4 | from environment.gym_tremor.envs.tremor_env import TremorEnv
|
18,056 | a31b39ec49b53793cb85c3290ee2715c91e9f967 | # _*_ coding:utf-8 _*_
print("Hello Static File!") |
18,057 | 95c6d98200bdac5ba707f1c0b6b323a54f35fa80 | import Listener
import os
import pyttsx3
import sys
import speech_recognition
voice = pyttsx3.init()
# Select the second installed voice and slow the speech rate slightly.
voice.setProperty('voice', voice.getProperty('voices')[1].id)
voice.setProperty('rate', voice.getProperty('rate')-35)
# Quiz data; only index 0 is used below.
questions = ['What is the equation for the area of a circle?']
answers = ['Pie are squared']
# NOTE(review): this literal duplicates questions[0], and no runAndWait()
# call is visible, so whether say() actually speaks before listening
# depends on pyttsx3/Listener behaviour -- confirm.
voice.say('What is the equation for the area of a circle?')
# Capture and transcribe the spoken answer, then compare verbatim.
answer = Listener.Listener.process(Listener.Listener.listen())
if answer == answers[0]:
    voice.say("Correct")
else:
    voice.say("Incorrect")
18,058 | 04f0d9621b024b4c63ddf70e3d1c0c9b0aa3c2c4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 6 21:40:46 2020
@author: estanislau
"""
import cv2
import glob
# Display every JPEG frame under Frames_Video/, one second per frame,
# in sorted filename order.
print("Abrindo pasta Frames_Video/")
for frame_path in sorted(glob.glob('Frames_Video/*.jpg')):
    frame = cv2.imread(frame_path)
    cv2.imshow("Apresenta Imagens", frame)
    cv2.waitKey(1000)
cv2.destroyAllWindows()
18,059 | 0ef2d429dcebe5a6c07a0f381269abdaa7d9b1ad | import asyncio
import click
import logging
import pprint
from .session import Session
from .socket import SocketSession
_LOGGER = logging.getLogger(__name__)
@click.group(chain=True)
@click.option('-a', '--api-name', required=True, help='API name')
@click.option('-b', '--basic-auth-creds', required=True, help='API basic auth credentials')
@click.option('-u', '--username', required=True, help='API username')
@click.option('-p', '--password', required=True, help='API password')
@click.option('-v', '--verbose/--no-verbose', default=False, help='Enable verbose logging')
@click.pass_context
def smartbox(ctx, api_name, basic_auth_creds, username, password, verbose):
    """Root command group: configure logging and share an API Session.

    Stores the authenticated Session and the verbose flag in ctx.obj for
    the subcommands; chain=True lets several subcommands run in one call.
    """
    ctx.ensure_object(dict)
    # DEBUG when --verbose is given, INFO otherwise.
    logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(name)s.%(funcName)s:%(lineno)d] %(message)s',
                        level=logging.DEBUG if verbose else logging.INFO,
                        datefmt='%Y-%m-%d %H:%M:%S')
    session = Session(api_name, basic_auth_creds, username, password)
    ctx.obj['session'] = session
    ctx.obj['verbose'] = verbose
@smartbox.command(help='Show devices')
@click.pass_context
def devices(ctx):
    """Pretty-print every device known to the session."""
    session = ctx.obj['session']
    pprint.PrettyPrinter(indent=4).pprint(session.get_devices())
@smartbox.command(help='Show nodes')
@click.pass_context
def nodes(ctx):
    """For each device, print a heading and pretty-print its node list."""
    session = ctx.obj['session']
    printer = pprint.PrettyPrinter(indent=4)
    for dev in session.get_devices():
        print(f"{dev['name']} (dev_id: {dev['dev_id']})")
        printer.pprint(session.get_nodes(dev['dev_id']))
@smartbox.command(help='Show node status')
@click.pass_context
def status(ctx):
    """Print the current status of every node on every device."""
    session = ctx.obj['session']
    printer = pprint.PrettyPrinter(indent=4)
    for dev in session.get_devices():
        print(f"{dev['name']} (dev_id: {dev['dev_id']})")
        for nd in session.get_nodes(dev['dev_id']):
            print(f"{nd['name']} (addr: {nd['addr']})")
            printer.pprint(session.get_status(dev['dev_id'], nd))
@smartbox.command(help='Set node status (pass settings as extra args, e.g. mode=auto)')
@click.option('-d', '--device-id', required=True, help='Device ID for node to set status on')
@click.option('-n', '--node-addr', type=int, required=True, help='Address of node to set status on')
@click.option('--locked', type=bool)
@click.option('--mode')
@click.option('--stemp')
@click.option('--units')
# TODO: other options
@click.pass_context
def set_status(ctx, device_id, node_addr, **kwargs):
    """Write status fields to the node identified by device id and address."""
    session = ctx.obj['session']
    device = next(d for d in session.get_devices() if d['dev_id'] == device_id)
    node = next(n for n in session.get_nodes(device['dev_id']) if n['addr'] == node_addr)
    session.set_status(device['dev_id'], node, kwargs)
@smartbox.command(help='Show node setup')
@click.pass_context
def setup(ctx):
    """Print the setup of every node on every device."""
    session = ctx.obj['session']
    printer = pprint.PrettyPrinter(indent=4)
    for dev in session.get_devices():
        print(f"{dev['name']} (dev_id: {dev['dev_id']})")
        for nd in session.get_nodes(dev['dev_id']):
            print(f"{nd['name']} (addr: {nd['addr']})")
            printer.pprint(session.get_setup(dev['dev_id'], nd))
@smartbox.command(help='Set node setup (pass settings as extra args, e.g. mode=auto)')
@click.option('-d', '--device-id', required=True, help='Device ID for node to set setup on')
@click.option('-n', '--node-addr', type=int, required=True, help='Address of node to set setup on')
@click.option('--true-radiant-enabled', type=bool)
# TODO: other options
@click.pass_context
def set_setup(ctx, device_id, node_addr, **kwargs):
    """Write setup fields to the node identified by device id and address."""
    session = ctx.obj['session']
    device = next(d for d in session.get_devices() if d['dev_id'] == device_id)
    node = next(n for n in session.get_nodes(device['dev_id']) if n['addr'] == node_addr)
    session.set_setup(device['dev_id'], node, kwargs)
@smartbox.command(help='Show device away_status')
@click.pass_context
def device_away_status(ctx):
    """Print the away_status of every device."""
    session = ctx.obj['session']
    printer = pprint.PrettyPrinter(indent=4)
    for dev in session.get_devices():
        print(f"{dev['name']} (dev_id: {dev['dev_id']})")
        printer.pprint(session.get_device_away_status(dev['dev_id']))
@smartbox.command(help='Set device away_status (pass settings as extra args, e.g. mode=auto)')
@click.option('-d', '--device-id', required=True, help='Device ID to set away_status on')
@click.option('--away', type=bool)
@click.option('--enabled', type=bool)
@click.option('--forced', type=bool)
@click.pass_context
def set_device_away_status(ctx, device_id, **kwargs):
    """Write away_status fields on the device selected by --device-id."""
    session = ctx.obj['session']
    device = next(d for d in session.get_devices() if d['dev_id'] == device_id)
    session.set_device_away_status(device['dev_id'], kwargs)
@smartbox.command(help='Open socket.io connection to device.')
@click.option('-d', '--device-id', required=True, help='Device ID to open socket for')
@click.pass_context
def socket(ctx, device_id):
    """Stream dev_data and update events from one device until interrupted."""
    session = ctx.obj['session']
    verbose = ctx.obj['verbose']
    printer = pprint.PrettyPrinter(indent=4)

    def handle_dev_data(data):
        # Full device snapshot pushed by the server.
        _LOGGER.info("Received dev_data:")
        printer.pprint(data)

    def handle_update(data):
        # Incremental change notification.
        _LOGGER.info("Received update:")
        printer.pprint(data)

    socket_session = SocketSession(session, device_id, handle_dev_data,
                                   handle_update, verbose,
                                   add_sigint_handler=True)
    loop = asyncio.get_event_loop()
    task = loop.create_task(socket_session.run())
    loop.run_until_complete(task)
|
18,060 | 2bc226554d854e07f08b3f8e1812c0303614eed6 | ITEM: TIMESTEP
7500
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
4.6048424245750752e-01 4.6739515757536559e+01
4.6048424245750752e-01 4.6739515757536559e+01
4.6048424245750752e-01 4.6739515757536559e+01
ITEM: ATOMS id type xs ys zs
8 1 0.120176 0.065186 0.064358
35 1 0.0574224 0.12483 0.0615378
130 1 0.0585798 0.0605274 0.121905
165 1 0.123426 0.129516 0.126746
4 1 0.995199 0.0620851 0.0595221
11 1 0.315131 0.00939566 0.058151
267 1 0.315668 0.00148549 0.306201
484 1 1.0037 0.43451 0.430317
1309 1 0.874186 0.493118 0.259925
45 1 0.379094 0.130803 -0.000139393
12 1 0.247682 0.0630854 0.0627086
39 1 0.187472 0.126733 0.0704321
43 1 0.31042 0.128451 0.0625433
134 1 0.191191 0.0642932 0.129167
138 1 0.312689 0.0694603 0.121723
169 1 0.255712 0.127375 0.121503
1305 1 0.757843 0.496763 0.251475
147 1 0.56215 -0.00018733 0.187246
1437 1 0.875044 0.498908 0.380325
265 1 0.248319 -0.00238158 0.251156
139 1 0.31341 0.00580181 0.183173
16 1 0.372108 0.0690663 0.0609338
47 1 0.433396 0.127445 0.0651622
142 1 0.433755 0.0686387 0.121
173 1 0.372771 0.126051 0.124723
20 1 0.508877 0.0665381 0.0660115
177 1 0.498509 0.121588 0.123822
1169 1 0.500284 0.494719 0.125921
145 1 0.500952 0.00138308 0.12587
82 1 0.56594 0.313188 0.00508242
1541 1 0.125016 0.500066 0.500178
24 1 0.621088 0.0572068 0.0611845
51 1 0.569464 0.12393 0.0593318
146 1 0.568077 0.0635558 0.124351
181 1 0.627432 0.126568 0.126031
481 1 0.00279501 0.375161 0.371225
94 1 0.937765 0.31205 -0.00143194
141 1 0.374904 0.00236904 0.118804
512 1 0.872433 0.437667 0.435702
28 1 0.744238 0.0579916 0.0615289
55 1 0.682445 0.116757 0.0549047
59 1 0.814008 0.122643 0.0724388
150 1 0.678362 0.061187 0.120044
154 1 0.810859 0.0552407 0.129682
185 1 0.743018 0.120733 0.118373
1045 1 0.632103 0.499245 0.000996974
161 1 1.00377 0.123605 0.129138
32 1 0.869452 0.0591464 0.0654998
63 1 0.945588 0.12168 0.0558703
158 1 0.93562 0.0603408 0.123469
189 1 0.875217 0.114925 0.128455
121 1 0.745657 0.375699 0.00372177
40 1 0.126147 0.18957 0.0624821
67 1 0.0520467 0.247891 0.0573645
72 1 0.119563 0.310862 0.0648149
162 1 0.0576003 0.184419 0.122596
194 1 0.0681375 0.313666 0.132552
197 1 0.120056 0.251806 0.12535
36 1 0.999262 0.185443 0.0596358
69 1 0.121846 0.253455 -0.000727953
399 1 0.43404 0.000873861 0.433561
44 1 0.251486 0.185905 0.061424
71 1 0.184773 0.251219 0.063622
75 1 0.314403 0.252928 0.060444
76 1 0.249402 0.312566 0.0690046
166 1 0.187653 0.193898 0.123088
170 1 0.316368 0.190389 0.118331
198 1 0.186922 0.307567 0.131747
201 1 0.255196 0.248889 0.128142
202 1 0.314488 0.310757 0.128452
401 1 0.499222 0.000592529 0.371105
1031 1 0.196645 0.497211 0.0650812
129 1 -0.0052004 -0.00134889 0.126233
126 1 0.938889 0.438755 0.000547989
48 1 0.372037 0.192829 0.0581989
79 1 0.437894 0.249143 0.0550375
80 1 0.3781 0.316052 0.0593407
174 1 0.438569 0.191217 0.120018
205 1 0.382494 0.25216 0.120663
206 1 0.439894 0.306208 0.122397
52 1 0.502092 0.189941 0.0652113
209 1 0.50579 0.249247 0.121677
1431 1 0.687242 0.496596 0.441574
1435 1 0.812148 0.496642 0.44381
84 1 0.500752 0.307957 0.064616
56 1 0.630613 0.182154 0.0712924
83 1 0.567745 0.244312 0.0656675
88 1 0.624434 0.306615 0.0661638
178 1 0.564108 0.189896 0.132737
210 1 0.563267 0.319185 0.119622
213 1 0.62605 0.250509 0.127118
1409 1 0.993888 0.498121 0.373767
261 1 0.119672 0.00127526 0.249873
526 1 0.435887 0.0636471 0.503473
60 1 0.753751 0.189491 0.0642154
87 1 0.678302 0.242703 0.0590686
91 1 0.818996 0.252254 0.0636659
92 1 0.748707 0.313787 0.0611821
182 1 0.690645 0.18817 0.11828
186 1 0.803431 0.18822 0.11998
214 1 0.687889 0.31395 0.121
217 1 0.74811 0.251431 0.124994
218 1 0.814334 0.313964 0.126236
193 1 -0.00142727 0.249486 0.121553
68 1 0.997503 0.311796 0.0645054
64 1 0.884035 0.182219 0.0669705
95 1 0.937453 0.25117 0.0568632
96 1 0.879655 0.309661 0.064044
190 1 0.940965 0.181052 0.125795
221 1 0.885375 0.249014 0.129545
222 1 0.938613 0.314569 0.123736
99 1 0.0650502 0.369454 0.0625781
104 1 0.119133 0.434554 0.0646055
226 1 0.0596918 0.441593 0.119196
229 1 0.124306 0.372776 0.123648
613 1 0.124695 0.377933 0.495673
103 1 0.185571 0.373653 0.0692458
107 1 0.31731 0.371125 0.0719189
108 1 0.258922 0.434357 0.071174
230 1 0.191066 0.438857 0.127977
233 1 0.24966 0.371577 0.132226
234 1 0.32133 0.434488 0.12378
511 1 0.93833 0.376717 0.440799
1165 1 0.377694 0.498929 0.117945
15 1 0.444247 0.00704546 0.063544
111 1 0.437035 0.376176 0.0591281
112 1 0.380857 0.436973 0.0655119
237 1 0.384052 0.369288 0.121304
238 1 0.436127 0.433264 0.124244
241 1 0.503194 0.374943 0.12404
116 1 0.496543 0.436631 0.0583018
1425 1 0.49785 0.499334 0.377609
1175 1 0.688682 0.500342 0.19279
510 1 0.939635 0.431376 0.379263
115 1 0.561247 0.374036 0.0609162
120 1 0.618803 0.434596 0.0594165
242 1 0.561123 0.440302 0.122474
245 1 0.627571 0.37133 0.131858
509 1 0.869709 0.364707 0.383392
135 1 0.186402 -0.00083503 0.18719
26 1 0.806447 0.0655261 0.00555151
119 1 0.681023 0.381417 0.0599169
123 1 0.806942 0.369873 0.0671454
124 1 0.746877 0.442973 0.0746567
246 1 0.684757 0.43645 0.13274
249 1 0.741754 0.373901 0.127358
250 1 0.813473 0.432233 0.127253
225 1 0.00703884 0.370261 0.12466
100 1 0.00317662 0.434143 0.0564469
1295 1 0.43392 0.500834 0.317407
58 1 0.819081 0.194676 0.00571958
127 1 0.937165 0.381158 0.0616307
128 1 0.874737 0.43495 0.0635761
253 1 0.875296 0.380297 0.128815
254 1 0.943521 0.433582 0.123612
415 1 0.937427 0.00288283 0.435072
590 1 0.439109 0.316261 0.498743
136 1 0.131188 0.0652601 0.188629
163 1 0.0621903 0.123574 0.192249
258 1 0.0659423 0.0643161 0.247907
264 1 0.129521 0.061911 0.315914
291 1 0.0644813 0.114661 0.311622
293 1 0.130158 0.125456 0.244984
132 1 0.000550287 0.0611894 0.187381
260 1 0.990778 0.0547809 0.3098
131 1 0.0599415 0.000458219 0.186933
23 1 0.688552 -0.00499816 0.0643925
637 1 0.877135 0.366785 0.499043
140 1 0.253774 0.0639363 0.193759
167 1 0.191844 0.130807 0.1877
171 1 0.311355 0.124918 0.188477
262 1 0.191997 0.0677433 0.250057
266 1 0.317909 0.0607341 0.244342
268 1 0.255014 0.063534 0.314428
295 1 0.195564 0.12653 0.319955
297 1 0.257274 0.125941 0.254406
299 1 0.319667 0.115872 0.318053
102 1 0.189789 0.434869 0.00660576
506 1 0.801005 0.433753 0.375708
144 1 0.378091 0.0659872 0.180918
175 1 0.438332 0.12164 0.183085
270 1 0.439894 0.0701888 0.248618
272 1 0.378027 0.0568061 0.31434
301 1 0.372438 0.127922 0.253049
303 1 0.435941 0.122375 0.312873
305 1 0.501208 0.129597 0.25033
148 1 0.50202 0.059614 0.184277
407 1 0.683228 0.0039288 0.437385
276 1 0.500475 0.0635598 0.309212
152 1 0.623837 0.0656128 0.184523
179 1 0.556372 0.122042 0.192024
274 1 0.56544 0.0616955 0.248483
280 1 0.627671 0.0546048 0.307474
307 1 0.565608 0.131459 0.308515
309 1 0.623098 0.116742 0.246335
105 1 0.256434 0.371511 0.00327181
581 1 0.121759 0.255838 0.500456
156 1 0.744548 0.0592465 0.182796
183 1 0.691715 0.128228 0.184194
187 1 0.809784 0.115493 0.183722
278 1 0.678499 0.0593244 0.242105
282 1 0.809569 0.0512448 0.24615
284 1 0.748405 0.0587175 0.305562
311 1 0.68435 0.123019 0.304323
313 1 0.749207 0.125236 0.248733
315 1 0.814049 0.124177 0.304182
1041 1 0.500411 0.497876 0.00356107
70 1 0.184338 0.311371 0.00081722
289 1 0.998524 0.117425 0.258567
160 1 0.876281 0.05309 0.184436
191 1 0.932476 0.123503 0.188442
286 1 0.938788 0.0551871 0.24426
288 1 0.867406 0.0469835 0.313994
317 1 0.870563 0.107731 0.247029
319 1 0.926972 0.11969 0.313073
168 1 0.130624 0.188576 0.185326
195 1 0.0655498 0.247381 0.187367
200 1 0.128928 0.319193 0.198048
290 1 0.0651274 0.183196 0.252567
296 1 0.121715 0.188738 0.319901
322 1 0.0634569 0.305114 0.253668
323 1 0.0606269 0.243131 0.314033
325 1 0.126889 0.241858 0.248606
328 1 0.123158 0.311615 0.307427
196 1 0.993854 0.314278 0.191631
172 1 0.256205 0.187138 0.184687
199 1 0.191868 0.243359 0.189088
203 1 0.31849 0.250565 0.188428
204 1 0.248239 0.309479 0.192954
294 1 0.187914 0.186208 0.258255
298 1 0.312978 0.190416 0.25728
300 1 0.251622 0.190257 0.3135
326 1 0.189648 0.318819 0.251496
327 1 0.187324 0.255408 0.307022
329 1 0.250796 0.253257 0.254184
330 1 0.313715 0.310676 0.2496
331 1 0.311409 0.248492 0.31552
332 1 0.245086 0.312159 0.318122
176 1 0.36721 0.182067 0.183209
207 1 0.429356 0.240838 0.190339
208 1 0.376174 0.310969 0.185189
302 1 0.43781 0.182439 0.256793
304 1 0.378859 0.188178 0.316714
333 1 0.373659 0.246087 0.255594
334 1 0.440736 0.310703 0.256521
335 1 0.438193 0.24941 0.307502
336 1 0.373691 0.314634 0.314695
308 1 0.505352 0.195064 0.310164
180 1 0.497356 0.18829 0.186167
212 1 0.505824 0.322436 0.185991
337 1 0.504831 0.250991 0.246516
340 1 0.504933 0.305864 0.313747
184 1 0.627279 0.187873 0.183727
211 1 0.56311 0.25171 0.187636
216 1 0.623669 0.307442 0.184128
306 1 0.566981 0.188095 0.251058
312 1 0.625273 0.188309 0.315022
338 1 0.563668 0.315895 0.25169
339 1 0.565281 0.247427 0.314615
341 1 0.61959 0.249619 0.248067
344 1 0.621866 0.311329 0.314948
188 1 0.74809 0.185205 0.182026
215 1 0.687912 0.253765 0.187749
219 1 0.813103 0.256681 0.18264
220 1 0.749037 0.308581 0.188963
310 1 0.690359 0.195427 0.246236
314 1 0.807229 0.181583 0.23671
316 1 0.748295 0.189071 0.31394
342 1 0.686945 0.305306 0.252918
343 1 0.685584 0.244774 0.314495
345 1 0.755821 0.247227 0.246699
346 1 0.821067 0.310666 0.26515
347 1 0.813867 0.237098 0.303663
348 1 0.747082 0.307663 0.315605
324 1 0.99688 0.303303 0.314081
321 1 0.0035418 0.244308 0.250024
164 1 0.00660515 0.185231 0.19081
292 1 0.00385726 0.183847 0.31353
192 1 0.874625 0.18155 0.180627
223 1 0.944087 0.236275 0.187051
224 1 0.876367 0.305132 0.188971
318 1 0.942698 0.18344 0.25111
320 1 0.869203 0.182764 0.324565
349 1 0.879753 0.237151 0.25387
350 1 0.936416 0.299752 0.258568
351 1 0.93707 0.235734 0.314163
352 1 0.878308 0.298342 0.319926
227 1 0.0636929 0.375284 0.19153
232 1 0.128546 0.43687 0.18345
354 1 0.0624186 0.437514 0.254282
355 1 0.0587458 0.37252 0.313504
357 1 0.127542 0.381246 0.254751
360 1 0.117358 0.438288 0.310246
353 1 0.997329 0.366781 0.255183
356 1 0.996295 0.444213 0.312976
1173 1 0.620838 0.495512 0.125175
554 1 0.314499 0.183494 0.503528
3 1 0.066135 0.00253878 0.0588499
508 1 0.750252 0.431205 0.440438
259 1 0.0654713 0.00915166 0.312172
231 1 0.190291 0.376188 0.190451
235 1 0.313408 0.380831 0.183806
236 1 0.25371 0.435988 0.189343
358 1 0.18984 0.44424 0.240073
359 1 0.187633 0.378138 0.310967
361 1 0.251953 0.375281 0.254597
362 1 0.314276 0.442344 0.252024
363 1 0.30852 0.379 0.309651
364 1 0.245984 0.443536 0.305794
239 1 0.435327 0.372138 0.189005
240 1 0.375895 0.437017 0.188253
365 1 0.375252 0.376608 0.252987
366 1 0.437771 0.43598 0.252708
367 1 0.441539 0.378269 0.309944
368 1 0.376792 0.436789 0.310262
244 1 0.499658 0.434416 0.185349
372 1 0.501253 0.440049 0.321308
369 1 0.504081 0.38277 0.251544
503 1 0.690842 0.375931 0.443838
502 1 0.691942 0.433052 0.37237
505 1 0.75228 0.362801 0.376808
389 1 0.123096 0.0110621 0.382354
243 1 0.567003 0.378885 0.188291
248 1 0.622049 0.440565 0.193569
370 1 0.564949 0.436848 0.259195
371 1 0.564137 0.377323 0.320443
373 1 0.624435 0.37802 0.244902
376 1 0.623192 0.442261 0.320988
507 1 0.813134 0.369123 0.437065
137 1 0.253975 0.000248422 0.119726
610 1 0.0674548 0.437195 0.495637
1285 1 0.125811 0.498082 0.248496
247 1 0.688326 0.369591 0.194665
251 1 0.810807 0.364599 0.188774
252 1 0.760992 0.432595 0.192237
374 1 0.688618 0.440866 0.25625
375 1 0.690347 0.376252 0.310717
377 1 0.757679 0.368338 0.255812
378 1 0.818821 0.42556 0.25217
379 1 0.811589 0.368141 0.320467
380 1 0.755444 0.43571 0.305958
497 1 0.498208 0.383335 0.38037
1049 1 0.749792 0.499601 0.00115869
498 1 0.56135 0.443721 0.384452
228 1 1.0011 0.435498 0.194717
255 1 0.935813 0.371711 0.186927
256 1 0.879165 0.438437 0.187767
381 1 0.88352 0.366277 0.24962
382 1 0.935166 0.438541 0.251518
383 1 0.932606 0.372815 0.319954
384 1 0.87185 0.427487 0.32557
1287 1 0.183169 0.493146 0.309908
386 1 0.0580869 0.0641634 0.3792
392 1 0.130954 0.0661788 0.442473
419 1 0.0647284 0.126573 0.438269
421 1 0.129139 0.120966 0.380726
1293 1 0.382532 0.493364 0.249621
143 1 0.435726 0.0102011 0.180867
500 1 0.492286 0.442912 0.439022
561 1 0.507026 0.118413 0.498356
390 1 0.193434 0.0615976 0.373955
394 1 0.31716 0.0583292 0.375644
396 1 0.248206 0.0640733 0.442212
423 1 0.189118 0.125388 0.439543
425 1 0.2516 0.130189 0.381566
427 1 0.320422 0.119069 0.437458
1177 1 0.757807 0.503773 0.129021
409 1 0.745771 0.00189959 0.379661
1299 1 0.556111 0.505216 0.315008
398 1 0.4375 0.059383 0.372109
400 1 0.375298 0.0577852 0.434701
429 1 0.383618 0.126271 0.377772
431 1 0.439685 0.121332 0.439159
404 1 0.49435 0.0622853 0.43444
433 1 0.495851 0.126024 0.375416
499 1 0.568108 0.372155 0.43821
402 1 0.562344 0.0561471 0.373154
408 1 0.622972 0.0621953 0.428193
435 1 0.561861 0.1173 0.438422
437 1 0.619524 0.121885 0.374508
125 1 0.877269 0.370893 0.00540028
1181 1 0.874207 0.503061 0.122015
602 1 0.810524 0.310173 0.494672
97 1 0.00140952 0.369541 0.00123053
1155 1 0.0574438 0.507124 0.184985
406 1 0.684455 0.0695859 0.369186
410 1 0.810367 0.0587661 0.373094
412 1 0.745886 0.0617328 0.437891
439 1 0.684034 0.131817 0.444791
441 1 0.750271 0.119625 0.36237
443 1 0.805805 0.128236 0.436715
549 1 0.127124 0.129212 0.502272
387 1 0.0612362 0.0003292 0.440493
388 1 0.0011208 0.0587679 0.442019
417 1 0.992506 0.123579 0.373793
414 1 0.934519 0.0566521 0.368639
416 1 0.870861 0.0592937 0.436296
445 1 0.867867 0.120406 0.381159
447 1 0.936576 0.120424 0.440254
1163 1 0.309248 0.490266 0.18507
133 1 0.128002 0.00995495 0.125789
418 1 0.0574915 0.184886 0.380249
424 1 0.12456 0.188028 0.439667
450 1 0.062821 0.311432 0.377155
451 1 0.0631618 0.251007 0.439146
453 1 0.11958 0.251581 0.376549
456 1 0.1267 0.306413 0.434476
7 1 0.185611 0.00595747 0.0623838
422 1 0.186631 0.192039 0.369693
426 1 0.316511 0.188026 0.379826
428 1 0.25635 0.194564 0.442449
454 1 0.18917 0.313069 0.372619
455 1 0.185037 0.251731 0.43266
457 1 0.249874 0.251691 0.37705
458 1 0.311687 0.312657 0.372169
459 1 0.319582 0.249791 0.444791
460 1 0.252634 0.309973 0.438785
101 1 0.130872 0.377807 0.00297648
385 1 -0.00138079 0.00180797 0.381025
1161 1 0.257028 0.497057 0.124135
430 1 0.442929 0.187143 0.369542
432 1 0.374722 0.188752 0.437418
461 1 0.382154 0.25171 0.378974
462 1 0.443838 0.316159 0.369464
463 1 0.438425 0.259712 0.436141
464 1 0.374077 0.320077 0.433128
436 1 0.493848 0.186298 0.440838
468 1 0.503902 0.311206 0.432372
593 1 0.496053 0.248565 0.500664
465 1 0.505721 0.247466 0.370475
434 1 0.561712 0.186249 0.388495
469 1 0.62746 0.249026 0.37687
472 1 0.621589 0.308081 0.437172
466 1 0.563632 0.316242 0.373584
440 1 0.623466 0.184346 0.444143
467 1 0.559295 0.249314 0.440802
496 1 0.373585 0.438604 0.431922
473 1 0.747002 0.246586 0.37703
474 1 0.813528 0.297617 0.381052
444 1 0.739871 0.191395 0.442367
470 1 0.68299 0.308973 0.369229
475 1 0.817313 0.242991 0.443143
471 1 0.683175 0.248272 0.439435
442 1 0.808106 0.18424 0.381361
476 1 0.74922 0.304519 0.436317
438 1 0.684346 0.186052 0.378353
495 1 0.433947 0.379892 0.430071
110 1 0.432707 0.433547 0.00426127
478 1 0.934792 0.312356 0.376906
446 1 0.927347 0.179537 0.385424
448 1 0.871403 0.175157 0.447843
452 1 1.00149 0.308514 0.426252
420 1 0.00028631 0.182496 0.444401
480 1 0.872796 0.308127 0.436461
477 1 0.872279 0.241867 0.379995
479 1 0.937989 0.24243 0.436029
449 1 0.998665 0.240746 0.379013
494 1 0.437505 0.444389 0.371095
483 1 0.0599854 0.371768 0.431778
482 1 0.0574943 0.437614 0.373292
633 1 0.750719 0.368878 0.500635
485 1 0.126452 0.372626 0.368565
488 1 0.12455 0.438573 0.432922
626 1 0.558664 0.440834 0.50084
1307 1 0.813465 0.499068 0.322824
501 1 0.629282 0.371763 0.3813
504 1 0.631178 0.431505 0.443311
487 1 0.185816 0.374084 0.438044
491 1 0.311842 0.370055 0.435481
490 1 0.309667 0.435031 0.370224
489 1 0.248112 0.38058 0.372136
486 1 0.185448 0.441586 0.376822
492 1 0.251508 0.438724 0.444648
493 1 0.373848 0.378002 0.368486
1027 1 0.0613047 0.497921 0.0589171
1051 1 0.812337 0.49582 0.0639791
1039 1 0.443574 0.502633 0.0661325
1055 1 0.933295 0.500993 0.0641534
614 1 0.198698 0.42668 0.503227
1035 1 0.317379 0.498887 0.0587003
541 1 0.87182 -0.000621002 0.49557
1427 1 0.560852 0.500907 0.443352
1167 1 0.437224 0.498341 0.189376
31 1 0.933294 -0.00119458 0.0526487
86 1 0.684299 0.316493 0.00345846
594 1 0.566891 0.320328 0.503801
273 1 0.504964 0.00212352 0.247809
1413 1 0.128434 0.505808 0.370006
577 1 4.08117e-05 0.248359 0.496608
609 1 0.00233759 0.363905 0.488668
634 1 0.825304 0.436673 0.499594
538 1 0.809064 0.0608729 0.497663
530 1 0.560021 0.0571797 0.49212
38 1 0.182356 0.189844 -0.00133514
61 1 0.873864 0.12296 0.0061955
625 1 0.503153 0.3743 0.496453
638 1 0.936242 0.439677 0.501122
395 1 0.3124 0.00121529 0.439363
159 1 0.938945 -0.00162691 0.179912
1183 1 0.944629 0.494713 0.192791
1419 1 0.316465 0.495356 0.441242
27 1 0.81009 -0.000707424 0.0569388
277 1 0.62414 0.00149238 0.246968
393 1 0.254654 0.00528414 0.380447
1297 1 0.499046 0.501866 0.257503
553 1 0.251228 0.132364 0.497555
605 1 0.875471 0.250848 0.501592
74 1 0.317083 0.313765 -0.00121361
585 1 0.252683 0.254417 0.497803
2 1 0.0581173 0.0658204 4.98269e-05
122 1 0.810592 0.428287 0.00616676
534 1 0.676258 0.0588638 0.497336
118 1 0.683981 0.438397 -0.0022236
78 1 0.443808 0.311007 0.00377754
598 1 0.688098 0.305666 0.496729
606 1 0.940396 0.303169 0.493933
89 1 0.751427 0.251388 0.00398504
50 1 0.567446 0.188138 0.00511524
1037 1 0.387181 0.500201 0.0058091
106 1 0.313657 0.430351 0.00734541
1557 1 0.624935 0.497417 0.49798
520 1 0.127564 0.0607236 0.561713
547 1 0.0633185 0.122967 0.565615
642 1 0.0637346 0.0615657 0.625004
677 1 0.127038 0.122803 0.622873
516 1 0.00565265 0.0618431 0.56688
673 1 0.00123444 0.123183 0.62537
522 1 0.309153 0.0571375 0.502825
909 1 0.372161 0.00656379 0.875774
597 1 0.618296 0.252887 0.497937
524 1 0.258687 0.0669078 0.564066
551 1 0.195535 0.122629 0.568525
555 1 0.318204 0.125433 0.55906
646 1 0.189618 0.0671703 0.617095
650 1 0.308975 0.0712036 0.635041
681 1 0.245327 0.12659 0.634505
1947 1 0.814399 0.497886 0.939193
643 1 0.0586674 0.00294577 0.690221
913 1 0.510578 0.00734237 0.873422
1053 1 0.868551 0.493689 0.998042
641 1 0.99368 0.00633662 0.628817
528 1 0.368819 0.0582442 0.560572
559 1 0.433475 0.123093 0.562432
654 1 0.435326 0.0628442 0.628561
685 1 0.375413 0.123063 0.630689
1949 1 0.877023 0.494204 0.875705
647 1 0.186192 0.000451196 0.688767
532 1 0.495643 0.0667098 0.558633
689 1 0.495527 0.123674 0.627611
536 1 0.620388 0.0614547 0.56403
563 1 0.561902 0.122533 0.566562
658 1 0.55855 0.0627066 0.62452
693 1 0.623443 0.120973 0.624405
1951 1 0.933847 0.500786 0.938724
1693 1 0.876753 0.498414 0.63044
1024 1 0.874611 0.430619 0.93305
540 1 0.748879 0.0563948 0.562895
567 1 0.684209 0.115946 0.559222
571 1 0.813832 0.113183 0.56127
662 1 0.683419 0.0613422 0.632983
666 1 0.808591 0.0651276 0.637972
697 1 0.746138 0.121313 0.61573
618 1 0.319911 0.435482 0.499535
1023 1 0.940968 0.378779 0.93727
1022 1 0.940684 0.443168 0.876026
1809 1 0.499028 0.506332 0.742996
544 1 0.875257 0.054635 0.561542
575 1 0.936522 0.11831 0.562183
670 1 0.928745 0.0686138 0.623502
701 1 0.869095 0.125104 0.630777
582 1 0.184766 0.307876 0.497671
1021 1 0.875542 0.375607 0.872815
552 1 0.119508 0.190288 0.563073
579 1 0.0587516 0.243749 0.562514
584 1 0.124465 0.319577 0.560802
674 1 0.0627838 0.183218 0.629205
706 1 0.0588103 0.308792 0.623113
709 1 0.125669 0.257653 0.621882
580 1 1.00071 0.309827 0.558839
705 1 -0.000662339 0.247155 0.626043
548 1 0.000999386 0.181347 0.567774
573 1 0.872071 0.113223 0.504696
518 1 0.192328 0.0680869 0.503492
558 1 0.434841 0.181401 0.498004
1933 1 0.382365 0.496182 0.873446
556 1 0.256247 0.184472 0.563238
583 1 0.180259 0.246342 0.557801
587 1 0.314993 0.241628 0.562318
588 1 0.253847 0.309602 0.562562
678 1 0.185986 0.198798 0.630338
682 1 0.316986 0.184694 0.622601
710 1 0.186831 0.315204 0.615669
713 1 0.25618 0.249678 0.621813
714 1 0.30952 0.308974 0.623124
560 1 0.38107 0.184416 0.562944
591 1 0.443567 0.255686 0.567955
592 1 0.382333 0.312606 0.560895
686 1 0.433909 0.187568 0.628922
717 1 0.375626 0.254048 0.624499
718 1 0.438484 0.320475 0.63417
621 1 0.377411 0.371591 0.498486
915 1 0.557126 -0.00278044 0.930837
17 1 0.500479 0.00858648 0.996975
535 1 0.689545 -0.00238625 0.556625
721 1 0.513675 0.249467 0.624813
564 1 0.501944 0.185935 0.555336
596 1 0.506434 0.315337 0.560226
568 1 0.626389 0.17943 0.565482
595 1 0.561614 0.256206 0.557363
600 1 0.628457 0.314446 0.560198
690 1 0.565997 0.18138 0.625327
722 1 0.566375 0.321116 0.619199
725 1 0.619415 0.24787 0.621482
562 1 0.558346 0.191965 0.498488
589 1 0.382871 0.251163 0.508588
572 1 0.740292 0.18549 0.560561
599 1 0.68359 0.255396 0.5673
603 1 0.808439 0.253346 0.570795
604 1 0.754062 0.31109 0.564842
694 1 0.686617 0.185928 0.62961
698 1 0.80637 0.185902 0.620648
726 1 0.683635 0.308074 0.630533
729 1 0.74577 0.242308 0.624866
730 1 0.813102 0.317984 0.626625
775 1 0.185823 -0.00210174 0.804554
542 1 0.936848 0.0598313 0.502039
57 1 0.746745 0.119988 1.00256
576 1 0.88008 0.181176 0.561989
607 1 0.939632 0.242792 0.562183
608 1 0.876288 0.312459 0.568327
702 1 0.938554 0.184401 0.621937
733 1 0.881836 0.250989 0.626879
734 1 0.943147 0.323572 0.620388
671 1 0.928486 0.00838665 0.683611
1679 1 0.433424 0.49741 0.679038
73 1 0.255318 0.250903 0.995127
611 1 0.0520325 0.381025 0.557039
616 1 0.12328 0.439049 0.562525
738 1 0.0549818 0.440369 0.623984
741 1 0.119417 0.376929 0.624212
612 1 0.997108 0.442472 0.558233
42 1 0.303909 0.18312 0.99406
615 1 0.188142 0.379315 0.56546
619 1 0.3171 0.379114 0.559406
620 1 0.260209 0.438637 0.554937
742 1 0.182828 0.435762 0.624039
745 1 0.249421 0.372236 0.618103
746 1 0.319171 0.43982 0.622194
661 1 0.62155 -0.00168275 0.623264
569 1 0.746708 0.130517 0.501411
623 1 0.439829 0.372678 0.558244
624 1 0.375296 0.442632 0.560511
749 1 0.38141 0.37956 0.629261
750 1 0.437959 0.437056 0.619089
574 1 0.933706 0.180013 0.49723
653 1 0.369076 0.00614128 0.631261
586 1 0.314515 0.313444 0.499973
523 1 0.308598 0.00448838 0.571511
753 1 0.504572 0.378491 0.623076
628 1 0.500911 0.438124 0.562919
797 1 0.877302 0.00602972 0.746001
627 1 0.564397 0.373363 0.563555
632 1 0.623277 0.439797 0.56501
754 1 0.563348 0.438113 0.617513
757 1 0.632373 0.380461 0.623279
899 1 0.0629144 0.0020163 0.930179
631 1 0.691731 0.366674 0.55918
635 1 0.813745 0.384942 0.56607
636 1 0.751753 0.438343 0.559814
758 1 0.690626 0.436853 0.618301
761 1 0.745207 0.375079 0.627343
762 1 0.813116 0.442454 0.629287
1821 1 0.873817 0.494199 0.748785
41 1 0.244417 0.127473 0.999938
601 1 0.748684 0.250417 0.496487
543 1 0.939834 0.00405752 0.567859
737 1 -0.00040705 0.375329 0.62481
639 1 0.951901 0.372924 0.552127
640 1 0.878127 0.441449 0.565118
765 1 0.882159 0.37644 0.619474
766 1 0.944065 0.440854 0.623924
649 1 0.249754 0.0040027 0.62666
1561 1 0.747207 0.49986 0.501946
645 1 0.132569 0.00758942 0.627778
546 1 0.0668557 0.188663 0.498283
1675 1 0.307671 0.499809 0.691366
648 1 0.12725 0.0619415 0.688752
675 1 0.0672881 0.126455 0.686659
770 1 0.0612801 0.061198 0.754652
776 1 0.125898 0.0571221 0.809094
803 1 0.0552939 0.121931 0.817189
805 1 0.118102 0.12968 0.752342
772 1 0.991931 0.0616156 0.815181
1803 1 0.317228 0.503793 0.809884
652 1 0.244478 0.0630273 0.69235
679 1 0.182913 0.132792 0.694141
683 1 0.309269 0.132941 0.695034
774 1 0.187946 0.0698815 0.75183
778 1 0.309812 0.0657662 0.744131
780 1 0.245798 0.0667306 0.807985
807 1 0.183829 0.131173 0.817685
809 1 0.243155 0.12939 0.75226
811 1 0.30892 0.127958 0.801456
566 1 0.676837 0.195319 0.508968
33 1 1.00093 0.126303 0.99375
781 1 0.370582 0.00187829 0.74823
656 1 0.381079 0.0625429 0.691354
687 1 0.441483 0.123594 0.691902
782 1 0.442591 0.061301 0.744429
784 1 0.375125 0.0717555 0.805248
813 1 0.377719 0.130142 0.744856
815 1 0.436966 0.13152 0.80517
788 1 0.510455 0.0639452 0.815041
817 1 0.504336 0.117946 0.748212
1017 1 0.749088 0.373602 0.87326
660 1 0.498277 0.0567129 0.688841
664 1 0.623664 0.0617559 0.686295
691 1 0.561224 0.123181 0.68549
786 1 0.566925 0.061191 0.745678
792 1 0.629172 0.0609459 0.80759
819 1 0.564952 0.124169 0.811975
821 1 0.621412 0.12177 0.748097
531 1 0.555295 -0.00101599 0.559314
668 1 0.745778 0.0623978 0.691857
695 1 0.686578 0.125227 0.689095
699 1 0.81214 0.130776 0.685596
790 1 0.68813 0.0642216 0.750445
794 1 0.811022 0.0586136 0.756408
796 1 0.7551 0.0519406 0.810268
823 1 0.694641 0.121163 0.814718
825 1 0.751211 0.124845 0.750591
827 1 0.814216 0.118838 0.813923
6 1 0.185437 0.0581924 0.999712
570 1 0.809471 0.187291 0.517802
644 1 -0.000921651 0.0684953 0.689659
801 1 0.00408059 0.131836 0.755923
672 1 0.868891 0.0615588 0.688384
703 1 0.942145 0.121332 0.691257
798 1 0.935673 0.0671664 0.749524
800 1 0.878609 0.0563457 0.816965
829 1 0.871225 0.126995 0.75274
831 1 0.936642 0.122436 0.815418
1015 1 0.682377 0.374838 0.93935
1799 1 0.188696 0.493931 0.814634
680 1 0.119044 0.187071 0.686815
707 1 0.0679303 0.248748 0.688649
712 1 0.122336 0.317839 0.688745
802 1 0.0626281 0.194083 0.754478
808 1 0.122352 0.194483 0.810588
834 1 0.0603676 0.305696 0.746909
835 1 0.053277 0.253575 0.815418
837 1 0.129225 0.257071 0.745436
840 1 0.117899 0.303603 0.813608
833 1 0.996619 0.245088 0.754413
804 1 -0.00304026 0.187087 0.820709
836 1 1.0005 0.311645 0.816177
708 1 -0.000189765 0.318019 0.685051
684 1 0.254115 0.198055 0.690108
711 1 0.184406 0.26542 0.685308
715 1 0.314794 0.252061 0.679473
716 1 0.250195 0.31314 0.682425
806 1 0.185232 0.194083 0.745239
810 1 0.314101 0.193051 0.752556
812 1 0.248154 0.197553 0.817234
838 1 0.182945 0.316372 0.749887
839 1 0.18674 0.257624 0.814477
841 1 0.246514 0.251522 0.748528
842 1 0.312967 0.31729 0.745354
843 1 0.317326 0.257868 0.806983
844 1 0.250296 0.316735 0.8136
688 1 0.371003 0.187517 0.687693
719 1 0.435584 0.256253 0.681731
720 1 0.370311 0.31159 0.68558
814 1 0.440267 0.190063 0.743256
816 1 0.372778 0.188691 0.808025
845 1 0.372127 0.253016 0.744276
846 1 0.433942 0.313922 0.744516
847 1 0.442295 0.248379 0.815571
848 1 0.376256 0.318731 0.807048
852 1 0.495541 0.310581 0.819381
692 1 0.495563 0.19114 0.687014
724 1 0.501618 0.310514 0.692805
820 1 0.500921 0.187133 0.807366
849 1 0.496782 0.248067 0.750443
696 1 0.626494 0.190703 0.691628
723 1 0.560125 0.246077 0.698578
728 1 0.613281 0.310229 0.688642
818 1 0.567807 0.18163 0.748736
824 1 0.623586 0.193727 0.808794
850 1 0.558743 0.313238 0.751308
851 1 0.562637 0.250141 0.809154
853 1 0.622582 0.250821 0.752111
856 1 0.620287 0.310656 0.812013
700 1 0.749248 0.191004 0.691536
727 1 0.684817 0.251867 0.693833
731 1 0.818523 0.249018 0.679799
732 1 0.754941 0.306018 0.687402
822 1 0.682181 0.183188 0.75001
826 1 0.806951 0.190882 0.757775
828 1 0.752079 0.188817 0.816198
854 1 0.685002 0.317068 0.750632
855 1 0.685176 0.245158 0.813821
857 1 0.747447 0.258467 0.754624
858 1 0.812995 0.315171 0.752805
859 1 0.813479 0.248648 0.814422
860 1 0.750966 0.316043 0.812557
676 1 0.0051437 0.185097 0.690551
704 1 0.880981 0.187029 0.688088
735 1 0.939714 0.244228 0.68849
736 1 0.875953 0.311167 0.683804
830 1 0.938727 0.185998 0.759674
832 1 0.867631 0.185802 0.814686
861 1 0.874888 0.243378 0.751501
862 1 0.939799 0.314159 0.746682
863 1 0.93949 0.24722 0.8101
864 1 0.884082 0.311601 0.81272
739 1 0.0560664 0.375091 0.688246
744 1 0.125958 0.43334 0.689909
866 1 0.064474 0.444444 0.75254
867 1 0.0593892 0.373463 0.820164
869 1 0.111926 0.374919 0.753486
872 1 0.120472 0.434501 0.820623
865 1 0.00036485 0.381635 0.758953
868 1 0.00648774 0.438322 0.81905
740 1 0.00319781 0.43858 0.695849
1020 1 0.749751 0.436922 0.940416
34 1 0.0604213 0.184803 0.999509
1687 1 0.689537 0.503623 0.686616
743 1 0.188766 0.372043 0.682806
747 1 0.308483 0.378665 0.690861
748 1 0.251321 0.436832 0.68117
870 1 0.186847 0.438149 0.752251
871 1 0.195313 0.384642 0.814543
873 1 0.249301 0.374035 0.743613
874 1 0.31057 0.43818 0.748858
875 1 0.310568 0.370733 0.811585
876 1 0.25648 0.436829 0.809207
779 1 0.314357 0.0110619 0.811972
1793 1 0.00275587 0.501131 0.756988
911 1 0.43457 0.00773644 0.93774
751 1 0.448933 0.380454 0.685202
752 1 0.370359 0.437782 0.684645
877 1 0.375012 0.374274 0.746255
878 1 0.431295 0.445528 0.749448
879 1 0.434651 0.37646 0.807254
880 1 0.372221 0.433717 0.805576
881 1 0.50384 0.379821 0.751278
1014 1 0.689933 0.437438 0.876514
1019 1 0.80941 0.370908 0.936693
1018 1 0.809806 0.439779 0.875632
756 1 0.502994 0.437886 0.688281
884 1 0.497084 0.440647 0.811937
755 1 0.555556 0.375741 0.68568
760 1 0.627801 0.444595 0.687606
882 1 0.564963 0.449278 0.74498
883 1 0.561329 0.382618 0.815771
885 1 0.622505 0.381497 0.747018
888 1 0.617809 0.441509 0.810729
759 1 0.684233 0.376972 0.684265
763 1 0.813684 0.376967 0.685236
764 1 0.754031 0.429643 0.684846
886 1 0.690255 0.440548 0.747107
887 1 0.69152 0.3826 0.809414
889 1 0.755845 0.371347 0.747077
890 1 0.812598 0.438699 0.750977
891 1 0.817564 0.375197 0.812263
892 1 0.755438 0.441772 0.812547
1923 1 0.0618748 0.493342 0.946735
785 1 0.5049 0.000803315 0.751276
767 1 0.939985 0.383436 0.694863
768 1 0.874551 0.432668 0.683669
893 1 0.878542 0.375784 0.749296
894 1 0.932152 0.443288 0.755295
895 1 0.938644 0.379906 0.80886
896 1 0.874461 0.43938 0.810158
1671 1 0.179402 0.499633 0.680723
53 1 0.622625 0.120409 0.99586
1921 1 0.0052879 0.501048 0.88162
898 1 0.0640204 0.0560284 0.86613
904 1 0.124821 0.0573843 0.93814
931 1 0.0621829 0.117572 0.934958
933 1 0.118682 0.118317 0.872605
929 1 1.00175 0.119787 0.876172
902 1 0.183574 0.0627523 0.872204
906 1 0.315918 0.0673728 0.867136
908 1 0.247343 0.0641761 0.92707
935 1 0.189862 0.125654 0.934146
937 1 0.243463 0.129261 0.873852
939 1 0.308209 0.125999 0.930719
1931 1 0.321515 0.49898 0.937095
109 1 0.369512 0.374107 1.00489
910 1 0.433553 0.0629018 0.865491
912 1 0.372268 0.0670725 0.931709
941 1 0.374789 0.123413 0.868964
943 1 0.439981 0.128625 0.939079
945 1 0.502141 0.132115 0.868157
1673 1 0.243034 0.495322 0.622405
916 1 0.50302 0.0689692 0.933671
914 1 0.566651 0.0654028 0.873393
920 1 0.622618 0.0563177 0.931013
947 1 0.568022 0.124645 0.933583
949 1 0.629134 0.133043 0.866249
515 1 0.0644905 0.00098275 0.570204
46 1 0.437438 0.190829 0.998177
907 1 0.308957 0.00201568 0.930568
1695 1 0.948785 0.504068 0.687396
1016 1 0.625167 0.432328 0.937493
918 1 0.695801 0.056644 0.86807
922 1 0.807078 0.0629071 0.880646
924 1 0.746993 0.0549971 0.936727
951 1 0.679596 0.118119 0.929893
953 1 0.748733 0.125649 0.883531
955 1 0.81482 0.126353 0.946243
659 1 0.560557 0.00256748 0.688624
1009 1 0.49603 0.373366 0.871348
77 1 0.374558 0.249411 0.998409
1013 1 0.627806 0.376899 0.871999
1563 1 0.812065 0.49754 0.563852
900 1 0.00184669 0.0554348 0.930054
926 1 0.932107 0.0611167 0.879958
928 1 0.866235 0.0662403 0.9364
957 1 0.871844 0.121006 0.875112
959 1 0.928412 0.124523 0.935617
1010 1 0.560654 0.436076 0.875946
657 1 0.490008 0.00371296 0.620925
930 1 0.0547279 0.184203 0.881313
936 1 0.125572 0.187356 0.931249
962 1 0.0661267 0.312164 0.878728
963 1 0.066032 0.247694 0.939121
965 1 0.123682 0.248098 0.883205
968 1 0.130679 0.308404 0.941155
961 1 0.992541 0.253054 0.873793
964 1 0.993839 0.315619 0.93445
85 1 0.632194 0.245857 0.993673
934 1 0.185005 0.193927 0.877518
938 1 0.315903 0.184865 0.866015
940 1 0.244096 0.18887 0.935068
966 1 0.191535 0.31437 0.875219
967 1 0.186165 0.247602 0.939214
969 1 0.243797 0.253538 0.876645
970 1 0.314765 0.31193 0.872061
971 1 0.310014 0.251429 0.928513
972 1 0.253067 0.310189 0.935107
10 1 0.309599 0.0730745 0.996913
973 1 0.371697 0.252841 0.870471
944 1 0.374306 0.188607 0.937398
942 1 0.431001 0.185401 0.877248
976 1 0.371464 0.316988 0.931772
975 1 0.433541 0.247759 0.930452
974 1 0.427223 0.311918 0.870938
977 1 0.50338 0.246791 0.875422
1008 1 0.377836 0.435848 0.940883
948 1 0.511593 0.187322 0.940079
980 1 0.498612 0.312719 0.932381
981 1 0.628541 0.251139 0.878951
978 1 0.561489 0.312274 0.871966
984 1 0.623171 0.315737 0.93829
946 1 0.564193 0.184691 0.870096
952 1 0.627359 0.185653 0.933281
979 1 0.559982 0.253235 0.934659
1667 1 0.065651 0.497547 0.682808
14 1 0.43626 0.0686072 1.00438
545 1 1.00231 0.122647 0.500383
956 1 0.753508 0.187462 0.946328
987 1 0.804666 0.255143 0.936157
983 1 0.686801 0.25362 0.935302
950 1 0.693518 0.186213 0.881267
985 1 0.744578 0.25718 0.868324
988 1 0.740432 0.315175 0.940426
986 1 0.810409 0.317633 0.869679
982 1 0.688021 0.319154 0.873158
954 1 0.813713 0.193588 0.875263
777 1 0.249905 -0.000821899 0.750576
527 1 0.431867 0.00336696 0.561011
1007 1 0.440899 0.37036 0.939147
1006 1 0.439753 0.43335 0.885234
990 1 0.936231 0.311232 0.875457
932 1 0.992179 0.182881 0.936057
958 1 0.936035 0.189229 0.874291
960 1 0.874707 0.194595 0.931356
992 1 0.87416 0.309383 0.935341
989 1 0.871675 0.248395 0.872013
991 1 0.938221 0.246132 0.93619
1011 1 0.561368 0.37251 0.932414
62 1 0.933901 0.181566 0.995171
993 1 0.996786 0.370387 0.871841
995 1 0.0583518 0.370189 0.93626
1000 1 0.132503 0.438778 0.942816
997 1 0.135327 0.378068 0.887011
994 1 0.0607622 0.428042 0.881219
996 1 0.997719 0.437978 0.942133
998 1 0.191528 0.440528 0.879208
999 1 0.190174 0.371417 0.944092
1002 1 0.312044 0.437165 0.871816
1004 1 0.253221 0.434408 0.942439
1003 1 0.315693 0.378816 0.939895
1001 1 0.250872 0.374367 0.878389
1012 1 0.496287 0.439636 0.945721
1005 1 0.379162 0.379134 0.87031
13 1 0.376217 0.00847049 0.999416
117 1 0.620193 0.374478 0.99915
1691 1 0.808157 0.49583 0.689807
54 1 0.684452 0.178172 0.998023
66 1 0.0594934 0.309573 0.998384
22 1 0.683038 0.0542062 0.999802
795 1 0.812133 -0.00146194 0.812613
1543 1 0.194843 0.494067 0.556924
49 1 0.507507 0.128221 1.00446
9 1 0.249552 0.00291577 0.98923
30 1 0.927322 0.0624821 0.994007
1681 1 0.497632 0.497692 0.617596
37 1 0.132658 0.126176 0.998773
90 1 0.8165 0.3167 0.996877
98 1 0.0698089 0.430871 0.994975
114 1 0.564199 0.434488 0.999371
29 1 0.874199 -0.000562696 0.996514
65 1 0.999056 0.247621 0.991859
93 1 0.87713 0.250245 0.99529
617 1 0.255233 0.368918 0.504931
513 1 0.00619069 0.00364437 0.506662
622 1 0.435448 0.439585 0.502565
630 1 0.691221 0.434353 0.504578
629 1 0.629198 0.373518 0.506319
565 1 0.614301 0.119481 0.500632
18 1 0.563989 0.0592197 0.993319
557 1 0.378062 0.119039 0.506347
113 1 0.501071 0.374557 0.998419
578 1 0.0658326 0.316389 0.50274
550 1 0.188004 0.189024 0.503029
514 1 0.0675057 0.0625748 0.498556
1033 1 0.256634 0.499481 0.999247
81 1 0.50627 0.254302 1.00046
1032 1 0.127226 0.561606 0.0688607
1059 1 0.0581894 0.630764 0.0649633
1154 1 0.0636715 0.566344 0.126336
1189 1 0.119949 0.624227 0.12245
1185 1 1.00132 0.626636 0.123726
1028 1 -0.00109845 0.564379 0.0730514
1047 1 0.687091 0.504884 0.0635455
1171 1 0.563644 0.494732 0.189514
1508 1 0.995732 0.937599 0.434999
1157 1 0.117596 0.494084 0.124808
1036 1 0.253553 0.555667 0.0598725
1063 1 0.192933 0.622272 0.0591352
1067 1 0.314907 0.630596 0.0586986
1158 1 0.194494 0.564112 0.119906
1162 1 0.310791 0.567469 0.122329
1193 1 0.251566 0.626421 0.124351
537 1 0.747704 0.995815 0.498753
1078 1 0.691296 0.681766 0.0123296
1118 1 0.938386 0.807694 0.00413419
1125 1 0.122923 0.873687 0.00262169
1040 1 0.371024 0.564476 0.0617925
1071 1 0.432901 0.62408 0.061892
1166 1 0.443528 0.557653 0.122931
1197 1 0.378265 0.629809 0.121129
1044 1 0.505039 0.566163 0.0553677
1201 1 0.502818 0.624298 0.128787
1026 1 0.0567193 0.563928 0.00869656
149 1 0.617084 0.999228 0.128006
1117 1 0.869375 0.751972 0.00568664
1048 1 0.627124 0.558202 0.0730499
1075 1 0.563069 0.621274 0.0641056
1170 1 0.56008 0.563219 0.131946
1205 1 0.627202 0.616749 0.132391
1533 1 0.881185 0.872818 0.374004
1085 1 0.879853 0.631788 0.0102252
1034 1 0.313635 0.561188 -0.00076749
1052 1 0.749864 0.556555 0.0665328
1079 1 0.685679 0.62079 0.0697215
1083 1 0.813377 0.625297 0.0635984
1174 1 0.690402 0.562381 0.135273
1178 1 0.812578 0.558963 0.12578
1209 1 0.752576 0.625784 0.124707
1534 1 0.936025 0.941998 0.376579
1546 1 0.311071 0.569996 0.50234
1058 1 0.0597801 0.687675 0.00189988
1056 1 0.87319 0.563227 0.0620668
1087 1 0.936837 0.618413 0.071585
1182 1 0.938809 0.559845 0.133809
1213 1 0.874182 0.621736 0.126882
1061 1 0.124693 0.616397 0.00811603
283 1 0.81037 0.988564 0.317322
1535 1 0.939625 0.881127 0.443639
1622 1 0.687176 0.817283 0.494454
1064 1 0.127493 0.682622 0.0597731
1091 1 0.067907 0.750551 0.0629834
1096 1 0.13256 0.813435 0.0699648
1186 1 0.0626747 0.685002 0.128665
1218 1 0.0621811 0.812678 0.12443
1221 1 0.134765 0.747516 0.126263
1217 1 0.00993114 0.751244 0.120181
1060 1 0.994215 0.688065 0.0612064
1068 1 0.246081 0.687712 0.059661
1095 1 0.191201 0.75245 0.0617491
1099 1 0.309579 0.746572 0.0613993
1100 1 0.254517 0.813848 0.0611086
1190 1 0.191246 0.682113 0.115904
1194 1 0.311409 0.688737 0.120349
1222 1 0.195229 0.814144 0.130558
1225 1 0.254382 0.745507 0.124216
1226 1 0.314719 0.803825 0.121843
1153 1 0.994664 0.50207 0.125715
1072 1 0.376824 0.693359 0.0572616
1103 1 0.43676 0.759008 0.0656901
1104 1 0.375366 0.815272 0.0629147
1198 1 0.438459 0.687898 0.117593
1229 1 0.374602 0.74691 0.121539
1230 1 0.432342 0.812153 0.12214
1108 1 0.503786 0.815358 0.0560099
1233 1 0.5025 0.746371 0.121676
1281 1 0.00313208 0.495684 0.246808
1030 1 0.187763 0.563227 -0.00166348
1654 1 0.685492 0.939975 0.487952
1076 1 0.499923 0.684213 0.0591531
1080 1 0.621161 0.684131 0.0626958
1107 1 0.560532 0.746121 0.0667013
1112 1 0.623622 0.802762 0.0625164
1202 1 0.561666 0.684329 0.128344
1234 1 0.557441 0.814117 0.128209
1237 1 0.622535 0.754288 0.125811
1573 1 0.121495 0.626212 0.492294
1043 1 0.557637 0.500577 0.0622561
1081 1 0.747366 0.622169 0.00802353
1084 1 0.754856 0.684225 0.0667282
1111 1 0.679551 0.749268 0.0718275
1115 1 0.811133 0.748491 0.0667715
1116 1 0.746852 0.80935 0.0642397
1206 1 0.68045 0.687975 0.128405
1210 1 0.816278 0.691491 0.128958
1238 1 0.683843 0.813964 0.134298
1241 1 0.744493 0.746206 0.127918
1242 1 0.816009 0.809968 0.124882
1179 1 0.815909 0.502229 0.192849
1092 1 1.0055 0.810507 0.0663681
1088 1 0.873287 0.68926 0.0723839
1119 1 0.940454 0.748679 0.0647467
1120 1 0.872472 0.812139 0.0624246
1214 1 0.940551 0.685331 0.129683
1245 1 0.878667 0.755915 0.126139
1246 1 0.944948 0.810154 0.132364
529 1 0.499387 0.995834 0.497603
405 1 0.618412 0.989841 0.375684
391 1 0.189344 0.994315 0.434941
1123 1 0.0618355 0.870601 0.0666092
1128 1 0.127715 0.93948 0.0682653
1250 1 0.0622473 0.944019 0.123251
1253 1 0.134574 0.878032 0.126414
1042 1 0.568009 0.557533 0.00230631
1127 1 0.189669 0.870578 0.0543642
1131 1 0.315314 0.871587 0.0632373
1132 1 0.253827 0.947638 0.0557767
1254 1 0.18879 0.937269 0.122693
1257 1 0.251207 0.883597 0.121545
1258 1 0.309031 0.942562 0.124991
1536 1 0.880498 0.939336 0.443859
151 1 0.687934 0.996883 0.188527
1570 1 0.0596995 0.691177 0.500625
1421 1 0.373132 0.499313 0.376909
1135 1 0.433516 0.878346 0.0575451
1136 1 0.376349 0.941632 0.0683809
1261 1 0.373916 0.875214 0.123188
1262 1 0.439368 0.945873 0.120948
1265 1 0.495628 0.877726 0.124952
1283 1 0.0613665 0.503655 0.310044
1602 1 0.0636293 0.817723 0.492134
1553 1 0.490639 0.508329 0.498829
1082 1 0.810791 0.6884 0.00817389
281 1 0.750573 0.997985 0.244652
1140 1 0.495259 0.937545 0.0588266
1139 1 0.564569 0.874709 0.062269
1144 1 0.627987 0.938107 0.066094
1266 1 0.556729 0.938757 0.113908
1269 1 0.622768 0.874577 0.130695
1050 1 0.812431 0.558231 -0.00201395
1146 1 0.812156 0.934236 0.00851492
1143 1 0.678112 0.86574 0.0640059
1147 1 0.816189 0.8675 0.0634437
1148 1 0.745799 0.93465 0.0654604
1270 1 0.684181 0.93587 0.132474
1273 1 0.7373 0.874078 0.124939
1274 1 0.811591 0.929709 0.126276
1554 1 0.560727 0.568923 0.498375
397 1 0.374229 0.989638 0.370815
1634 1 0.0585971 0.940815 0.500366
1532 1 0.754065 0.938235 0.432335
1249 1 -0.000440421 0.882023 0.124131
1124 1 0.00893084 0.942288 0.0633962
1151 1 0.945325 0.875099 0.0652104
1152 1 0.87761 0.934057 0.0585602
1277 1 0.877794 0.872822 0.126407
1278 1 0.940524 0.932827 0.122755
1657 1 0.750618 0.869425 0.494104
1160 1 0.126209 0.562676 0.189701
1187 1 0.0720835 0.624982 0.190234
1282 1 0.0655187 0.557893 0.246746
1288 1 0.125095 0.558852 0.311395
1315 1 0.0633938 0.625553 0.312407
1317 1 0.12934 0.626871 0.251063
1156 1 0.00655673 0.572024 0.184987
1164 1 0.251246 0.563821 0.187479
1191 1 0.182331 0.624849 0.183672
1195 1 0.319294 0.62016 0.182412
1286 1 0.181228 0.559838 0.25182
1290 1 0.313649 0.559903 0.250625
1292 1 0.253087 0.570961 0.320395
1319 1 0.190163 0.617297 0.317852
1321 1 0.244082 0.620305 0.252586
1323 1 0.319973 0.620271 0.306671
1531 1 0.812088 0.871854 0.441767
1630 1 0.93575 0.813065 0.503072
1538 1 0.0586859 0.566149 0.495287
1168 1 0.377225 0.559045 0.191498
1199 1 0.433497 0.62668 0.183632
1294 1 0.440953 0.566611 0.2515
1296 1 0.378962 0.562829 0.313981
1325 1 0.379397 0.622621 0.244812
1327 1 0.44001 0.62764 0.313146
1172 1 0.499504 0.557131 0.189308
1530 1 0.815558 0.924271 0.377445
257 1 0.00540141 1.00024 0.245729
1329 1 0.504538 0.616377 0.249489
1300 1 0.499584 0.564723 0.323958
1176 1 0.622926 0.556299 0.191844
1203 1 0.56179 0.628845 0.187408
1298 1 0.563833 0.554014 0.254227
1304 1 0.623813 0.565734 0.315119
1331 1 0.560222 0.630443 0.312431
1333 1 0.62368 0.61723 0.252471
1180 1 0.752402 0.56259 0.189451
1207 1 0.690859 0.624606 0.189354
1211 1 0.815054 0.623079 0.188791
1302 1 0.686992 0.558411 0.248661
1306 1 0.817764 0.565404 0.251616
1308 1 0.761628 0.557023 0.308696
1335 1 0.6879 0.623887 0.314861
1337 1 0.749564 0.622691 0.248385
1339 1 0.814176 0.625731 0.312203
1586 1 0.558563 0.697641 0.497084
1089 1 0.000364199 0.751182 -0.00345276
1284 1 0.990688 0.562426 0.316581
287 1 0.940691 0.993884 0.307151
1313 1 0.997971 0.613065 0.255118
1184 1 0.873928 0.559023 0.183803
1215 1 0.930453 0.620536 0.191203
1310 1 0.932987 0.561316 0.24497
1312 1 0.868314 0.563505 0.314544
1341 1 0.875738 0.625677 0.253323
1343 1 0.941874 0.627829 0.311562
1192 1 0.125878 0.683712 0.187204
1219 1 0.0732731 0.749408 0.179855
1224 1 0.125809 0.813415 0.183038
1314 1 0.0634784 0.681563 0.250083
1320 1 0.12666 0.682637 0.313571
1346 1 0.0718331 0.811905 0.24281
1347 1 0.0630647 0.744513 0.30993
1349 1 0.126862 0.749512 0.244988
1352 1 0.126377 0.805357 0.320331
1188 1 0.00346129 0.6844 0.183877
1348 1 0.00776908 0.806875 0.306219
1345 1 0.00909175 0.752187 0.245242
1196 1 0.253042 0.68456 0.183994
1223 1 0.18796 0.749727 0.186485
1227 1 0.314423 0.753972 0.195529
1228 1 0.256298 0.813339 0.182653
1318 1 0.187637 0.684697 0.259148
1322 1 0.310173 0.673416 0.242225
1324 1 0.252296 0.699689 0.309049
1350 1 0.194625 0.812609 0.249076
1351 1 0.188378 0.7576 0.313263
1353 1 0.245728 0.745467 0.242474
1354 1 0.304734 0.814128 0.249589
1355 1 0.315146 0.747637 0.299434
1356 1 0.253299 0.813484 0.315502
1200 1 0.372541 0.682775 0.178144
1231 1 0.438615 0.7535 0.179868
1232 1 0.373364 0.807165 0.179898
1326 1 0.442865 0.683417 0.248659
1328 1 0.374918 0.683831 0.302704
1357 1 0.389557 0.747471 0.246175
1358 1 0.437581 0.808418 0.250682
1359 1 0.442694 0.746461 0.313687
1360 1 0.372688 0.807446 0.3127
1361 1 0.501141 0.751691 0.248233
1332 1 0.503362 0.69177 0.309084
1236 1 0.496124 0.813826 0.18078
1204 1 0.494895 0.692886 0.18487
1364 1 0.500101 0.808484 0.312409
1208 1 0.619525 0.691528 0.190758
1235 1 0.559523 0.748777 0.184696
1240 1 0.620478 0.814415 0.188594
1330 1 0.559006 0.685648 0.245538
1336 1 0.628369 0.681117 0.312496
1362 1 0.563286 0.805064 0.246149
1363 1 0.572261 0.744505 0.316334
1365 1 0.624064 0.746088 0.249609
1368 1 0.627948 0.807168 0.319139
1212 1 0.748485 0.684118 0.188693
1239 1 0.69281 0.749673 0.190121
1243 1 0.816976 0.74856 0.187662
1244 1 0.754928 0.810069 0.179706
1334 1 0.68937 0.689551 0.251035
1338 1 0.812111 0.693328 0.251583
1340 1 0.755222 0.681693 0.312003
1366 1 0.68799 0.812655 0.250462
1367 1 0.692967 0.742145 0.311234
1369 1 0.755177 0.751822 0.251097
1370 1 0.826666 0.81245 0.254442
1371 1 0.818402 0.749664 0.319942
1372 1 0.756155 0.804406 0.317911
1316 1 0.00383551 0.682083 0.304579
1220 1 0.00860108 0.812983 0.186993
1216 1 0.876361 0.687961 0.182419
1247 1 0.945159 0.749615 0.1843
1248 1 0.879879 0.814862 0.188976
1342 1 0.943607 0.689837 0.245912
1344 1 0.883333 0.695409 0.30932
1373 1 0.880963 0.748457 0.247664
1374 1 0.938222 0.817373 0.251309
1375 1 0.949471 0.749771 0.303543
1376 1 0.883911 0.809022 0.318989
1653 1 0.621613 0.870163 0.491487
1251 1 0.065606 0.874749 0.183466
1256 1 0.128832 0.9433 0.181945
1378 1 0.0706083 0.939235 0.247399
1379 1 0.0616199 0.872521 0.311467
1381 1 0.132191 0.873153 0.25116
1384 1 0.122563 0.93494 0.310085
1252 1 0.00252977 0.937877 0.185059
1377 1 0.00409481 0.871527 0.243642
1527 1 0.691013 0.876385 0.436188
1255 1 0.193341 0.877099 0.191139
1259 1 0.311629 0.873905 0.183532
1260 1 0.258333 0.940074 0.185141
1382 1 0.187197 0.937774 0.248969
1383 1 0.190657 0.870458 0.313795
1385 1 0.253138 0.888375 0.256775
1386 1 0.316023 0.935636 0.2481
1387 1 0.310097 0.880244 0.311131
1388 1 0.245199 0.938793 0.314864
1526 1 0.686478 0.939503 0.379338
1529 1 0.744927 0.872449 0.37317
1263 1 0.441766 0.876714 0.18429
1264 1 0.377412 0.93707 0.180028
1389 1 0.374457 0.871335 0.245706
1390 1 0.435238 0.933309 0.243459
1391 1 0.431052 0.879763 0.306964
1392 1 0.374873 0.932956 0.306768
1396 1 0.500938 0.935345 0.305641
1393 1 0.499484 0.869571 0.250769
1415 1 0.188764 0.503584 0.438961
1268 1 0.501339 0.938394 0.183381
1267 1 0.554517 0.875617 0.191357
1272 1 0.621989 0.939798 0.1879
1394 1 0.567702 0.935254 0.250743
1395 1 0.566521 0.871621 0.316036
1397 1 0.61853 0.870687 0.250471
1400 1 0.620226 0.932887 0.315626
1618 1 0.558263 0.812063 0.498581
1524 1 0.490081 0.931615 0.435908
1311 1 0.934092 0.498525 0.313425
1271 1 0.681759 0.875562 0.190498
1275 1 0.814836 0.875167 0.186462
1276 1 0.750113 0.928083 0.189506
1398 1 0.678376 0.937758 0.251119
1399 1 0.683899 0.880552 0.313094
1401 1 0.758914 0.875609 0.250953
1402 1 0.810304 0.937251 0.253765
1403 1 0.815537 0.873487 0.310618
1404 1 0.744779 0.933471 0.31118
1521 1 0.497465 0.874401 0.366633
1057 1 -0.00266031 0.619142 0.00324326
1522 1 0.554244 0.93215 0.372813
1380 1 0.00428439 0.94261 0.312332
1279 1 0.937148 0.880593 0.189603
1280 1 0.874554 0.934195 0.191229
1405 1 0.876215 0.876579 0.249503
1406 1 0.940344 0.938799 0.245184
1407 1 0.939533 0.87701 0.310773
1408 1 0.872424 0.932675 0.312517
1159 1 0.187323 0.504984 0.174429
1621 1 0.627418 0.752137 0.503195
1410 1 0.0599102 0.564488 0.376782
1416 1 0.123061 0.560121 0.436735
1443 1 0.0558863 0.626332 0.436355
1445 1 0.123749 0.62043 0.369511
1412 1 0.994487 0.557767 0.437044
1441 1 0.999436 0.62871 0.373922
263 1 0.189377 1.00092 0.32025
25 1 0.742614 0.993351 0.00139465
285 1 0.871731 0.995927 0.246889
1449 1 0.255679 0.634029 0.372111
1451 1 0.311029 0.631408 0.435112
1420 1 0.250863 0.578076 0.440752
1447 1 0.183854 0.635096 0.429095
1414 1 0.192798 0.560069 0.378459
1418 1 0.314808 0.567302 0.37679
157 1 0.868255 0.992849 0.124159
1662 1 0.944362 0.942402 0.503032
1142 1 0.682156 0.934189 -0.00300944
1113 1 0.749382 0.746853 0.000570513
1545 1 0.250618 0.511447 0.494607
1422 1 0.43224 0.562826 0.375982
1455 1 0.431705 0.62371 0.439236
1424 1 0.373878 0.563541 0.442418
1453 1 0.376932 0.628855 0.371275
279 1 0.680567 0.994487 0.315071
1428 1 0.496579 0.569353 0.433952
1411 1 0.0603656 0.501195 0.43475
1523 1 0.561555 0.873673 0.428895
1457 1 0.498258 0.627329 0.377215
1461 1 0.625873 0.629076 0.375301
1426 1 0.562017 0.564233 0.381149
1432 1 0.622746 0.563297 0.445326
1459 1 0.561145 0.632538 0.438685
1528 1 0.626142 0.936867 0.438651
1145 1 0.745418 0.86647 0.00287742
1537 1 0.99376 0.505314 0.498113
1430 1 0.685793 0.561062 0.379328
1463 1 0.687111 0.626475 0.441847
1436 1 0.745202 0.564055 0.446795
1434 1 0.813551 0.561053 0.377735
1465 1 0.758117 0.617548 0.379571
1467 1 0.817232 0.638623 0.437427
1440 1 0.877366 0.578556 0.438903
1469 1 0.877722 0.630211 0.373991
1471 1 0.938435 0.633052 0.442337
1438 1 0.924295 0.564926 0.37872
1574 1 0.185017 0.692716 0.496859
1474 1 0.0635929 0.815416 0.377054
1480 1 0.125531 0.817776 0.434756
1475 1 0.0594334 0.74787 0.435139
1442 1 0.0576384 0.690411 0.371696
1477 1 0.121109 0.752086 0.375181
1448 1 0.117452 0.686224 0.425581
1444 1 0.999347 0.692733 0.439131
1476 1 0.00378689 0.818365 0.436604
1562 1 0.813236 0.569607 0.491003
411 1 0.811499 0.997556 0.435897
1605 1 0.118102 0.75009 0.496353
1577 1 0.242537 0.637023 0.497533
271 1 0.440115 0.996283 0.311194
403 1 0.56369 0.99688 0.433769
1452 1 0.253575 0.697016 0.435251
1450 1 0.312688 0.69387 0.366687
1446 1 0.190736 0.690296 0.366352
1482 1 0.317947 0.819378 0.37811
1484 1 0.261725 0.81405 0.440585
1478 1 0.19289 0.810966 0.381016
1481 1 0.257903 0.756463 0.373495
1483 1 0.316367 0.75109 0.429969
1479 1 0.179941 0.743992 0.433986
1423 1 0.430661 0.501356 0.435836
1456 1 0.373419 0.684758 0.433388
1487 1 0.43638 0.750173 0.438723
1485 1 0.374188 0.748279 0.365545
1488 1 0.375342 0.810921 0.438268
1454 1 0.437386 0.684607 0.371377
1486 1 0.431643 0.809809 0.373606
1289 1 0.24773 0.507319 0.250414
1492 1 0.493804 0.820783 0.430744
1489 1 0.498043 0.742809 0.378762
1460 1 0.494305 0.685586 0.440032
1490 1 0.558288 0.810399 0.376557
1458 1 0.561003 0.689436 0.377458
1493 1 0.625478 0.747447 0.378981
1491 1 0.553577 0.75626 0.440369
1496 1 0.631286 0.814955 0.431534
1464 1 0.625645 0.687735 0.441742
269 1 0.380229 1.00045 0.246867
1462 1 0.692556 0.687417 0.374835
1494 1 0.696459 0.810865 0.374066
1466 1 0.814468 0.685642 0.367953
1499 1 0.819518 0.748238 0.437269
1495 1 0.690106 0.745378 0.434375
1500 1 0.751849 0.810857 0.435111
1497 1 0.753706 0.743056 0.372196
1498 1 0.814695 0.814541 0.373622
1468 1 0.750852 0.6928 0.439856
155 1 0.813028 0.999481 0.189114
1473 1 0.00347943 0.754545 0.372301
1504 1 0.882552 0.807461 0.435819
1503 1 0.947254 0.754372 0.437742
1501 1 0.888656 0.749332 0.373021
1470 1 0.945139 0.694267 0.362444
1502 1 0.944937 0.817341 0.379933
1472 1 0.884508 0.69065 0.435724
1505 1 0.00229261 0.872568 0.368241
1569 1 -0.00498847 0.62622 0.502769
19 1 0.560293 0.999353 0.0534911
1525 1 0.629295 0.872179 0.373621
1291 1 0.311536 0.506736 0.311575
1512 1 0.125244 0.93752 0.43325
1509 1 0.129666 0.875914 0.371116
1507 1 0.0594058 0.883914 0.433994
1506 1 0.0584851 0.945418 0.377478
1433 1 0.748301 0.505225 0.38628
1514 1 0.305749 0.937887 0.372012
1515 1 0.313977 0.878431 0.442258
1510 1 0.18931 0.932447 0.374004
1513 1 0.254591 0.87774 0.376776
1516 1 0.255625 0.934728 0.44055
1511 1 0.191459 0.875378 0.43564
1518 1 0.440132 0.934137 0.375009
1520 1 0.369711 0.945079 0.437178
1519 1 0.431169 0.879439 0.44695
1517 1 0.37586 0.876371 0.376911
275 1 0.558658 1.00208 0.309095
413 1 0.869174 0.994985 0.377784
1439 1 0.931596 0.501912 0.440445
1417 1 0.253558 0.502094 0.376682
1301 1 0.623464 0.496935 0.255551
1429 1 0.626782 0.501165 0.372911
1542 1 0.181925 0.569329 0.49035
1638 1 0.185622 0.93377 0.501908
1303 1 0.701843 0.491835 0.320928
517 1 0.130481 1.00435 0.498863
153 1 0.745952 0.997771 0.127012
1625 1 0.753207 0.753502 0.49678
1614 1 0.438339 0.812806 0.50247
1029 1 0.125547 0.49892 0.00428632
1637 1 0.117753 0.882471 0.492599
1126 1 0.191209 0.941792 0.00113507
1054 1 0.937582 0.561468 0.00818966
1626 1 0.816641 0.810951 0.494692
1122 1 0.0641077 0.93851 0.003996
1121 1 -0.000381822 0.867265 0.00807922
521 1 0.248111 1.00231 0.496768
1650 1 0.559116 0.936951 0.490388
1129 1 0.250927 0.881635 0.00449247
525 1 0.375063 1.00142 0.501564
1105 1 0.498989 0.748787 0.00307699
1141 1 0.631043 0.875275 0.00331526
1101 1 0.373052 0.75851 -0.000866985
1645 1 0.371509 0.881135 0.498032
1544 1 0.121999 0.562712 0.548845
1571 1 0.0601867 0.633481 0.561332
1666 1 0.0669377 0.566414 0.619467
1701 1 0.126794 0.624819 0.619885
1540 1 1.00701 0.565634 0.560848
1642 1 0.314008 0.945775 0.506746
1665 1 0.998538 0.504831 0.61575
1817 1 0.748527 0.503873 0.751246
665 1 0.748153 0.99594 0.621993
1581 1 0.368295 0.632773 0.500664
1548 1 0.249568 0.573255 0.560454
1575 1 0.182504 0.62615 0.554708
1579 1 0.321037 0.628161 0.566749
1670 1 0.180006 0.562056 0.614234
1674 1 0.310275 0.567883 0.623781
1705 1 0.253442 0.626461 0.616833
1547 1 0.311374 0.504279 0.564336
669 1 0.868927 0.995277 0.625742
927 1 0.930439 1.00447 0.93704
925 1 0.871837 1.00118 0.877349
919 1 0.688186 0.999633 0.93699
1552 1 0.378748 0.564991 0.5669
1583 1 0.434996 0.627615 0.566256
1678 1 0.43765 0.562717 0.630182
1709 1 0.375814 0.628529 0.630816
1713 1 0.499794 0.628211 0.619488
1550 1 0.43521 0.569548 0.497582
1555 1 0.564999 0.506727 0.554906
1556 1 0.499212 0.564443 0.560835
1560 1 0.620126 0.562194 0.564856
1587 1 0.556321 0.636697 0.550072
1682 1 0.553852 0.563188 0.618186
1717 1 0.618946 0.627277 0.629371
1610 1 0.314361 0.812066 0.49973
21 1 0.621177 0.998586 0.999141
1797 1 0.128328 0.501683 0.749137
1813 1 0.620579 0.507792 0.747551
1564 1 0.755758 0.565344 0.556914
1591 1 0.691413 0.627237 0.566732
1595 1 0.811477 0.629316 0.566358
1686 1 0.690112 0.568412 0.623606
1690 1 0.819657 0.567483 0.622374
1721 1 0.754427 0.634491 0.627615
1578 1 0.314143 0.687791 0.49811
651 1 0.304616 0.99624 0.690639
1617 1 0.493487 0.757232 0.500223
1683 1 0.56316 0.500047 0.678243
2045 1 0.873933 0.871581 0.875823
2046 1 0.931084 0.93395 0.889058
897 1 0.990654 0.999426 0.873799
1697 1 -0.00329827 0.622989 0.628217
1568 1 0.869374 0.560005 0.557615
1599 1 0.936549 0.624519 0.557046
1694 1 0.93308 0.558761 0.622603
1725 1 0.880747 0.625623 0.62557
1609 1 0.245684 0.753596 0.496992
1935 1 0.438206 0.49906 0.939228
1576 1 0.122683 0.686635 0.556115
1603 1 0.0648931 0.749224 0.56734
1608 1 0.124967 0.811193 0.562806
1698 1 0.0666518 0.688676 0.625264
1730 1 0.0648906 0.815234 0.622926
1733 1 0.12362 0.751876 0.625536
1604 1 0.996799 0.817778 0.566374
1572 1 -0.000634056 0.688163 0.56665
1641 1 0.247684 0.878148 0.50164
1580 1 0.252484 0.688562 0.559439
1607 1 0.179627 0.747815 0.564491
1611 1 0.311619 0.757443 0.557085
1612 1 0.245186 0.814268 0.565977
1702 1 0.184387 0.681886 0.612882
1706 1 0.311854 0.690409 0.616328
1734 1 0.181765 0.812508 0.624041
1737 1 0.244859 0.756303 0.625712
1738 1 0.316099 0.814185 0.628093
1584 1 0.381824 0.690486 0.56181
1615 1 0.436052 0.761191 0.569258
1616 1 0.37843 0.821842 0.56707
1710 1 0.437583 0.689437 0.62484
1741 1 0.374397 0.757512 0.621337
1742 1 0.435147 0.817403 0.627772
1745 1 0.49887 0.742917 0.626921
1588 1 0.495986 0.696281 0.560428
1941 1 0.622823 0.504603 0.870113
905 1 0.244242 0.995177 0.877234
1551 1 0.435313 0.506196 0.558592
1598 1 0.938619 0.688926 0.496756
1795 1 0.0651172 0.497092 0.816844
1620 1 0.500668 0.819182 0.558361
1592 1 0.634634 0.690395 0.560124
1619 1 0.560523 0.755447 0.562481
1624 1 0.626939 0.823002 0.566894
1714 1 0.562246 0.689204 0.626415
1746 1 0.565807 0.815015 0.622816
1749 1 0.623888 0.75031 0.622987
1649 1 0.49372 0.887387 0.502778
1597 1 0.867007 0.632537 0.50018
1585 1 0.492124 0.626645 0.495926
1596 1 0.752829 0.689592 0.562353
1623 1 0.68932 0.74922 0.558519
1627 1 0.806715 0.758509 0.567906
1628 1 0.740934 0.808582 0.553956
1718 1 0.68472 0.688192 0.630067
1722 1 0.815952 0.682161 0.630746
1750 1 0.687482 0.802426 0.624092
1753 1 0.7483 0.741674 0.629226
1754 1 0.815641 0.816807 0.630413
1945 1 0.749218 0.497536 0.875993
1594 1 0.812064 0.694276 0.503329
1729 1 -0.00237814 0.754029 0.621044
1600 1 0.878119 0.689526 0.565856
1631 1 0.937968 0.750629 0.560703
1632 1 0.878686 0.812269 0.563933
1726 1 0.939918 0.686233 0.624279
1757 1 0.878264 0.747796 0.63444
1758 1 0.935439 0.811999 0.624312
789 1 0.625848 0.993631 0.748411
1927 1 0.195269 0.50096 0.936884
1133 1 0.373444 0.877889 1.00054
1635 1 0.0608126 0.882569 0.573662
1640 1 0.125998 0.943865 0.563759
1762 1 0.0543529 0.945949 0.630293
1765 1 0.122585 0.875766 0.630809
1539 1 0.0555147 0.504448 0.552163
787 1 0.568537 1.00154 0.807417
2047 1 0.939627 0.866971 0.937351
1639 1 0.185898 0.881261 0.570988
1643 1 0.313003 0.876908 0.557995
1644 1 0.248693 0.939147 0.56416
1766 1 0.188676 0.946349 0.631722
1769 1 0.246337 0.869913 0.626504
1770 1 0.315268 0.935789 0.629091
1566 1 0.93411 0.565522 0.497293
2048 1 0.861709 0.940599 0.940331
1647 1 0.43373 0.881345 0.564708
1648 1 0.370101 0.947907 0.56653
1773 1 0.374894 0.88053 0.629434
1774 1 0.430188 0.944601 0.623721
655 1 0.434335 0.996959 0.689846
1549 1 0.374606 0.506846 0.501347
1629 1 0.875384 0.746634 0.499906
1939 1 0.56507 0.498292 0.946129
1652 1 0.495664 0.941987 0.566298
1777 1 0.496422 0.874237 0.627892
1651 1 0.560962 0.878253 0.553599
1656 1 0.620893 0.940585 0.560312
1778 1 0.559326 0.939832 0.626331
1781 1 0.624002 0.881053 0.623203
1669 1 0.122519 0.499549 0.622637
1819 1 0.817174 0.499708 0.818031
1655 1 0.685098 0.883587 0.553558
1659 1 0.8094 0.868198 0.56312
1660 1 0.755184 0.933217 0.562771
1782 1 0.68429 0.938824 0.614356
1785 1 0.747892 0.870663 0.619286
1786 1 0.812084 0.938593 0.62368
2017 1 0.996351 0.878618 0.879839
1811 1 0.55459 0.504341 0.812266
519 1 0.190187 1.00096 0.561788
917 1 0.625525 0.997046 0.874005
1761 1 0.993105 0.876411 0.628975
1636 1 0.995702 0.944858 0.569122
1663 1 0.942583 0.87964 0.556293
1664 1 0.884065 0.94516 0.559258
1789 1 0.874614 0.882867 0.623918
1790 1 0.931106 0.942962 0.627
1567 1 0.932038 0.506459 0.563152
1558 1 0.68254 0.56086 0.504537
1672 1 0.122291 0.564492 0.682445
1699 1 0.0683723 0.627086 0.681066
1794 1 0.0598945 0.557534 0.747676
1800 1 0.122693 0.560824 0.809678
1827 1 0.0689981 0.622112 0.80519
1829 1 0.131072 0.634205 0.749898
1668 1 0.00400098 0.559198 0.685641
773 1 0.117313 0.995709 0.75048
1093 1 0.127313 0.747685 1.00062
1676 1 0.243746 0.550738 0.68133
1703 1 0.193311 0.619965 0.674542
1707 1 0.306586 0.638621 0.684416
1798 1 0.18398 0.567482 0.742744
1802 1 0.297069 0.567971 0.741125
1804 1 0.242818 0.568556 0.809254
1831 1 0.186384 0.631263 0.814146
1833 1 0.244518 0.630119 0.74468
1835 1 0.310537 0.624761 0.801663
2028 1 0.248069 0.937979 0.935116
1680 1 0.366435 0.566174 0.6867
1711 1 0.438502 0.627571 0.685951
1806 1 0.437165 0.564821 0.740949
1808 1 0.383239 0.565868 0.807213
1837 1 0.374422 0.633559 0.743484
1839 1 0.43474 0.629269 0.804884
1841 1 0.497313 0.630692 0.745293
1812 1 0.491299 0.567837 0.808607
2026 1 0.309239 0.935472 0.868858
1065 1 0.254104 0.624243 1.00466
2041 1 0.756967 0.871433 0.87631
1684 1 0.503544 0.569009 0.685939
1688 1 0.625465 0.562465 0.682696
1715 1 0.5658 0.629272 0.690703
1810 1 0.559478 0.56332 0.747205
1816 1 0.614273 0.568479 0.806363
1843 1 0.552982 0.625162 0.805453
1845 1 0.625491 0.623976 0.750688
2043 1 0.817263 0.869191 0.943203
2042 1 0.812826 0.934102 0.877988
539 1 0.808978 0.999555 0.56367
1692 1 0.748971 0.565731 0.685316
1719 1 0.685777 0.618907 0.689697
1723 1 0.81696 0.618208 0.688214
1814 1 0.683476 0.560998 0.75159
1818 1 0.817169 0.556594 0.756895
1820 1 0.750305 0.563113 0.814964
1847 1 0.691466 0.625595 0.817756
1849 1 0.745948 0.622051 0.75004
1851 1 0.812986 0.623643 0.820776
799 1 0.93528 1.00022 0.8114
1097 1 0.246827 0.744926 0.99558
1613 1 0.369355 0.745064 0.498725
1796 1 0.00194755 0.562493 0.815231
1825 1 0.00183815 0.630497 0.741949
1696 1 0.878167 0.559788 0.695245
1727 1 0.934243 0.614799 0.68554
1822 1 0.949135 0.56452 0.751351
1824 1 0.879794 0.557182 0.81408
1853 1 0.872868 0.625028 0.753689
1855 1 0.942513 0.629801 0.81197
1704 1 0.132103 0.683156 0.684385
1731 1 0.0641717 0.755633 0.683066
1736 1 0.115272 0.818585 0.690816
1826 1 0.0676442 0.689641 0.748271
1832 1 0.122463 0.689971 0.817179
1858 1 0.0663653 0.8239 0.750916
1859 1 0.0643105 0.748966 0.80656
1861 1 0.12693 0.754881 0.751365
1864 1 0.116463 0.819939 0.810586
1857 1 -0.00322969 0.751873 0.752803
1708 1 0.246329 0.694206 0.684462
1735 1 0.185036 0.753978 0.683176
1739 1 0.312476 0.752203 0.688191
1740 1 0.243214 0.818561 0.687774
1830 1 0.190787 0.69236 0.756383
1834 1 0.31191 0.691837 0.74936
1836 1 0.25536 0.689182 0.81592
1862 1 0.19635 0.813595 0.749906
1863 1 0.184418 0.745071 0.819286
1865 1 0.246901 0.752489 0.756673
1866 1 0.310948 0.815102 0.751311
1867 1 0.314652 0.74988 0.815466
1868 1 0.254592 0.810385 0.812739
1712 1 0.378341 0.700946 0.684148
1743 1 0.436074 0.758185 0.681652
1744 1 0.37332 0.821138 0.69265
1838 1 0.435922 0.697598 0.745369
1840 1 0.371336 0.685115 0.811665
1869 1 0.373661 0.754015 0.747963
1870 1 0.43103 0.810376 0.75062
1871 1 0.436324 0.74388 0.815581
1872 1 0.375452 0.810111 0.811733
1873 1 0.500911 0.754167 0.745474
1716 1 0.499672 0.687717 0.687908
1748 1 0.49966 0.819715 0.687642
1876 1 0.502887 0.802026 0.812604
1844 1 0.493603 0.686262 0.81505
1720 1 0.62562 0.690184 0.688818
1747 1 0.559753 0.76143 0.682371
1752 1 0.631931 0.807186 0.685405
1842 1 0.567648 0.683796 0.751216
1848 1 0.626997 0.682642 0.814295
1874 1 0.56491 0.813714 0.746272
1875 1 0.566201 0.743987 0.809199
1877 1 0.622646 0.755079 0.743128
1880 1 0.617414 0.80795 0.808873
1724 1 0.741403 0.686722 0.696598
1751 1 0.688602 0.744188 0.68862
1755 1 0.809012 0.742055 0.692134
1756 1 0.748714 0.80329 0.685429
1846 1 0.6842 0.687303 0.76021
1850 1 0.817801 0.682818 0.748306
1852 1 0.746989 0.687127 0.812697
1878 1 0.684609 0.811199 0.748746
1879 1 0.689565 0.749566 0.814124
1881 1 0.748653 0.742558 0.753493
1882 1 0.813583 0.810066 0.74622
1883 1 0.812745 0.748659 0.813199
1884 1 0.75362 0.807566 0.80915
1732 1 0.00336654 0.813056 0.694818
1700 1 0.00935024 0.690769 0.68487
1828 1 1.00071 0.690042 0.819511
1860 1 0.999825 0.80785 0.820658
1728 1 0.879679 0.684204 0.686228
1759 1 0.938995 0.750781 0.693731
1760 1 0.881748 0.811262 0.689164
1854 1 0.938503 0.685594 0.747214
1856 1 0.872619 0.686925 0.811294
1885 1 0.875346 0.752511 0.752316
1886 1 0.944118 0.817702 0.754588
1887 1 0.938976 0.744788 0.812675
1888 1 0.876881 0.811804 0.81009
2027 1 0.310669 0.885387 0.936601
1763 1 0.0567067 0.87906 0.684579
1768 1 0.123493 0.94049 0.688469
1890 1 0.0595576 0.931027 0.751418
1891 1 0.0543943 0.881604 0.817287
1893 1 0.127631 0.882146 0.747421
1896 1 0.119923 0.939578 0.811023
1069 1 0.373578 0.624239 1.00375
1767 1 0.180494 0.871762 0.69086
1771 1 0.316795 0.881479 0.692012
1772 1 0.248596 0.93635 0.684432
1894 1 0.190683 0.936879 0.746624
1895 1 0.186521 0.881265 0.813123
1897 1 0.249967 0.87522 0.757403
1898 1 0.31144 0.937413 0.755581
1899 1 0.311935 0.871315 0.810433
1900 1 0.250288 0.941441 0.80895
2038 1 0.686565 0.936941 0.880345
1606 1 0.179986 0.80938 0.504208
1138 1 0.561556 0.935124 0.999157
1775 1 0.428921 0.879771 0.69127
1776 1 0.368317 0.942518 0.692803
1901 1 0.376621 0.86998 0.756903
1902 1 0.431873 0.930905 0.751807
1903 1 0.442836 0.867822 0.815963
1904 1 0.376645 0.941133 0.820452
1780 1 0.50117 0.934443 0.687883
1908 1 0.501483 0.939767 0.81021
1130 1 0.311073 0.947988 0.998718
1559 1 0.682769 0.504656 0.564277
2039 1 0.683667 0.869534 0.945175
1905 1 0.502247 0.873404 0.755301
1779 1 0.562199 0.876737 0.689056
1784 1 0.618113 0.938879 0.681596
1906 1 0.566527 0.9381 0.749218
1907 1 0.558744 0.868163 0.821648
1909 1 0.624726 0.872818 0.752154
1912 1 0.629722 0.940868 0.815505
1582 1 0.438427 0.693072 0.500221
1110 1 0.678917 0.806146 0.99916
1783 1 0.690216 0.874521 0.683953
1787 1 0.814542 0.883158 0.688957
1788 1 0.744898 0.931843 0.683047
1910 1 0.68481 0.928663 0.747509
1911 1 0.69507 0.876439 0.813767
1913 1 0.747439 0.861163 0.74018
1914 1 0.821464 0.945362 0.751646
1915 1 0.808399 0.875748 0.812248
1916 1 0.746659 0.935325 0.808006
791 1 0.688006 0.999492 0.808665
901 1 0.121612 0.998417 0.868687
2044 1 0.747366 0.933565 0.937108
1889 1 0.99617 0.883772 0.748298
1764 1 0.995413 0.938745 0.689867
1892 1 0.993117 0.939851 0.809194
1791 1 0.940129 0.880985 0.689345
1792 1 0.878483 0.937498 0.689351
1917 1 0.882506 0.874302 0.748284
1918 1 0.934088 0.943438 0.748796
1919 1 0.937578 0.873382 0.817265
1920 1 0.883584 0.937106 0.816949
1815 1 0.685522 0.499245 0.81266
1922 1 0.055736 0.565361 0.873466
1928 1 0.11822 0.5605 0.946952
1955 1 0.0643578 0.620361 0.942948
1957 1 0.123494 0.618817 0.871226
1953 1 -0.00280299 0.625041 0.879917
2030 1 0.43758 0.93467 0.878483
667 1 0.805807 0.99114 0.688021
923 1 0.804319 0.99907 0.947447
1807 1 0.440382 0.504006 0.807585
1062 1 0.190289 0.681732 0.998433
1134 1 0.43593 0.929435 0.993361
1930 1 0.318067 0.567355 0.871801
1926 1 0.187094 0.561425 0.869713
1963 1 0.30883 0.628048 0.936887
1961 1 0.254792 0.624218 0.876303
1932 1 0.25808 0.564888 0.944118
1959 1 0.189486 0.616906 0.938324
771 1 0.0537432 0.995668 0.812406
1937 1 0.503374 0.497744 0.884222
1801 1 0.241829 0.503138 0.753219
1965 1 0.375176 0.626131 0.869091
1934 1 0.435875 0.561918 0.874008
1936 1 0.370386 0.560586 0.931664
1967 1 0.432288 0.631679 0.932589
2034 1 0.554379 0.935875 0.872605
769 1 0.00146286 0.993114 0.751223
2018 1 0.0607986 0.938126 0.876782
2040 1 0.615693 0.929698 0.927074
1940 1 0.505939 0.55981 0.94393
1969 1 0.497237 0.628133 0.871487
1938 1 0.556369 0.574956 0.874526
1944 1 0.623302 0.559911 0.939406
1971 1 0.568895 0.629 0.933265
1973 1 0.625704 0.61494 0.874279
2035 1 0.559974 0.868448 0.933889
2033 1 0.493442 0.863483 0.876755
2036 1 0.498865 0.935931 0.940174
1633 1 0.00799606 0.878337 0.498135
2025 1 0.245156 0.877786 0.872013
1942 1 0.689083 0.557118 0.87265
1946 1 0.816871 0.562645 0.885959
1977 1 0.746273 0.625112 0.875236
1979 1 0.813041 0.622869 0.947328
1948 1 0.748044 0.5627 0.931227
1975 1 0.685544 0.615532 0.937543
1070 1 0.438302 0.677929 1.00016
2022 1 0.184289 0.938468 0.876945
1924 1 0.997023 0.564805 0.940686
1950 1 0.942029 0.567105 0.8732
1981 1 0.879504 0.625952 0.874575
1983 1 0.937395 0.629433 0.946109
1952 1 0.878609 0.562278 0.943846
1943 1 0.688285 0.497296 0.933612
2029 1 0.374696 0.87437 0.874019
1658 1 0.809633 0.937191 0.500485
2031 1 0.438636 0.878042 0.931944
1986 1 0.0652608 0.817837 0.875326
1987 1 0.0641815 0.752414 0.943405
1954 1 0.06069 0.682366 0.880203
1992 1 0.124081 0.814973 0.93721
1989 1 0.115432 0.748791 0.881652
1960 1 0.127928 0.688546 0.939963
1956 1 1.00157 0.68521 0.93856
1988 1 1.00315 0.813409 0.938424
2016 1 0.877054 0.80281 0.937598
2023 1 0.182382 0.880689 0.941374
1995 1 0.304788 0.752433 0.93856
1996 1 0.251694 0.826949 0.932867
1991 1 0.183167 0.750283 0.935861
1958 1 0.183001 0.68192 0.882146
1964 1 0.2507 0.685467 0.934484
1993 1 0.244238 0.745862 0.88065
1962 1 0.31138 0.685959 0.881492
1990 1 0.181816 0.808448 0.873222
1994 1 0.318293 0.809668 0.883383
1984 1 0.875172 0.685841 0.94158
2021 1 0.122616 0.878277 0.873202
2020 1 1.00098 0.936854 0.944853
2032 1 0.374347 0.940611 0.93029
1999 1 0.443251 0.747916 0.935317
1998 1 0.43866 0.809544 0.87403
1966 1 0.431766 0.689365 0.871528
1968 1 0.372446 0.694092 0.937481
1997 1 0.379586 0.755851 0.87549
2000 1 0.375828 0.817874 0.941357
2004 1 0.503443 0.806542 0.932643
2001 1 0.49885 0.748784 0.873681
2014 1 0.93824 0.809523 0.880303
1972 1 0.505267 0.683502 0.930326
2008 1 0.624189 0.815352 0.934034
2003 1 0.567503 0.755692 0.939115
1976 1 0.628371 0.684077 0.935938
1970 1 0.565957 0.682917 0.873055
2002 1 0.567327 0.801016 0.873151
2005 1 0.624127 0.747169 0.871844
2019 1 0.0693638 0.876684 0.940613
2010 1 0.814224 0.808298 0.870171
2007 1 0.686732 0.749286 0.934636
2009 1 0.750984 0.749382 0.871496
2006 1 0.690932 0.809589 0.874195
1974 1 0.688048 0.689151 0.878645
2012 1 0.746293 0.814306 0.936935
1980 1 0.745143 0.68659 0.940457
1978 1 0.809975 0.68468 0.879013
2011 1 0.812119 0.747315 0.934391
2024 1 0.118688 0.936414 0.932897
2037 1 0.631962 0.872968 0.86614
1985 1 -0.00282981 0.745496 0.880011
2013 1 0.87526 0.740418 0.877055
2015 1 0.940154 0.747911 0.942832
1982 1 0.939208 0.68164 0.877426
1685 1 0.624198 0.49799 0.624726
921 1 0.750037 0.993694 0.872594
1689 1 0.754243 0.5089 0.617092
1074 1 0.562137 0.689309 1.0044
793 1 0.743391 0.994797 0.743547
1038 1 0.438923 0.564283 0.997566
1098 1 0.31011 0.81654 0.996122
1106 1 0.562391 0.81267 0.999555
1137 1 0.505131 0.873662 0.99728
1073 1 0.508898 0.626875 0.999897
1102 1 0.438982 0.818272 0.994737
903 1 0.180698 0.991635 0.938267
1677 1 0.370976 0.504548 0.624839
1805 1 0.371594 0.503407 0.738617
1929 1 0.254714 0.499919 0.872498
1925 1 0.124088 0.507107 0.885198
663 1 0.690617 0.993785 0.682392
1823 1 0.944199 0.497506 0.816523
783 1 0.439963 0.99924 0.808456
1090 1 0.0590713 0.817559 0.995411
1114 1 0.810011 0.809063 1.00183
1661 1 0.876314 0.872905 0.50713
1150 1 0.935295 0.932918 0.989679
1066 1 0.30469 0.687077 0.997916
1094 1 0.194291 0.812926 0.993522
1025 1 0.995838 0.501522 1.00051
1046 1 0.687076 0.560164 0.999837
1086 1 0.937344 0.690812 1.00129
1590 1 0.689214 0.687583 0.498155
5 1 0.119231 0.99658 0.993705
1593 1 0.75612 0.628223 0.504177
533 1 0.619675 0.99325 0.500188
1 1 0.99599 0.998703 0.995557
1077 1 0.620617 0.625507 1.00111
1589 1 0.622641 0.625203 0.499388
1109 1 0.627085 0.743926 0.997255
1149 1 0.87924 0.868933 0.995308
1646 1 0.43616 0.944884 0.504697
1601 1 0.00180131 0.7527 0.507519
1565 1 0.879216 0.49853 0.500539
|
18,061 | 466e2147adbce0ab8516e51cbf37251e9cd49038 | import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point
from geopandas import GeoSeries, GeoDataFrame
import csv
import psycopg2
import math
hostname = 'localhost'
username = 'postgres'
#password = 'docker'
password = 'admin'
#database = 'sgbd'
database = 'world'
queryEJ1 = 'SELECT code, population FROM public.country ORDER BY code'
queryEJ2 = 'SELECT code, gnp FROM public.country ORDER BY code'
queryEJ3 = 'SELECT countrycode, count(entidad) FROM public.sitio GROUP BY countrycode ORDER BY countrycode'
def getData(conexion, query):
    """Run *query* on the open DB connection and return {first column (stripped): second column}."""
    cursor = conexion.cursor()
    cursor.execute(query)
    return {row[0].strip(): row[1] for row in cursor.fetchall()}
def processMap(world, listDf, query, alphaValue):
    """Fetch per-country values for *query*, store them (log2-scaled when
    positive) in world['distribution'], then draw the choropleth.

    Countries missing from the query result keep whatever value the column
    already has.
    """
    conexion = psycopg2.connect(host=hostname, user=username,
                                password=password, database=database)
    resultDB = getData(conexion, query)
    for code in listDf:
        if code not in resultDB:
            continue
        value = resultDB[code]
        # log2 compresses the huge spread between countries; non-positive
        # values are stored as-is.
        world.at[code, 'distribution'] = math.log2(value) if value > 0 else value
    world.plot(column='distribution', cmap='Greens', alpha=alphaValue,
               categorical=False, legend=False, ax=None)
    print(world)
    plt.show()
def main():
    """Load the world shapefile and draw one distribution map per query."""
    world = GeoDataFrame.from_file('ne_10m_admin_0_countries.shp').sort_values(by='NAME').set_index('ISO_A3')
    country_codes = world.index.tolist()
    for query in (queryEJ1, queryEJ2, queryEJ3):
        processMap(world, country_codes, query, 0.5)
main() |
18,062 | bf8e8d89c30fe30eaf639badf224480cf62686ca | """
-
코딩테스트 연습 - 깊이/너비 우선 탐색(DFS/BFS) - 네트워크
- URL : https://programmers.co.kr/learn/courses/30/lessons/43162
"""
def solution(n, computers):
    """Count the connected components ("networks") in the *n* x *n*
    adjacency matrix *computers*.

    https://programmers.co.kr/learn/courses/30/lessons/43162
    """
    visited = [False] * n
    networks = 0
    for node, row in enumerate(computers):
        if sum(row) == 1:
            # Only connected to itself: an isolated, one-machine network.
            visited[node] = True
            networks += 1
        elif not visited[node]:
            # BFS over everything reachable from this yet-unvisited node.
            queue = [peer for peer, linked in enumerate(row) if linked == 1]
            while queue:
                current = queue.pop(0)
                if not visited[current]:
                    queue.extend(peer for peer, linked in enumerate(computers[current])
                                 if linked == 1)
                    visited[current] = True
            # The BFS exhausted one whole component.
            networks += 1
    return networks
solution(3,[[1, 1, 0], [1, 1, 0], [0, 0, 1]]) |
18,063 | a65ba5f05b0f286733915bf4857ece4f0d6a6102 | #!/usr/bin/env python3
from sage.all import *
from Crypto.Util.number import long_to_bytes
from Crypto.PublicKey import RSA
# CTF-style RSA artifact: `message` is a big-integer message/ciphertext and
# `pubkey` is the PEM-encoded public key it belongs to (parsed with
# PyCrypto/PyCryptodome).  The script currently only prints the modulus n.
message = 6213639477312598145146606285597413094756028916460209994926376562685721597532354994527411261035070313371565996179096901618661905020103824302567694878011247857685359643790779936360396061892681963343509949795893998949164356297380564973147847768251471545846793414196863838506235390508670540548621210855302903513284961283614161501466772253041178512706947379642827461605012461899803919210999488026352375214758873859352222530502137358426056819293786590877544792321648180554981415658300194184367096348141488594780860400420776664995973439686986538967952922269183014996803258574382869102287844486447643771783747439478831567060
pubkey = RSA.importKey("""
-----BEGIN PUBLIC KEY-----
MIIBITANBgkqhkiG9w0BAQEFAAOCAQ4AMIIBCQKCAQBXyI8cm57UfYRPh7KfRHlu
F85Hwv4kzBq340QyszUhJGPSOZ0HRxGABXLqaBLikBICvF8ZDMtJZtVwkEpBaXpj
ZEiK4UCxtjV/xqa0rM1RenQDu8mW39ByiV9qmh6o8qbatp2hVXUXf0zvGtuQglu9
T+xQAarAGnDooQ4QEzRxOTK+R9GgnXDTEVf+JuVTd0+NnlAgmEcryocHkx4rycuS
qslEUb5vHlWLk6hoXOmE9IQK+vjSqK0NRlRUYqkYFRpQ3qGij03x5eaZsAUtpSMF
nrIdVrZ8keVqt181vJ9km+p2oTaxcNOmdvUUuciVXq94qQut1Uhbun8SF4sfj+/v
AgMBAAE=
-----END PUBLIC KEY-----
""".strip())
# Print the modulus, presumably as input to a factoring / lattice attack
# elsewhere -- TODO confirm against the rest of the challenge scripts.
print(pubkey.n)
|
18,064 | 9f13e593b2d67b06142a87167e74142dfea83712 | class Game:
    def __init__(self) -> None:
        # Registered game instances; starts empty.
        self.inst = []
    def get_instance_count(self) -> int:
        """Return how many instances are currently held in ``self.inst``."""
        return len(self.inst)
|
18,065 | 0c692e4cdbca3b4d38e696b4a72c9ed086685d0b | import numpy
from numpy import *
import operator
from PIL import Image
from os import listdir
class Bayes:
    """Naive Bayes classifier over discrete feature vectors."""

    def __init__(self):
        self.length = -1           # feature-vector length; -1 until fit() runs
        self.labelCount = dict()   # label -> prior probability
        self.vectorCount = dict()  # label -> list of training vectors

    def fit(self, data_set: list, labels: list):
        """Record class priors and the per-class training vectors."""
        if len(data_set) != len(labels):
            raise ValueError("您输入的测试数组跟类别数组长度不一致")
        # Length of one feature vector.
        self.length = len(data_set[0])
        total = len(labels)
        for label in set(labels):
            # Prior: share of training samples carrying this label.
            self.labelCount[label] = labels.count(label) / total
        for vector, label in zip(data_set, labels):
            self.vectorCount.setdefault(label, []).append(vector)
        print("训练结束")
        return self

    def test(self, test_data, label_set):
        """Return the label in *label_set* with the highest naive-Bayes score
        for *test_data* (feature likelihoods are per-column frequency counts)."""
        if self.length == -1:
            raise ValueError("您还没进行训练, 请先训练")
        scores = dict()
        for candidate in label_set:
            prior = self.labelCount[str(candidate)]
            vectors = self.vectorCount[str(candidate)]
            count = len(vectors)
            # Transpose so each row is one feature column across all samples.
            columns = numpy.array(vectors).T
            likelihood = 1
            for index in range(0, len(test_data)):
                column = list(columns[index])
                likelihood = likelihood * column.count(test_data[index]) / count
            scores[candidate] = likelihood * prior
        return sorted(scores, key=lambda lb: scores[lb], reverse=True)[0]
# Load a 32x32 text digit file into a flat list of pixel values
def dataToArray(fName):
    """Read a 32x32 text digit image and return its 1024 pixels as a flat list.

    Each of the first 32 lines must contain at least 32 characters, every one
    a digit character ('0'/'1' in this data set).
    """
    arr = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(fName) as fh:
        for i in range(0, 32):
            thisLine = fh.readline()
            for j in range(0, 32):
                arr.append(int(thisLine[j]))
    return arr
def seplabel(fileName):
    """Extract the class label from a file name like '8_90.txt' -> '8'."""
    stem = fileName.split(".")[0]
    return stem.split("_")[0]
# Build the training matrix and label list from the training directory
def trainData(train_dir="E:\pythonResult\knn\\traindata"):
    """Load every digit file under *train_dir*.

    Returns (trainArr, labels): trainArr has shape (num_files, 1024), one
    flattened 32x32 image per row; labels[i] is the class string extracted
    from the i-th file name.

    The directory used to be hard-coded; it is now a parameter whose default
    is the original path, so existing callers are unaffected.
    """
    labels = []
    trainFile = listdir(train_dir)
    num = len(trainFile)
    # One row per file, 1024 pixels per row.
    trainArr = zeros((num, 1024))
    for i in range(0, num):
        thisFileName = trainFile[i]
        labels.append(seplabel(thisFileName))
        trainArr[i, :] = dataToArray(train_dir + "\\" + thisFileName)
    return trainArr, labels
# Guarded so importing this module no longer trains/classifies as a side effect.
if __name__ == "__main__":
    bys = Bayes()
    train_data, labels = trainData()
    # Train
    bys.fit(train_data, labels)
    # Classify a single handwritten digit.
    this_data = dataToArray("E:\pythonResult\knn\\testdata\8_90.txt")
    labels_all = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    rst = bys.test(this_data, labels_all)
    print(rst)
    # Classify every file in the test directory and report each prediction.
    test_file_all = listdir("E:\pythonResult\knn\\testdata")
    for this_file_name in test_file_all:
        this_label = seplabel(this_file_name)
        this_data_array = dataToArray("E:\pythonResult\knn\\testdata\\" + this_file_name)
        label = bys.test(this_data_array, labels_all)
        print("该数字是: " + str(this_label) + ", 识别出来的数字: " + str(label))
|
18,066 | 214ffe24b8dc20bfb3defbe78061715b8b1be4d3 | from django.shortcuts import render, redirect
from django.views.generic import ListView, CreateView
from django.http import HttpResponse, HttpResponseRedirect
from .forms import FormCategoria, FormUsuario
from django.core.exceptions import PermissionDenied
from .models import Categoria
from .models import Cita
from .models import Usuario
from .models import Tratamiento
from .models import TipoMedicamento
from .models import Dosis
from .models import Medicina
from .models import Indicacion
from .models import TratamientoIndicacion
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
""" from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseNotFound
from django.http import Http404
from datetime import date
import re """
# Create your views here.
def index(request):
    """Render the public landing page."""
    return render(request, 'serina_views/index.html')
def view_categoria(request):
    """Render the category screen without any context (login required)."""
    if not request.user.is_authenticated:
        raise PermissionDenied
    return render(request, 'serina_views/ver_categ.html')
def add_categoria(request):
    """Create a Categoria owned by the logged-in user.

    POST param 'categoria' is lower-cased and rejected (case-insensitively)
    if a category of that name already exists.  Redirects to
    'show_categoria' on success.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    categoria = request.POST['categoria'].lower()
    # Let the database do the duplicate check instead of loading and
    # lower-casing every row in Python (the original was O(n) per request).
    if Categoria.objects.filter(nombre__iexact=categoria).exists():
        return HttpResponse("ERROR. ESTA AGREGANDO UNA CATEGORIA QUE YA EXISTE")
    # Resolve the User row matching the e-mail cached in the session.
    email = User.objects.filter(email = request.session['user_email'])
    email_obj = User.objects.get(pk = email[0].id)
    db_o = Categoria(nombre=categoria, email_usuario=email_obj)
    db_o.save()
    return redirect('show_categoria')
def add_medicina(request):
    """Create a Medicina plus its Dosis and TipoMedicamento rows.

    POST params: 'medicina', 'tipo_medicamento', 'tipo_dosis',
    'concentracion'.  Redirects to 'show_categoria'.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    medicina = request.POST['medicina']
    tipo_medicamento = request.POST['tipo_medicamento']
    tipo_dosis = request.POST['tipo_dosis']
    concentracion = request.POST['concentracion']
    nombre = medicina.lower()
    obj_dosis = Dosis(tipo = tipo_dosis)
    obj_dosis.save()
    obj_tipo_medicamento = TipoMedicamento(nombre = tipo_medicamento)
    obj_tipo_medicamento.save()
    # BUG FIX: the original pointed the foreign keys at *unsaved* Dosis /
    # TipoMedicamento objects whose pk was the raw POST text, ignoring the
    # rows saved just above.  Link the freshly-saved objects instead.
    obj_medicina = Medicina(nombre = nombre, concentracion = concentracion,
                            tipo_medicamento = obj_tipo_medicamento,
                            tipo_dosis = obj_dosis)
    obj_medicina.save()
    return redirect('show_categoria')
def add_cita(request):
    """Create a Cita (appointment) for the logged-in user.

    POST params: 'categ' (Categoria pk), 'fechaa' (date), 'notaa' (note).
    Redirects to 'show_categoria'.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    categoria = request.POST['categ']
    fecha = request.POST['fechaa']
    nota = request.POST['notaa']
    # Resolve the User row matching the e-mail cached in the session.
    email = User.objects.filter(email = request.session['user_email'])
    email_obj = User.objects.get(pk = email[0].id)
    id_categ = Categoria.objects.get(pk = categoria)
    cita = Cita(fecha = fecha, nota = nota, email_usuario = email_obj, id_categoria = id_categ)
    cita.save()
    return redirect('show_categoria')
def show_categoria(request):
    """List every Categoria, Cita and Tratamiento for the category screen."""
    if not request.user.is_authenticated:
        raise PermissionDenied
    categoria = Categoria.objects.all()
    # BUG FIX: the original wrote 'Cita.objects.all' (no parentheses) and so
    # handed the bound method to the template instead of the queryset; call
    # it, matching every other view in this module.
    cita = Cita.objects.all()
    tratamiento = Tratamiento.objects.all()
    context = {'categoria': categoria, 'cita': cita, 'tratamiento': tratamiento}
    return render(request, 'serina_views/ver_categ.html', context)
def delete_categoria(request):
    """Delete the Categoria whose pk arrives as the 'categoria' GET parameter."""
    if not request.user.is_authenticated:
        raise PermissionDenied
    categoria = request.GET.get("categoria","")
    # Dropped the unused 'cita = Cita.objects.all()' local the original built here.
    eliminar = Categoria.objects.get(pk = categoria)
    eliminar.delete()
    return redirect('show_categoria')
def delete_cita(request):
    """Delete the Cita whose pk arrives as the 'cita' GET parameter."""
    if not request.user.is_authenticated:
        raise PermissionDenied
    pk = request.GET.get("cita", "")
    Cita.objects.get(pk=pk).delete()
    return redirect('show_categoria')
def show_tratamiento(request):
    """List citas, tratamientos, medicinas and their link rows for the
    treatment overview screen."""
    if not request.user.is_authenticated:
        raise PermissionDenied
    cita = Cita.objects.all()
    tratamiento = Tratamiento.objects.all()
    medicina = Medicina.objects.all()
    tratamiento_medicina = TratamientoIndicacion.objects.all()
    context = {'cita': cita, 'tratamiento': tratamiento, 'medicina': medicina,
               'tratamiento_medicina': tratamiento_medicina}
    return render(request, "serina_views/ver_tratamiento.html",context)
def view_tratamiento(request):
    """Show the detail page for the Tratamiento whose pk arrives as the
    'tratamiento' GET parameter.

    All related tables are passed whole so the template can resolve the
    treatment's indications, medicines and dose/medicament types itself.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    tratamiento = request.GET.get("tratamiento","")
    obj_tratamiento = Tratamiento.objects.get(pk = tratamiento)
    tratamiento_indicacion = TratamientoIndicacion.objects.all()
    cita = Cita.objects.all()
    medicina = Medicina.objects.all()
    dosis = Dosis.objects.all()
    categoria = Categoria.objects.all()
    tipo_medicamento = TipoMedicamento.objects.all()
    indicacion = Indicacion.objects.all()
    context = {'cita': cita, 'tratamiento': obj_tratamiento, 'medicina': medicina, 'tratamiento_indicacion': tratamiento_indicacion,
               'dosis': dosis, 'tipo_medicamento': tipo_medicamento,'categoria': categoria, 'indicacion': indicacion}
    return render(request, "serina_views/info_tratamiento.html", context)
def show_indicaciones(request):
    """Render the indication form for the Cita given in the 'cita' GET parameter."""
    if not request.user.is_authenticated:
        raise PermissionDenied
    cita = request.GET.get("cita","")
    obj_cita = Cita.objects.get(pk = cita)
    obj_categoria = Categoria.objects.all()
    obj_medicina = Medicina.objects.all()
    context = {'cita' : obj_cita, 'categoria' : obj_categoria, 'medicina': obj_medicina}
    return render(request, "serina_views/form_indicaciones.html", context)
def add_indicaciones(request):
    """Create a Tratamiento, an Indicacion and the row linking them.

    POST params:
      - 'info_medicina': "nombre|$dosis|$concentracion" -- the form joins the
        medicine's fields with the literal delimiter '|$'.
      - 'citas_form': pk of the Cita the treatment belongs to.
      - 'fecha_inicio_ind' / 'fecha_final_ind': indication date range.
      - 'fecha_recipe_tra': prescription date.
      - 'cantidad_dosis', 'dif_horas': dose amount and hours between doses.
      - 'tra_continuo' (optional checkbox): continuous-treatment flag.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    medicina = request.POST['info_medicina']
    # The form packs the medicine fields into one string; split them back out.
    info = medicina.split("|$")
    nombre_medicina = info[0]
    dosis_medicina = info[1]
    concentracion_medicina = info[2]
    cita = request.POST['citas_form']
    fecha_inicio_ind = request.POST['fecha_inicio_ind']
    fecha_final_ind = request.POST['fecha_final_ind']
    fecha_recipe_tra = request.POST['fecha_recipe_tra']
    cantidad_dosis = request.POST['cantidad_dosis']
    dif_horas = request.POST['dif_horas']
    # Unchecked checkboxes are absent from POST, hence the False default.
    tra_continuo = request.POST.get('tra_continuo', False)
    if tra_continuo:
        tra_continuo = True
    id_cita = Cita.objects.get(pk = cita)
    obj_tratamiento = Tratamiento(continuo = tra_continuo, fecha_recipe = fecha_recipe_tra, id_cita = id_cita)
    obj_tratamiento.save()
    # Match the Medicina row by name, concentration and dose type; the first
    # match is used.
    obj_medicina = Medicina.objects.filter(nombre = nombre_medicina, concentracion = concentracion_medicina,tipo_dosis =dosis_medicina)
    obj_indicacion = Indicacion(id_medicina = obj_medicina[0], fecha_inicio = fecha_inicio_ind, fecha_final = fecha_final_ind,
                                diferencia_horas = dif_horas, cantidad_dosis = cantidad_dosis)
    obj_indicacion.save()
    # Link the freshly-saved Tratamiento and Indicacion.
    id_tratamiento = Tratamiento.objects.get(pk = obj_tratamiento.id)
    id_indicacion = Indicacion.objects.get(pk = obj_indicacion.id)
    tratamiento_indicacion = TratamientoIndicacion(id_tratamiento = id_tratamiento, id_indicacion = id_indicacion)
    tratamiento_indicacion.save()
    return redirect('show_tratamiento')
def add_tratamiento(request):
    """Stub view: performs no work beyond the auth check, then redirects to
    the treatment list."""
    if not request.user.is_authenticated:
        raise PermissionDenied
    return redirect('show_tratamiento')
def add_informacion(request):
    """Render the information form (login required)."""
    if not request.user.is_authenticated:
        raise PermissionDenied
    return render(request, "serina_views/form_informacion.html")
def show_login(request):
    """Render the sign-in page (no auth required)."""
    return render(request, "serina_views/sign_in.html" )
def show_sign_up(request):
    """Render the registration page (no auth required)."""
    return render(request, "serina_views/sign_up.html")
def add_sing_up(request):
    """Register a new account from the sign-up form.

    Creates a Django auth User (the e-mail doubles as the username) plus a
    Usuario profile row holding the extra personal fields.  Redirects to the
    landing page.

    NOTE(review): 'sing' looks like a typo for 'sign'; presumably the URLconf
    references this name, so it is kept -- confirm before renaming.
    """
    email = request.POST['cuenta']
    password = request.POST['password']
    nombre = request.POST['nombre']
    apellido = request.POST['apellido']
    ci = request.POST['cedula']
    nacimiento = request.POST['fecha_nacimiento']
    altura = request.POST['altura']
    # Username and e-mail are intentionally the same value.
    user = User.objects.create_user(email, email, password)
    user.first_name = nombre
    user.last_name = apellido
    user.save()
    db_object = Usuario(email = user, nombre = nombre, apellido = apellido, ci = ci, fecha_nacimiento =nacimiento, altura = altura)
    db_object.save()
    return redirect('index')
def add_sing_in(request):
    """Log a user in from the 'lemail'/'lpassword' POST fields.

    On success: start the session, cache the e-mail under 'user_email' and
    redirect to the category list.  On failure: flash an error and return to
    the login form.  ('sing' is kept as-is for URLconf compatibility.)
    """
    username = request.POST['lemail']
    password = request.POST['lpassword']
    user = authenticate(request, username = username, password=password)
    if user is not None:
        login(request, user)
        request.session['user_email'] = user.email
        messages.success(request, 'Bienvenido')
        return redirect('show_categoria')
    else:
        # BUG FIX: the failure case was flashed with messages.success, so it
        # rendered with the success styling; use the error level instead.
        messages.error(request, 'Datos Incorrectos')
        return redirect('show_login')
def logout_view(request):
    """Log the user out, drop the cached e-mail from the session and go home."""
    logout(request)
    # pop() with a default tolerates the key already being gone, exactly like
    # the original try/del/except KeyError.
    request.session.pop('user_email', None)
    messages.success(request, 'Hasta Luego')
    return redirect('index')
|
18,067 | 6858845ef9be2faf52b154c30b80eb455475dfe5 | import CR3BP
import numpy as np
from scipy import optimize
from functools import partial
# Body masses.
m_Earth = 5.972E24  # kg
m_Moon = 7.34E22  # kg
m_Sun = 1.98E30  # kg
# CR3BP mass parameters, mu = m_secondary / (m_primary + m_secondary).
# NOTE(review): despite its name, mu_Earth uses m_Sun in the denominator,
# i.e. it is the Sun-Earth system's parameter (Earth as secondary) -- confirm.
mu_Earth = m_Earth / (m_Earth + m_Sun)
mu_Moon = m_Moon / (m_Earth + m_Moon)
class Simulation:
def __init__(self):
"""
Containter for simulation solution and necessary plotting variables and items
:param mu:
:param type: Earth-Sun or Earth-Moon system
:param direction: Forwards or backwards time integration. This defines unstable/stable manifolds for trajectory
simulations
"""
self.mu = None
self.type = None
self.direction = None
self.eq_points = None
self.init_conds = None
self.time = None
self.trajectory = None
self.initial_point = None
self.contour_levels = []
def find_lagrange_points(self, mu=None):
if mu is None:
mu = self.mu
bound_func = partial(CR3BP.Vx, y=0,
mu=mu) # Bind the potential to be on the line y=0 with the value of mu so optimize.fsolve only has to do a 1D solve for the x-point
L1 = optimize.fsolve(bound_func, np.array([0.5]))
L2 = optimize.fsolve(bound_func, np.array([1]))
L3 = optimize.fsolve(bound_func, np.array([-1]))
return {"L1": np.array([L1[0], 0]), "L2": np.array([L2[0], 0]), "L3": np.array([L3[0], 0]),
"L4": np.array([0.5 - mu, np.sqrt(3) / 2]), "L5": np.array([0.5 - mu, -np.sqrt(3) / 2])}
def pick_random_initial_conditions(self, determine_dir = False, determine_mass = False, pos_epsilon=5E-3, vel_espilon=1E-7):
"""
There are a few random conditions to pick
Type: Earth-Sun or Earth-Moon system
Lagrange Point: Which point to start near (L1, L2, L3, L4 or L5)
Direction: integrate forwards or backwards in time
Initial conditions: x, y, xdot and ydot all need to be defined appropriately
"""
# Pick Sun or Moon System
if determine_mass:
if np.random.random() > 0.5:
self.type = 'Sun'
self.mu = mu_Earth
else:
self.type = 'Moon'
self.mu = mu_Moon
else:
self.type = 'Moon'
self.mu = mu_Moon
self.eq_points = self.find_lagrange_points(self.mu)
for k, point, in self.eq_points.items():
self.contour_levels.append(CR3BP.V(point[0], point[1], self.mu))
self.contour_levels.sort()
# pick lagrange point
lagrange_point = np.random.randint(1,6)
init_xy = self.eq_points["L{}".format(lagrange_point)]
self.initial_point = self.eq_points["L{}".format(lagrange_point)]
self.initial_point_str = "L{}".format(lagrange_point)
# pick random offsets from lagrange point and a random velocity in x-y direction
self.init_conds = np.concatenate([np.random.uniform(init_xy[0] - pos_epsilon, init_xy[0] + pos_epsilon, [1,1]),
np.random.uniform(init_xy[1] - pos_epsilon, init_xy[1] + pos_epsilon,[1,1]),
np.random.uniform(-1 * vel_espilon, vel_espilon, [1, 2])], axis=1).reshape(-1)
if determine_dir:
# Pick to either simulate forwards or backwards in time
# Forwards in time will show unstable manifolds while backwards in time will show stable manifolds
if np.random.random() > 0.5:
# integrate fowards in time
self.direction = True
else:
# integrate backwards in time
self.direction = False
else:
self.direction = True
    def simulate_trajectory(self, time_length = 6 * np.pi):
        """Integrate the CR3BP equations of motion from self.init_conds.

        time_length: integration span (nondimensional time); negated when
        self.direction is False so the integrator runs backwards.
        """
        if not self.direction:
            time_length = -1 * time_length
        # Fix the mass ratio so the integrator only sees f(t, state).
        bound_differential_equation = partial(CR3BP.rotating_CR3BP_DEs, self.mu)
        self.time, self.trajectory = CR3BP.rk45(bound_differential_equation, self.init_conds, 0, time_length, 0.01)
        if not self.direction:
            # If you integrate backwards in time, the last data point becomes the first data point.
            # NOTE(review): adding np.min to already-negative times makes them
            # more negative; shifting to start at zero would be
            # `self.time - np.min(self.time)` -- confirm intent.
            self.time = self.time + np.min(self.time)
    def make_simulation(self, time_length=6*np.pi):
        """Convenience wrapper: randomize initial conditions, then integrate."""
        self.pick_random_initial_conditions()
        self.simulate_trajectory(time_length)
if __name__ == '__main__':
    # Smoke test: run a single randomized CR3BP simulation.
    Sims = Simulation()
    Sims.make_simulation()
18,068 | b20fb65ab0405ed2e014a8564f3f7d6ac5e80684 | # 384A : CODER
# Prerequisite : Implementation
# Python 2 script (raw_input / print statement syntax).
n=int(raw_input())
# Maximum non-adjacent coders on an n x n board: ceil(n^2 / 2).
if n%2: print (n*n)/2+1
else: print (n*n)/2
# Print the board as a checkerboard: even rows start with 'C',
# odd rows start with '.'.
for i in range(n):
    s=''
    if i%2==0:
        for j in range(n):
            if(j%2==0):s+='C'
            else: s+='.'
    else:
        for j in range(n):
            if(j%2==0):s+='.'
            else: s+='C'
    print s
|
18,069 | 26efda94fe2d2f2b09fc85521cdee7b8a103e7d4 | import numpy as np
import operator
from os import listdir
from sklearn.neighbors import KNeighborsClassifier as kNN
def img2vector(filename):
    """Read a 32x32 text image of digit characters and flatten it into a
    1x1024 numpy row vector.

    Parameters
    ----------
    filename : str
        Path to a text file with at least 32 lines of at least 32 digit
        characters each.

    Returns
    -------
    numpy.ndarray of shape (1, 1024)
    """
    returnVect = np.zeros((1,1024))
    # Context manager guarantees the file handle is closed even on error;
    # the original opened the file and never closed it (resource leak).
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            # Row i occupies columns 32*i .. 32*i+31 of the flat vector.
            for j in range(32):
                returnVect[0,32*i+j] = int(lineStr[j])
    return returnVect
def handwritingClassCount():
    """Train a 3-NN classifier on 'trainingDigits' and report its error
    rate on 'testDigits'.

    File names are expected to look like '<digit>_<index>.txt'; the
    leading digit before the underscore is the class label.
    """
    hwLabels = []
    trainingFileList = listdir('trainingDigits')
    m = len(trainingFileList)
    trainingMat = np.zeros((m,1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        # Class label is encoded in the file name before '_'.
        classNumber = int(fileNameStr.split('_')[0])
        hwLabels.append(classNumber)
        trainingMat[i,:] = img2vector('trainingDigits/%s' % fileNameStr)
    neigh = kNN(n_neighbors=3,algorithm='auto')
    neigh.fit(trainingMat,hwLabels)
    testFileList = listdir('testDigits')
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        classNumber = int(fileNameStr.split('_')[0])
        vectUnderTest = img2vector('testDigits/%s' % (fileNameStr))
        # predict() returns a length-1 array for the single sample.
        classifierResult = neigh.predict(vectUnderTest)
        print("分类结果%d真实结果%d" % (classifierResult,classNumber))
        if classifierResult != classNumber:
            errorCount += 1.0
    print("总共错了%d错误率为%s%%" % (errorCount,errorCount/mTest*100))
if __name__ == '__main__':
    # Requires 'trainingDigits' and 'testDigits' directories in the CWD.
    handwritingClassCount()
18,070 | b2abff11e155d7e9a6928089f8b4a29bba4c3a70 | from PIL import *
# NOTE(review): the raw string keeps doubled backslashes in the path;
# Windows tolerates them, but r"C:\Users\..." would be the usual form.
img=Image.open(r"C:\\Users\\techno\\Desktop\\calc.png")
# Crop box is (left, upper, right, lower) -> a 700x700 region.
crp=(200,200,900,900)
im=img.crop(crp)
# NOTE(review): pasting the 700x700 crop onto itself at box
# (200,200,900,900) extends past the cropped image's bounds --
# confirm the intended effect.
im.paste(im,crp)
im.show()
|
18,071 | c920300a761cd183033e41b8b488e9068e537bce | #rename files for each move with their place in the movelist
import os
import sys
src = os.getcwd()
# Clips live in ./<move-name>_gif_raw relative to the CWD.
targetDir = src + "/" + sys.argv[1] + "_gif_raw"
list = os.listdir(targetDir)
#convert 2nd argument into string array of indexes to insert
insertIndexes = sys.argv[2].split(",")
insertIndexes = [int(i) for i in insertIndexes] #convert string array into array of ints
#sort files by date modified
list.sort(key=lambda x: os.stat(os.path.join(targetDir, x)).st_mtime)
os.chdir(targetDir)
j = 0
for index,oldFile in enumerate(list):
    # Skip over every reserved slot listed in insertIndexes so the renamed
    # sequence leaves gaps at those move numbers (j counts slots skipped).
    while(j != len(insertIndexes) and index + (j+1) == insertIndexes[j]):
        j += 1
    move_num = index + 1 + j
    newFile = sys.argv[1] + "_move_{}.mp4".format(move_num)
    os.rename(oldFile, newFile)
|
18,072 | 2940cb993c681852833538763ebe951d994f9658 | class Node:
    def __init__(self, data):
        # Payload plus pointer to the next node (None marks the list end).
        self.data = data
        self.next = None
class LinkedList:
    """Singly linked list with an in-place merge sort.

    Method names are Indonesian: tambahkan = append, pengurut = merge,
    cariMid = find middle, kunjungi = visit/print.
    """

    def __init__(self):
        self.head = None

    def tambahkan(self, data_baru):
        """Append a new node holding *data_baru* at the tail (O(n))."""
        new_node = Node(data_baru)
        if self.head is None:
            self.head = new_node
            return
        node = self.head
        while node.next is not None:
            node = node.next
        node.next = new_node

    def pengurut(self, awal, akhir):
        """Merge two sorted chains *awal* and *akhir* into one sorted
        chain and return its head (recursive, stable)."""
        # `is None` replaces the original's `== None` identity-by-equality.
        if awal is None:
            return akhir
        if akhir is None:
            return awal
        if awal.data <= akhir.data:
            hasil = awal
            hasil.next = self.pengurut(awal.next, akhir)
        else:
            hasil = akhir
            hasil.next = self.pengurut(awal, akhir.next)
        return hasil

    def cariMid(self, head):
        """Return the middle node of the chain via slow/fast pointers
        (for even lengths: the left-of-center node)."""
        if head is None:
            return head
        slow = head
        fast = head
        while fast.next is not None and fast.next.next is not None:
            slow = slow.next
            fast = fast.next.next
        return slow

    def mergeSort(self, a):
        """Sort the chain headed by *a*; returns the new head."""
        if a is None or a.next is None:
            return a
        mid = self.cariMid(a)
        right_half = mid.next
        mid.next = None  # split the chain into two halves
        left = self.mergeSort(a)
        right = self.mergeSort(right_half)
        return self.pengurut(left, right)

    def kunjungi(self):
        """Print each node's data, one value per line."""
        node = self.head
        while node is not None:
            print(node.data)
            node = node.next
# Demo: build an unsorted list, merge-sort it, then print 1 18 69 77 86.
ll = LinkedList()
ll.tambahkan(77)
ll.tambahkan(69)
ll.tambahkan(18)
ll.tambahkan(1)
ll.tambahkan(86)
ll.head = ll.mergeSort(ll.head)
ll.kunjungi()
18,073 | 847bdae93aac5ef1573cd7719f4d0ad66eb9d65e | from django.shortcuts import render, redirect, HttpResponseRedirect
from django.contrib import messages
from .forms import DocumentForm
from .models import Document
def UploadView(request):
    """Handle document uploads and list the existing documents.

    POST: validate and save a new Document, then redirect (post/redirect/get).
    GET: render an empty upload form plus all existing documents.
    """
    if request.method == 'POST':
        # request.FILES must be bound as well; without it any FileField on
        # the form never receives the uploaded file, so uploads silently
        # fail validation or store nothing.
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            document = form.save(commit=False)
            document.save()
            messages.success(request, 'File uploaded successfully !!!')
            return HttpResponseRedirect('/upload')
    else:
        form = DocumentForm()
    # NOTE(review): QuerySet.all() never raises DoesNotExist (it returns an
    # empty queryset), so the except branch is effectively dead code; kept
    # to avoid changing behaviour callers might rely on.
    try:
        documents = Document.objects.all()
    except Document.DoesNotExist:
        documents = None
    return render(request, 'consultation/upload.html', {'documents': documents, 'form': form})
def CreateViewWork(request):
    """Render the consultation index page with all uploaded documents."""
    # NOTE(review): .all() never raises DoesNotExist, so the except branch
    # is unreachable -- kept as-is.
    try:
        documents = Document.objects.all()
    except Document.DoesNotExist:
        documents = None
    return render(request, 'consultation/index.html', {'documents': documents})
|
18,074 | d4f6a52c7f8ae77b822edd20d0ba5bfd79e7f52d | # -*- coding: utf-8 -*-
# @Author: Clarence
# @Date: 2018-03-09 10:33:03
# @Last Modified by: Clarence
# @Last Modified time: 2018-03-11 09:47:03
"""
使用sprite中Sprite类中自带的碰撞检测来检测小球是否碰撞
spritecollide(sprite, group, dokill, collided = None)
第四个参数collided=None时是用Rect来检测是否碰撞
collided表示回调函数,指定函数种类会根据指定检测函数来检测碰撞
pygame.sprite.collide_circle():
collide_circle(left, right) -> bool
Tests for collision between two sprites, by tesing to
see if two circles centered on the sprites overlap.
Intended to be passed as a collided callback function to
the *collide functions. Sprites must have a "rect" and an optional
"radius" attribute.
也就是说这个方法名(地址)传入spritecollide()方法中, sprite精灵必须
要有rect属性,
可选属性为radius.
pygame.sprite.Group()
A container class to hold an manage multiple Sprite
objects.
主要的方法有:
pygame.sprite.Group.sprites:
list of the Sprites this Group contains
pygame.sprite.Group.copy:
duplicate the Group
pygame.sprite.Group.add:
add Sprites to this Group
pygame.sprite.Group.remove:
remove Sprites to this Group
pygame.sprite.Group.has:
test if a Group contains Sprites
pygame.sprite.Group.update:
call the update method on contained Sprites
pygame.sprite.Group.draw:
blit the Sprite images
pygame.sprite.Group.clear:
draw a background over the Sprites
pygame.sprite.Group.empty:
remove all Sprites.
"""
import pygame
import sys
from pygame.locals import *
from random import *
class Ball(pygame.sprite.Sprite):
    """A moving ball sprite that wraps around the screen edges."""
    def __init__(self, image, position, speed, bg_size):
        # Call the parent class (Sprite) constructor
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(image).convert_alpha()
        self.rect = self.image.get_rect()
        # Ball position (top-left corner of its bounding rect).
        self.rect.left, self.rect.top = position
        self.speed = speed
        self.width, self.height = bg_size[0], bg_size[1]
        #self.width = self.width / 2
        '''
        Pygame.Rect.move():
            moves the rectangle
            move(x, y) -> Rect
            Returns a new rectangle that is moved by the given offset. The
            x and y arguments can be any integer value, positive or
            negative.
        Rect.move() accepts a pair of offsets, each of which may be
        positive or negative.  To animate the ball we define a move()
        method below and call it once per frame when drawing.
        '''
    def move(self):
        self.rect = self.rect.move(self.speed)
        # Wrap around horizontally (enter the opposite wall, snake-style).
        if self.rect.right < 0:
            self.rect.left = self.width
        elif self.rect.left > self.width:
            self.rect.right = 0
        # Wrap around vertically: bottom-to-top and top-to-bottom.
        elif self.rect.bottom < 0:
            self.rect.top = self.height
        elif self.rect.top > self.height:
            self.rect.bottom = 0
def main():
    """Run the bouncing-balls demo until the background music finishes."""
    pygame.init()
    ball_image = "gray_ball.png"
    bg_image = "background.png"
    running = True
    # Background music for the demo.
    pygame.mixer.music.load("bg_music.ogg")
    pygame.mixer.music.play()
    # Sound effects.
    loser_sound = pygame.mixer.Sound("loser.wav")
    laugh_sound = pygame.mixer.Sound("laugh.wav")
    winner_sound = pygame.mixer.Sound("winner.wav")
    hole_sound = pygame.mixer.Sound("hole.wav")
    # When the music ends, push a GAMEOVER event onto the event queue.
    # USEREVENT is the first id free for user-defined events; a second
    # custom event would be USEREVENT + 1.
    GAMEOVER = USEREVENT
    # Fire GAMEOVER once the background music finishes playing.
    pygame.mixer.music.set_endevent(GAMEOVER)
    # Window size matches the background image.
    bg_size = width, height = 1024, 681
    screen = pygame.display.set_mode(bg_size)
    pygame.display.set_caption("Play the Ball")
    # .png supports an alpha channel.
    background = pygame.image.load(bg_image).convert_alpha()
    # All ball sprites, plus a sprite Group used for collision tests.
    balls = []
    group = pygame.sprite.Group()
    for i in range(5):
        # Balls are 100x100; pick a random fully-on-screen position.
        position = randint(0, width-100), randint(0, height-100)
        # Random per-axis speed [vx, vy].
        speed = [randint(-10, 10), randint(-10, 10)]
        # Instantiate: Surface path, position pair, speed pair, screen size.
        ball = Ball(ball_image, position, speed, bg_size)
        # Re-roll the position until the new ball overlaps nothing
        # (False = do not remove colliding sprites from the group).
        while pygame.sprite.spritecollide(ball, group, False, pygame.sprite.collide_circle):
            ball.rect.left, ball.rect.top = randint(0, width - 100),\
                            randint(0, height - 100)
        balls.append(ball)  # track the ball in the flat list as well
        group.add(ball)
    # Clock caps the frame rate in the loop below.
    clock = pygame.time.Clock()
    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                sys.exit()
            elif event.type == GAMEOVER:
                loser_sound.play()
                pygame.time.delay(2000)
                laugh_sound.play()
                running = False  # leave the main loop
        screen.blit(background, (0, 0))
        for each in balls:
            each.move()
            screen.blit(each.image, each.rect)
        # Bounce handling: temporarily remove each ball, test it against
        # the rest, and reverse its velocity on contact.
        for each in group:
            group.remove(each)
            if pygame.sprite.spritecollide(each, group, False, pygame.sprite.collide_circle):
                each.speed[0] = -each.speed[0]
                each.speed[1] = -each.speed[1]
            group.add(each)
        pygame.display.flip()  # flush the display buffer to the screen
        clock.tick(30)
if __name__ == "__main__":
    main()
|
18,075 | d9704bb175e77a1bf040c5c6cb3d2f0236eb912a | # EXERCISE 46 : What color is that square (chess)
# Read a square like "a1" and report whether it is white or black.
position = input("Enter a chess board position: ")
col = position[0].lower()
row = int(position[1])

# Files a, c, e and g start (at rank 1) on a black square.
starts_with_black = col in "aceg"

# On a black-starting file, even ranks are white; otherwise odd ranks are.
if starts_with_black:
    white = row % 2 == 0
else:
    white = row % 2 != 0

if white:
    print(f"The position {position} is colored white")
else:
    print(f"The position {position} is colored black")
|
18,076 | 1019ad6020f168fc77580d756dc45cc992c1f047 | import itertools as it
class Solution:
    # @param s, a string
    # @return a list of strings
    def restoreIpAddresses(self, s):
        """Enumerate dot positions (i, j, k) that split *s* into four
        octets and collect every split forming a valid IPv4 address."""
        # if len(s) > 12:
        #     return []
        # i, j, k are the last character indices of octets 1..3.
        i,j,k = range(3)
        valids = []
        while i<3:
            while j<=i+3:
                while k<=j+3 and k+1<len(s):
                    valids = self.valid_address(s, i, j, k, valids)
                    k += 1
                j += 1
                k = j + 1
            i += 1
            j = i + 1
            k = j + 1
        return valids
    def valid_address(self, s, i, j, k, valids):
        """Append 'a.b.c.d' to *valids* when the cut (i, j, k) yields four
        octets in [0, 255] whose concatenation reproduces *s* exactly
        (which also rejects leading-zero octets such as '010')."""
        octets = []
        octets.append(int(s[:i+1]))
        octets.append(int(s[i+1:j+1]))
        octets.append(int(s[j+1:k+1]))
        octets.append(int(s[k+1:]))
        is_valid = True
        # NOTE(review): the join/compare is re-evaluated once per octet;
        # hoisting it out of the loop would be equivalent and cheaper.
        for octet in octets:
            is_valid = is_valid and octet>=0 and octet <=255 \
                and ''.join([str(o) for o in octets]) == s
        if is_valid:
            valids.append('.'.join([str(o) for o in octets]))
        return valids
    def generatePatterns(self):
        """All 4-tuples over {1, 2, 3}: candidate octet lengths."""
        x=[i for i in it.product([1,2,3],repeat=4)]
        return x
# Python 2 demo (print statement).
s = Solution()
# s.restoreIpAddresses('0000')
# s.restoreIpAddresses('25525511135')
# (a several-hundred-character all-'1' stress input was also tried here)
print s.restoreIpAddresses('010010')
18,077 | 906211cfbb254bf582cff0b83f72ffecde0d48af | # Напишите программу, которая запрашивает ввод двух значений.
# Если хотя бы одно из них не является числом (любым), то должна выполняться конкатенация, т. е. соединение, строк.
# В остальных случаях введённые числа суммируются.
# Read two values; if both parse as integers, print their sum, otherwise
# fall back to string concatenation (original comments were in Russian).
a = input("Input first number: ")
b = input("Input second number: ")
try:
    # int() raises ValueError on non-numeric text; catching only that
    # avoids masking unrelated errors (the original used a bare except).
    print(int(a) + int(b))
except ValueError:
    # input() already returns str, so no conversion is needed.
    print(a + b)
|
18,078 | f075bce50a25d8bf37f4c1a6f3eabd5bd2ba8a98 | from app import app,render_template,pd
from variables import data,dict_detail1,dict_detail2,dict_market_all,dict_loc1,dict_loc2,dict_vente2,dict_vente1,list_country_vente,list_country_loc,list_country_ratio,all_list_avg_vente,all_list_avg_loc,all_list_ratio ,data,data_detail
countries_to_detail=["Banlieue Nord","Les Berges du Lac","Ariana","Ben arous","Sousse"]
@app.route("/next/<int:id>")
def next(id):
    """Dashboard page for dataset *id* (0 or 1).

    Builds chart data for average sale prices, average rents, the
    rent/price ratio and three market-share pies, then renders next1.html.
    Regions listed in countries_to_detail get a link to a detail page.

    NOTE(review): the view name shadows the builtin ``next`` and is
    referenced by dash(), so it is left unchanged.
    """
    # i numbers the detail links handed out so far (used in the URL).
    i = 0
    list_obj_vente = list()
    list_obj_loc = list()
    data_points_ratio = list()
    data_points_mark_all = list()
    data_points_mark_vente = list()
    data_points_mark_loc = list()
    # Sale prices per region; detailed regions become clickable.
    for a, b in zip(list_country_vente, all_list_avg_vente[id]):
        if(a in countries_to_detail):
            i += 1
            list_obj_vente.append(data(a, b, f'/detail/{i}/{a}/{id}',"#34dbcb"))
        else:
            list_obj_vente.append(data(a, b))
    # Rents per region, same linking rule.
    for a, b in zip(list_country_loc, all_list_avg_loc[id]):
        if(a in countries_to_detail):
            i += 1
            list_obj_loc.append(data(a, b, f'/detail/{i}/{a}/{id}',"#34dbcb"))
        else:
            list_obj_loc.append(data(a, b))
    # Rent/price ratio per region (no links).
    for a, b in zip(list_country_ratio, all_list_ratio[id]):
        i += 1
        data_points_ratio.append(data(a, b))
    # Market-share pies: pie[0]=sales, pie[1]=rentals, pie[2]=all listings.
    pie=dict_market_all[id]
    for a,b in pie[0].items():
        data_points_mark_vente.append(data(a,b))
    for a,b in pie[1].items():
        data_points_mark_loc.append(data(a,b))
    for a,b in pie[2].items():
        data_points_mark_all.append(data(a,b))
    return render_template("next1.html",
                           data_points=list_obj_vente,
                           data_points_loc=list_obj_loc,
                           data_points_ratio=data_points_ratio,
                           data_points_mark_vente=data_points_mark_vente,
                           data_points_mark_all=data_points_mark_all,
                           data_points_mark_loc=data_points_mark_loc,
                           current_id=id,
                           countries_to_detail=countries_to_detail,
                           )
# ****************************************************************************************************
def for_detail(dict_vente,dict_loc,country,dict_ann):
    """Collect the per-year sale series, rent series and listing summary
    for *country* from the given dicts.

    NOTE(review): if *country* is missing from dict_ann this function
    falls off the end and returns None, so the caller's tuple unpacking
    would raise -- confirm all detailed countries exist in dict_ann.
    """
    list_obj_vente = list()
    list_obj_loc = list()
    for a,b in dict_vente.items():
        if(a==country):
            # b is a pair of parallel sequences: (labels, values).
            for i,j in zip(b[0],b[1]):
                list_obj_vente.append(data(i, j))
    for a, b in dict_loc.items():
        if(a==country):
            for i,j in zip(b[0],b[1]):
                list_obj_loc.append(data(i, j))
    for a, b in dict_ann.items():
        if(a==country):
            # Seven summary fields packed into a data_detail record.
            obj_ann=data_detail(b[0],b[1],b[2],b[3],b[4],b[5],b[6])
            return list_obj_vente,list_obj_loc,obj_ann
@app.route('/detail/<int:id>/<string:country>/<int:next_page>')
def detail(id,country,next_page):
    """Detail page for one region; next_page selects dataset 1 or 2."""
    print(f"Country {country}")
    if next_page == 0:
        list_obj_vente, list_obj_loc, obj_ann = for_detail(dict_vente1, dict_loc1, country, dict_detail1)
    else:
        list_obj_vente, list_obj_loc, obj_ann = for_detail(dict_vente2, dict_loc2, country, dict_detail2)
    print(obj_ann.prix)
    return render_template('detail.html',
                           data_points=list_obj_vente,
                           data_points_loc=list_obj_loc,
                           data_ann=obj_ann,
                           )
# ****************************************************************************************************
@app.route('/')
def dash():
    """Home page: reuse the dashboard view for dataset 0."""
    return next(0)
@app.route('/no_deatil')
def no_deatil():
    # NOTE(review): route and function name are misspelled ('deatil') but
    # may be referenced by templates, so they are left unchanged.
    return render_template("nodetail.html")
@app.route('/click')
def click():
    return render_template("inline_CANVAS.html")
@app.route('/compare')
def compare():
    """Side-by-side comparison: sale and rent averages for both datasets."""
    list_obj_vente=list()
    list_obj_vente1=list()
    list_obj_loc=list()
    list_obj_loc1=list()
    # Dataset 0 vs dataset 1, sales first then rents.
    for a, b in zip(list_country_vente, all_list_avg_vente[0]):
        list_obj_vente.append(data(a, b))
    for a, b in zip(list_country_vente, all_list_avg_vente[1]):
        list_obj_vente1.append(data(a, b))
    for a, b in zip(list_country_loc, all_list_avg_loc[0]):
        list_obj_loc.append(data(a, b))
    for a, b in zip(list_country_loc, all_list_avg_loc[1]):
        list_obj_loc1.append(data(a, b))
    return render_template('compare.html',
                           data_points=list_obj_vente,
                           data_points1=list_obj_vente1,
                           data_points_loc=list_obj_loc,
                           data_points_loc1=list_obj_loc1
                           )
|
18,079 | 2fade811301eb4767d2a8fedc6cd8e5f87fce561 | from .machines import *
from .operations import *
from .runs import *
from .tables import *
from .graphs import *
from .regexps import *
from .grammars import *
|
18,080 | 3a051c42ec166009d845a565b0b4c94e35377e61 | """
Fifth function
"""
def string_to_six_bit_list(string):
    """Split *string* into 6-character chunks, right-padding the last
    chunk with '0' so every chunk is exactly 6 characters long.

    Prints the resulting list (preserving the original behaviour) and
    also returns it so callers can use the value programmatically.
    """
    # str.ljust pads with '0' only when the slice is shorter than 6,
    # replacing the original pop/measure/re-append sequence.
    six_bit_list = [string[i:i + 6].ljust(6, "0")
                    for i in range(0, len(string), 6)]
    print(six_bit_list)
    return six_bit_list
|
18,081 | ef663216bd9e662166f5e0f931bb4d394a12c5c1 | import getopt
import os
import re
import sys
def parse_args():
    """
    Parse the command line: -p/--path (or a bare directory argument)
    selects the controller directory to scan.  Prints usage and exits
    with status 2 on -h/--help or on an unrecognised option.
    """
    path = ''
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hp:", ["help", "path="])
    except getopt.GetoptError:
        print(f"Usage: python3 {sys.argv[0]} -p <api path>")
        print(f"Or: python3 {sys.argv[0]} --path=<api path>")
        print(f"For help: python3 {sys.argv[0]} --help/-h ")
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print("功能:提取git中controller下的api的uri")
            print("请输入controller文件夹的路径")
            print(f"Usage: python3 {sys.argv[0]} <api path>")
            print(f"Usage: python3 {sys.argv[0]} -p <api path>")
            print(f"Or: python3 {sys.argv[0]} --path=<api path>")
            sys.exit(2)
        elif opt in ("-p", "--path"):
            path = arg
    # Positional arguments (without '-'/'--'): the last existing directory wins.
    for i in range(0, len(args)):
        if os.path.isdir(args[i]):
            path = args[i]
    return path
def find_file_content(file_path: str) -> list:
    """Extract API URIs from one PHP controller file.

    Derives the URI prefix from the file path (the segment between
    ``controller(s)/`` and ``_controller``), then scans the file for
    ``public function`` definitions, pairing each with a Chinese usage
    name found in the preceding comment lines.

    Returns a list of "uri,usage" strings.
    Raises AssertionError when the path is not a controller file.
    """
    # Map the file path to an API path, e.g.
    # .../controllers/user_controller.php -> /user
    try:
        api_path = re.match(r'^.*controller[s]?(/[\w|/]+)_controller.*', file_path).group(1)
    except AttributeError:
        # re.match returned None (so .group does not exist): not a
        # controller file.  The original bare `except:` also swallowed
        # unrelated errors such as KeyboardInterrupt.
        raise AssertionError(f'不是controller文件:{file_path}') from None
    with open(file_path) as f:
        lines = f.readlines()
    result = []
    for l in range(len(lines)):
        m = re.match(r'.*public function.*', lines[l])
        if m:
            func_name = re.match(r'^public function (\w*)\(', m.group().strip()).group(1).strip()
            # Skip private/magic-looking functions (leading underscore).
            if not func_name.startswith('_'):
                # Walk up to 5 lines backwards for a Chinese usage name.
                usage_name = ''
                for i in range(6):
                    # Keep only lines containing CJK characters.
                    usage = re.match(r'^.*([\u4e00-\u9fff]+)\w*', lines[l - i - 1])
                    if usage:
                        # Retain CJK plus ASCII digits/letters only.
                        usage_name = ''.join([_ for _ in usage.group() if
                                              '\u4e00' <= _ <= '\u9fff' or u'\u0030' <= _ <= u'\u007A']) + usage_name
                    # A line with braces means we reached the previous
                    # function; stop backtracking.
                    if re.match(r'.*[{|}]+.*', lines[l - i - 1]):
                        break
                result.append(api_path + '/' + func_name + ',' + usage_name)
    return result
def main(path: str):
    """Walk *path*, filter out ignored files, and print the API lines
    extracted from every controller file found."""
    # Collect every file under path (bottom-up walk).
    file_list = []
    g = os.walk(path, topdown=False)
    for root, dir_names, file_names in g:
        for f in file_names:
            file_list.append(os.path.join(root, f))
    # Patterns to skip (case-insensitive regex alternation).
    ignore_pattern = [
        # 'cron*',
        'xxxxxxxxx'
    ]
    ignore_regex = re.compile(r'(' + '|'.join(ignore_pattern) + ')', re.I)
    file_list = list(filter(lambda x:not ignore_regex.search(x), file_list))
    # print(file_list)
    for file in file_list:
        try:
            for i in find_file_content(file):
                print(i)
        except Exception as e:
            # Non-controller files raise AssertionError; report and move on.
            print(e)
            pass
if __name__ == '__main__':
    # Argument: directory containing the controller files.
    main(parse_args())
|
18,082 | a34a33b14809b931bfc800c237aed67048dcf433 | import statsapi
import arrow
from datetime import datetime
from inputs import utcnow, pstnow, today
# Today's date formatted for display, e.g. "March 9th, 2021".
formatToday = pstnow.format('MMMM Do, YYYY')
# TODO : any way to get MLB league-wide stats? Need to calculate cFIP
def getSchedule(gamesDate = today, rangeEndDate = None):
    """Return the MLB schedule (a list of game dicts) for one date, or
    for the inclusive range gamesDate..rangeEndDate when an end date is
    given."""
    if rangeEndDate is None:
        schedule = statsapi.schedule(gamesDate)
        return schedule
    # NOTE(review): this second test is always True here; a plain else
    # (or no guard) would be equivalent.
    if rangeEndDate is not None:
        schedule = statsapi.schedule(start_date=gamesDate,end_date=rangeEndDate)
        return schedule
def getFirstPitch(gamesDate = today):
    """Find the earliest first pitch (US/Pacific) on *gamesDate*.

    Returns the first-pitch time as an arrow object, or False when no
    game actually starts on that local date.
    """
    schedule = getSchedule(gamesDate)
    dayFirstPitch = None
    firstGames = []
    for game in schedule:
        # second game in a double header seems to get a game_datetime of 8:33 PM PST the previous day (12:33 AM EST the same day)
        # - makes second game of doubleheaders appear as earliest game if we don't filter them out
        # scheduled playoff games w/o a start time appear to get 12:33AM PST the same day (7:33ET)
        gameFirstPitch = arrow.get(game['game_datetime'])
        pstGameFirstPitch = gameFirstPitch.to('US/Pacific')
        # Only consider games whose local (PST) date matches gamesDate.
        if pstGameFirstPitch.date() == arrow.get(gamesDate).date():
            if dayFirstPitch is None or gameFirstPitch <= dayFirstPitch:
                dayFirstPitch = arrow.get(game['game_datetime'])
                # we need to remove all other games added to firstGames if this game is earlier
                # NOTE(review): dayFirstPitch was just set to this game's
                # time, so this comparison can never be True and firstGames
                # is never cleared -- the comparison likely belongs before
                # the reassignment above.
                if gameFirstPitch < dayFirstPitch:
                    firstGames = []
                firstGames.append(game)
    if dayFirstPitch is not None:
        pstFirstPitch = dayFirstPitch.to('US/Pacific')
        formatFirstPitch = pstFirstPitch.format('h:mm A ZZZ')
        formatGamesDate = pstFirstPitch.format('MMMM Do, YYYY')
        print('First pitch on ' + formatGamesDate + ' is at ' + formatFirstPitch)
        print('First games count: ' + str(len(firstGames)))
        return pstFirstPitch
    else:
        return False
18,083 | f43beec410b62884e3af93d63562b3427ede650b | #!/usr/bin/python
from handler import Handler
import accessControl
class DeleteBlog(Handler):
    """Handler that deletes a blog post and returns to the blog index.

    The access-control decorators ensure the user is logged in, the post
    exists, and the user owns it; they also supply the *blog* entity.
    """
    @accessControl.user_logged_in
    @accessControl.post_exist
    @accessControl.user_owns_blog
    def get(self, blog_id, blog):
        # Delete the entity, then redirect to the blog listing.
        blog.delete()
        self.redirect('/blog/?')
18,084 | 2c4814b661f7d5e6d319e01c7820b03acd6b7485 | """
Some common utilities.
"""
import yaml
from funcy import *
from munch import Munch
import cytoolz as tz
def difference(*colls):
    """
    Find the keys that have different values in an arbitrary number of (nested) collections. Any key
    that differs in at least 2 collections is considered to fit this criterion.
    Returns one dict per input collection mapping each differing leaf
    path (a tuple) to that collection's value at the path.
    """
    # Get all the leaf paths for each collection: make each path a tuple
    leaf_paths_by_coll = list(map(lambda c: list(map(tuple, get_all_leaf_paths(c))), colls))
    # Find the union of all leaf paths: merge all the paths and keep only the unique paths
    union_leaf_paths = list(distinct(concat(*leaf_paths_by_coll)))
    # Get the values corresponding to these leaf paths in every collection: if a leaf path doesn't exist, assumes None
    values_by_coll = list(map(lambda lp: list(map(lambda coll: tz.get_in(lp, coll), colls)), union_leaf_paths))
    # Filter out the leaf paths that have identical values across the collections.
    # NOTE: map(0, ...) / map(1, ...) is funcy's extended map, where an int
    # acts as an itemgetter.
    keep_leaf_paths = list(map(0, filter(lambda t: not allequal(t[1]), zip(union_leaf_paths, values_by_coll))))
    keep_values = list(map(1, filter(lambda t: not allequal(t[1]), zip(union_leaf_paths, values_by_coll))))
    # Rearrange to construct a list of dictionaries -- one per original collection.
    # Each of these dictionaries maps a 'kept' leaf path to its corresponding
    # value in the collection
    differences = list(map(lambda vals: dict(zip(keep_leaf_paths, vals)), list(zip(*keep_values))))
    return differences
def rmerge(*colls):
    """
    Recursively merge an arbitrary number of collections.
    For conflicting values, later collections to the right are given priority.
    Note that this function treats sequences as a normal value and sequences are not merged.
    Uses:
    - merging config files
    """
    if isinstance(colls, tuple) and len(colls) == 1:
        # A squeeze operation since merge_with generates tuple(list_of_objs,)
        colls = colls[0]
    # NOTE: all(pred, seq) is funcy's predicate form of all().
    if all(is_mapping, colls):
        # Merges all the collections, recursively applies merging to the combined values
        return merge_with(rmerge, *colls)
    else:
        # If colls does not contain mappings, simply pick the last one
        return last(colls)
def prettyprint(s):
    """Pretty-print an object: YAML-dump its __dict__ when it has one,
    YAML-dump plain dicts, otherwise print the value verbatim."""
    if hasattr(s, '__dict__'):
        print(yaml.dump(s.__dict__))
    elif isinstance(s, dict):
        print(yaml.dump(s))
    else:
        print(s)
def allequal(seq):
    """Return True when *seq* holds at most one distinct (hashable) value."""
    distinct_values = set(seq)
    return len(distinct_values) <= 1
@autocurry
def listmap(fn, seq):
    """Curried list(map(fn, seq))."""
    return list(map(fn, seq))
@autocurry
def prefix(s, p):
    """Prepend *p* to a string, or to every string in a (nested) list."""
    if isinstance(s, str):
        return f'{p}{s}'
    elif isinstance(s, list):
        return list(map(prefix(p=p), s))
    else:
        raise NotImplementedError
@autocurry
def postfix(s, p):
    """Append *p* to a string, or to every string in a (nested) list."""
    if isinstance(s, str):
        return f'{s}{p}'
    elif isinstance(s, list):
        return list(map(postfix(p=p), s))
    else:
        raise NotImplementedError
@autocurry
def surround(s, pre, post):
    """Wrap *s* (string or list of strings) with *pre* and *post*."""
    return postfix(prefix(s, pre), post)
def nested_map(f, *args):
    """Recursively apply *f* across parallel nested structures.

    Tuples and lists are walked element-wise in lockstep across all
    arguments (tuples come back as lists), dicts are walked key-wise,
    and *f* is applied at the leaves.
    """
    assert len(args) > 0, 'Must have at least one argument.'
    first = args[0]
    if isinstance(first, (tuple, list)):
        return [nested_map(f, *grouped) for grouped in zip(*args)]
    if isinstance(first, dict):
        return {key: nested_map(f, *[a[key] for a in args]) for key in first}
    return f(*args)
@autocurry
def walk_values_rec(f, coll):
    """
    Similar to funcy's walk_values, but does so recursively, including mapping f over lists.
    """
    if is_mapping(coll):
        # Transform values first, then apply f to the mapping itself.
        return f(walk_values(walk_values_rec(f), coll))
    elif is_list(coll):
        return f(list(map(walk_values_rec(f), coll)))
    else:
        return f(coll)
@autocurry
def nested_dict_walker(fn, coll):
    """
    Apply a function over the mappings contained in coll.
    """
    # iffy: apply fn only where a node is a mapping; pass others through.
    return walk_values_rec(iffy(is_mapping, fn), coll)
def get_all_leaf_paths(coll):
    """
    Returns a list of paths to all leaf nodes in a nested dict.
    Paths can travel through lists and the index is inserted into the path.
    """
    if isinstance(coll, dict) or isinstance(coll, Munch):
        # Mapping: prepend each key to every leaf path of its value.
        return list(cat(map(lambda t: list(map(lambda p: [t[0]] + p,
                                               get_all_leaf_paths(t[1])
                                               )),
                            iteritems(coll)))
                    )
    elif isinstance(coll, list):
        # List: the index plays the role of the key.
        return list(cat(map(lambda t: list(map(lambda p: [t[0]] + p,
                                               get_all_leaf_paths(t[1])
                                               )),
                            enumerate(coll)))
                    )
    else:
        # Leaf node: one empty path.
        return [[]]
def get_all_paths(coll, prefix_path=(), stop_at=None, stop_below=None):
    """
    Given a collection, by default returns paths to all the leaf nodes.
    Use stop_at to truncate paths at the given key.
    Use stop_below to truncate paths one level below the given key.
    """
    assert stop_at is None or stop_below is None, 'Only one of stop_at or stop_below can be used.'
    # NOTE: `in str(...)` is a substring test on the stringified key, so
    # stop keys also match partially (e.g. 'net' matches 'subnet').
    if stop_below is not None and stop_below in str(last(butlast(prefix_path))):
        return [[]]
    if stop_at is not None and stop_at in str(last(prefix_path)):
        return [[]]
    if isinstance(coll, dict) or isinstance(coll, Munch) or isinstance(coll, list):
        if isinstance(coll, dict) or isinstance(coll, Munch):
            items = iteritems(coll)
        else:
            items = enumerate(coll)
        # Prepend each key/index to the paths found beneath it.
        return list(cat(map(lambda t: list(map(lambda p: [t[0]] + p,
                                               get_all_paths(t[1],
                                                             prefix_path=list(prefix_path) + [t[0]],
                                                             stop_at=stop_at,
                                                             stop_below=stop_below)
                                               )),
                            items))
                    )
    else:
        return [[]]
def get_only_paths(coll, pred, prefix_path=(), stop_at=None, stop_below=None):
    """
    Get all paths that satisfy the predicate fn pred.
    First gets all paths and then filters them based on pred.
    Each path is a list of keys/indices, as produced by get_all_paths.
    """
    all_paths = get_all_paths(coll, prefix_path=prefix_path, stop_at=stop_at, stop_below=stop_below)
    return list(filter(pred, all_paths))
if __name__ == '__main__':
    # Demo: coll3 differs from coll1/coll2 in 'b' and 'c.d' and adds 'e'.
    coll1 = {'a': 1,
             'b': 2,
             'c': {'d': 12}}
    coll2 = {'a': 1,
             'b': 2,
             'c': {'d': 13}}
    coll3 = {'a': 1,
             'b': 3,
             'c': {'d': 14},
             'e': 4}
    difference(coll1, coll2, coll3)
18,085 | 3600bd1c88b912a893b573bacb2305e809d9c1f5 | #!/usr/bin/env python
from os import path
from glob import glob
from shutil import copy
from sys import exit
from csv import reader
# import sqlite3
from ROOT import TH1F, TCanvas, TLegend, TMarker
from ROOT import gROOT, gStyle, SetOwnership
from ROOT import (kRed, kOrange, kYellow, kGreen,
kBlue, kViolet, kPink, kCyan, kBlack)
START = 1880   # first year of the name data
END = 2015     # last year (exclusive upper histogram edge)
REBIN = 5      # merge histogram bins into 5-year buckets
inputDir = '/Users/lantonel/DataScience/BabyNames/inputData/'
fileList = glob(inputDir+'yob*')
gROOT.SetBatch()      # render plots off-screen, no GUI windows
gStyle.SetOptStat(0)  # hide ROOT's default statistics box
# One line colour per family member, cycled in list order.
colors = ([kRed+1, kOrange+1, kYellow+1, kGreen+1,
           kBlue+1, kViolet+1, kPink+1, kCyan+1, kBlack])
# Family members: [name, sex, birth year (optional)].
antonellis = [
    ['Jamie', 'M', 1982],
    ['Rebecca', 'F', 1986],
    ['Christopher', 'M', 1988],
    ['Dominic', 'M', 1991]
]
storey_girls = [
    ['Sarah', 'F', 1983],
    ['Nicole', 'F', 1986],
    ['Rebecca', 'F', 1998],
    ['Rachel', 'F', 1998],
]
storey_boys = [
    ['Adam', 'M', 1985],
    ['Robert', 'M', 1988],
    ['Richard', 'M', 1992],
    ['Stephen', 'M', 1995],
    ['Michael', 'M', 2000]
]
# Candidate names (no birth year -> no marker on the plot).
name_ideas = [
    ['Madeleine', 'F'],
    ['Vincent', 'M'],
    ['Genevieve', 'F'],
    ['Augustine', 'M'],
    ['Zelie', 'F'],
    ['Maximilian', 'M'],
]
def makePlot(name, sex):
    """Histogram of *name*'s popularity per year: births with the name
    divided by total births of that sex, rebinned to REBIN-year buckets.
    Unknown names produce an empty plot."""
    plot = TH1F(name + "_" + sex, name, END-START, START, END)
    plot.SetLineWidth(3)
    # Keep the histogram alive after this function returns (ROOT would
    # otherwise own and delete it with the current directory).
    SetOwnership(plot, False)
    if sex == "M" and name not in boyNames:
        return plot
    if sex == "F" and name not in girlNames:
        return plot
    for year in range(START, END):
        if sex == "M" and year in boyNames[name]:
            plot.Fill(year, float(boyNames[name][year])/boyBirths[year])
        elif sex == "F" and year in girlNames[name]:
            plot.Fill(year, float(girlNames[name][year])/girlBirths[year])
    plot.Rebin(REBIN)
    return plot
# change the plot to go from 0 to 1
# 0 => least popular year
# 1 => most popular year
def normalizePlot(plot):
    """Min-max normalize the bin contents in place (plus a 0.1 offset so
    the least popular years stay visible above the axis)."""
    min = plot.GetMinimum()
    max = plot.GetMaximum()
    range_ = max - min
    for bin in range(plot.GetNbinsX()+1):
        content = plot.GetBinContent(bin)
        newContent = (content - min) / range_ + 0.1
        plot.SetBinContent(bin, newContent)
def produceBoyGirlPlot(name):
    """Compare *name*'s popularity among boys vs girls on one canvas and
    save it as '<name>_BoyGirl.pdf'."""
    boyPlot = makePlot(name, "M")
    girlPlot = makePlot(name, "F")
    boyPlot.SetLineColor(kBlue+1)
    girlPlot.SetLineColor(kPink+1)
    legend = TLegend(0.1, 0.7, 0.4, 0.87)
    legend.SetBorderSize(0)
    legend.SetFillStyle(0)
    canvas = TCanvas(name)
    legend.AddEntry(boyPlot, "boys", "L")
    legend.AddEntry(girlPlot, "girls", "L")
    # The girl plot is drawn first, so stretch its axis to fit both.
    if boyPlot.GetMaximum() > girlPlot.GetMaximum():
        girlPlot.SetMaximum(boyPlot.GetMaximum())
    girlPlot.Draw("C")
    boyPlot.Draw("same C")
    legend.Draw()
    canvas.SaveAs(name + "_BoyGirl.pdf")
def produceFamilyPlot(family, surname):
    """Overlay normalized popularity curves for each [name, sex, birth?]
    entry of *family*, marking each birth year with a dot, and save the
    canvas as '<surname>.pdf'."""
    canvas = TCanvas(surname, "", 2400, 800)
    legend = TLegend(0.1, 0.5, 0.4, 0.87)
    legend.SetBorderSize(0)
    legend.SetFillStyle(0)
    counter = 0
    markers = []
    for person in family:
        if len(person) < 2:
            print "error"
            continue
        name = person[0]
        sex = person[1]
        # Birth year is optional; 0 means "draw no marker".
        if len(person) >= 3:
            birth = person[2]
        else:
            birth = 0
        plot = makePlot(name, sex)
        normalizePlot(plot)
        plot.SetTitle("")
        plot.GetYaxis().SetLabelSize(0)
        plot.GetXaxis().SetLabelSize(0.07)
        plot.GetYaxis().SetTickSize(0)
        plot.GetYaxis().SetTitle("normalized popularity")
        plot.GetYaxis().SetTitleSize(0.07)
        plot.GetYaxis().SetTitleOffset(0.22)
        plot.SetLineColor(colors[counter])
        legend.AddEntry(plot, name, "L")
        plot.Draw("same C")
        if birth > 0:
            # Dot the curve at the person's birth year.
            marker = TMarker(birth+0.5,
                             plot.GetBinContent(plot.FindBin(birth)), 20)
            marker.SetMarkerColor(colors[counter])
            marker.SetMarkerSize(5)
            markers.append(marker)
            # NOTE(review): indexing markers by counter breaks if an
            # earlier person lacked a birth year; markers[-1] would be safer.
            markers[counter].Draw("same")
        counter += 1
    legend.Draw()
    canvas.SaveAs(surname + ".pdf")
# parse input data into a python dictionary, using the name as a key
# use separate dictionaries for boy/girl names
boyNames = {}
girlNames = {}
# Total births per year per sex, used to normalize name counts.
boyBirths = {}
boyFile = open("BoyBirthsByYear.txt")
for line in boyFile:
    splitline = line.split(" ")
    boyBirths[int(splitline[0])] = int(splitline[1].strip())
girlBirths = {}
girlFile = open("GirlBirthsByYear.txt")
for line in girlFile:
    splitline = line.split(" ")
    girlBirths[int(splitline[0])] = int(splitline[1].strip())
for file in fileList:
    # File names look like 'yobYYYY.txt'; slice out the year.
    year = file.split("/")[-1][3:7]
    with open(file) as data:
        entries = reader(data)
        # Each CSV row is name, sex (M/F), count.
        for entry in entries:
            if len(entry) < 3:
                continue
            name = entry[0]
            sex = entry[1]
            count = entry[2]
            if sex == "M" and name not in boyNames:
                boyNames[name] = {}
                boyNames[name]["sex"] = sex
            elif sex == "F" and name not in girlNames:
                girlNames[name] = {}
                girlNames[name]["sex"] = sex
            if sex == "M":
                boyNames[name][int(year)] = count
            elif sex == "F":
                girlNames[name][int(year)] = count
print "parsed", len(girlNames), "unique girl names"
print "parsed", len(boyNames), "unique boy names"
produceFamilyPlot(antonellis, "Antonelli")
produceFamilyPlot(storey_girls, "Storey Girls")
produceFamilyPlot(storey_boys, "Storey Boys")
produceFamilyPlot(name_ideas, "Name Ideas")
produceBoyGirlPlot("Jamie")
produceBoyGirlPlot("Madeleine")
produceBoyGirlPlot("Madeline")
produceBoyGirlPlot("Nicole")
produceBoyGirlPlot("Zelie")
18,086 | 235e2700b1818835393d064315426f864a99af5d | from django.conf.urls.defaults import *
from polls.models import Poll
# Uncomment the next two lines to enable the admin:
#from django.contrib import admin
#admin.autodiscover()
# Common keyword arguments shared by the generic list/detail views below.
info_dict = {
    'queryset': Poll.objects.all(),
}
# URL routes for the polls app. NOTE(review): this is the legacy
# (Django <= 1.4) style -- `patterns`, dotted-string view references and
# django.views.generic.list_detail were removed in later Django versions;
# confirm the project pins an old Django release.
urlpatterns = patterns('',
    (r'^$', 'django.views.generic.list_detail.object_list', info_dict),
    (r'^(?P<object_id>\d+)/$', 'django.views.generic.list_detail.object_detail', info_dict),
    url(r'^(?P<object_id>\d+)/results/$', 'django.views.generic.list_detail.object_detail', dict(info_dict, template_name='polls/results.html'), 'poll_results'),
    (r'^(?P<poll_id>\d+)/vote/$', 'polls.views.vote'),
    (r'^contactForm$', 'polls.views.contactForm'),
    (r'^pollForm$', 'polls.views.pollForm'),
    (r'^newPoll/$', 'polls.views.newPoll'),
    (r'^images/$', 'polls.views.uploadImages'),
    (r'^uploadImages/$', 'polls.views.uploadImages'),
    (r'^imageList/$', 'polls.views.imageList'),
)
|
18,087 | 8887c6db4636b10b72ddbb96f8f4e03784bf9a2b | """count_points_in_region
Count the number of points that fall into each region of a brain atlas.
"""
import argparse
import json
import numpy as np
import sys
import tifffile
from .brain_regions import BrainRegions
from nuggt.utils.warp import Warper
def parse_args(args=None):
    """Parse command-line options for the point-counting script.

    :param args: argument strings to parse. Defaults to ``sys.argv[1:]``
        read at *call* time -- the previous ``args=sys.argv[1:]`` default
        was captured once at import time, so any later change to
        ``sys.argv`` (e.g. by a test harness) was silently ignored.
    :return: the parsed ``argparse.Namespace``.
    """
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument("--points",
                        help="The points to be counted",
                        required=True)
    parser.add_argument("--alignment",
                        help="The points file from nuggt-align",
                        required=True)
    parser.add_argument("--reference-segmentation",
                        help="The reference segmentation that we map to.",
                        required=True)
    parser.add_argument("--brain-regions-csv",
                        help="The .csv file that provides the correspondences "
                        "between segmentation IDs and their brain region names",
                        required=True)
    parser.add_argument("--output",
                        help="The name of the .csv file to be written",
                        required=True)
    parser.add_argument("--level",
                        help="The granularity level (1 to 7 with 7 as the "
                        "finest level. Default is the finest.",
                        type=int,
                        default=7)
    parser.add_argument("--xyz",
                        help="Specify this flag if the points file is "
                        "ordered by X, Y, and Z instead of Z, Y and X.",
                        action="store_true")
    return parser.parse_args(args)
def warp_points(pts_moving, pts_reference, points):
    """Map *points* from the moving coordinate frame into the reference one.

    :param pts_moving: alignment landmarks in the moving coordinate frame
    :param pts_reference: the same landmarks in the reference frame
    :param points: the coordinates to be transformed
    :return: *points* expressed in the reference frame
    """
    # Build the warping transform from the landmark pairs, then apply it.
    return Warper(pts_moving, pts_reference)(points)
def main():
    """Count the detected points per brain region and write a .csv summary."""
    args = parse_args()
    # Point coordinates, one row per point; z, y, x order unless --xyz given.
    with open(args.points) as fd:
        points = np.array(json.load(fd))
    if args.xyz:
        # Reverse each row so downstream code always sees z, y, x.
        points = points[:, ::-1]
    with open(args.alignment) as fd:
        alignment = json.load(fd)
    moving_pts = np.array(alignment["moving"])
    ref_pts = np.array(alignment["reference"])
    # Warp into the reference frame and round to the nearest voxel index.
    xform = warp_points(moving_pts, ref_pts, points)
    xform = (xform + .5).astype(int)
    seg = tifffile.imread(args.reference_segmentation).astype(np.uint32)
    # Keep only points that land inside the segmentation volume.
    mask = np.all((xform >= 0) & (xform < np.array(seg.shape).reshape(1, 3)), 1)
    xform_legal = xform[mask]
    counts = np.bincount(
        seg[xform_legal[:, 0], xform_legal[:, 1], xform_legal[:, 2]])
    # Out-of-bounds points are lumped into segment id 0 ("not in any region").
    counts[0] += np.sum(~ mask)
    seg_ids = np.where(counts > 0)[0]
    counts_per_id = counts[seg_ids]
    with open(args.brain_regions_csv) as fd:
        br = BrainRegions.parse(fd)
    if args.level == 7:
        # Finest granularity: one output row per segmentation id.
        with open(args.output, "w") as fd:
            fd.write('"id","region","count"\n')
            for seg_id, count in zip(seg_ids, counts_per_id):
                if seg_id == 0:
                    region = "not in any region"
                else:
                    region = br.name_per_id.get(seg_id, "id%d" % seg_id)
                fd.write('%d,"%s",%d\n' % (seg_id, region, count))
    else:
        # Coarser level: aggregate counts under the region name at that level.
        d = {}
        for seg_id, count in zip(seg_ids, counts_per_id):
            level = br.get_level_name(seg_id, args.level)
            if level in d:
                count += d[level]
            d[level] = count
        with open(args.output, "w") as fd:
            fd.write('"region","count"\n')
            for level in sorted(d):
                fd.write('"%s",%d\n' % (level, d[level]))
# Allow the module to be run as a script.
if __name__=="__main__":
    main()
|
18,088 | 721ae2783fe3d25a5a31353523cc4809ef61cc61 | from django.shortcuts import render,redirect
from django.contrib.auth import authenticate,logout as logged_out,login as auto_login,update_session_auth_hash
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth.models import User
from .forms import CustomUserChangeForm
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from user.models import CustomUser
# Create your views here.
def home(request):
    """Landing page: signed-in users go to the dashboard, others see index."""
    if not request.user.is_authenticated:
        return render(request, 'index.html')
    return redirect('dashboard')
def login(request):
    """Authenticate the posted credentials and start a session.

    On success redirects to the dashboard; on failure (or on a plain GET)
    the index page is rendered, with an error message after a failed POST.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(request,username=username,password=password)
        if user:
            auto_login(request,user)
            return HttpResponseRedirect(reverse('dashboard'))
        else:
            messages.error(request,'Invalid User Credentials !!')
    # Fall through: GET request or failed authentication.
    return render(request,'index.html')
def dashboard(request):
    """Render the user dashboard page.

    NOTE(review): not decorated with @login_required like the other
    profile views -- confirm anonymous access is intended.
    """
    return render(request,'profile/dashboard.html')
def logout(request):
    """End the session and return to the home page."""
    logged_out(request)
    return HttpResponseRedirect(reverse('home'))
@login_required(login_url='/')
def user_profile(request):
    """Show and update the signed-in user's profile details."""
    if request.method == "POST":
        form = CustomUserChangeForm(request.POST,instance=request.user)
        if form.is_valid():
            form.save()
            messages.success(request,'Profile Details Updated Successfully !!')
            return redirect('user_profile')
        # Invalid POST falls through with the bound form so field errors
        # are displayed by the template.
    else:
        form = CustomUserChangeForm(instance=request.user)
    context = {
        'form':form,
    }
    return render(request,'profile/profile.html',context)
@login_required(login_url='/')
def change_password(request):
    """Change the signed-in user's password.

    On success the session auth hash is refreshed so the user stays logged
    in. On validation failure the errors are surfaced via messages and a
    fresh (unbound) form is rendered.
    """
    if request.method == 'POST':
        form = PasswordChangeForm(data=request.POST,user=request.user)
        if form.is_valid():
            form.save()
            # Keep the current session valid after the password change.
            update_session_auth_hash(request,form.user)
            messages.success(request,'Password Changed Successfully !!')
            return redirect('user_profile')
        else:
            messages.error(request,form.errors)
            # The bound form is discarded; errors are shown only through
            # the messages framework.
            form = PasswordChangeForm(user=request.user)
            context = {'form':form}
            return render(request,'profile/password.html',context)
    else:
        form = PasswordChangeForm(user=request.user)
        context = {
            'form':form
        }
        return render(request,'profile/password.html',context)
|
18,089 | 3c227e5aabbeb20e22ce22e934e803fb99235434 | from characters.character import Character
import random
# Depends on the Enemy class
class Player(Character):
    """The player-controlled character.

    Starts with a fixed 100 health via the Character constructor and adds
    an interactive attack menu plus a health report.
    """

    def __init__(self, strength, defense):
        # Fixed starting health of 100; strength/defense come from the caller.
        super(Player, self).__init__(100, strength, defense)
        self.strength = strength
        self.defense = defense

    def fightenemy(self, other):
        """Ask the player for a move and apply damage to *other* (an Enemy).

        Damage is ``int(strength / random factor - other.defense)``; the
        random range differs per move. Any unrecognized answer wastes the
        turn.
        """
        attack = input("Pick a move! A = Punch. B = Bitchslap. C = Kick. ")
        move = attack.upper()
        # Compare with == rather than the old `attack.upper() in 'A'`:
        # membership is also True for an empty answer ('' is a substring of
        # every string), so just pressing Enter counted as a punch.
        if move == 'A':
            print("Good punch! Your enemy\'s health is now: ")
            other.health -= (int(self.strength / random.uniform(1, 1.7) - other.defense))
            print(other.health)
        elif move == 'B':
            print("A slap in the face! Your enemy\'s health is now: ")
            other.health -= (int(self.strength / random.uniform(1, 2) - other.defense))
            print(other.health)
        elif move == 'C':
            print("Ouch, that kick did some damage! Your enemy\'s health is now: ")
            other.health -= (int(self.strength / random.uniform(1, 1.5) - other.defense))
            print(other.health)
        else:
            print("You hesitate...")

    def checkHealth(self):
        """Print the player's health, or a game-over message at <= 0."""
        if self.health <= 0:
            print("Your health level is too low. \n Game Over.")
        else:
            print("Your health level is " + str(self.health))
            print("Your strength level is " + str(self.strength))
|
18,090 | 3551ca9766bf2f8e0dc741cb2326e608bfa1921e | # Crie um jogo de pedra papel e tesoura
from random import choice
from time import sleep
# The three moves; menu choices 1-3 map to these by index.
lista = ['pedra', 'papel', 'tesoura']
print('\033[1;32m='*32)
print('\033[4;30;44mJogo de pedra, papel ou tesoura!\033[m')
print('\033[1;32m=\033[m'*32)
sleep(1)
escolha = int(input('Digite 1 para pedra, 2 para papel e 3 para tesoura: '))
# Out-of-range choice: pick a move for the player at random.
if escolha<1 or escolha>3:
    jogador = choice(lista)
    print('Escolha invalida, então vou escolher {} por você.'.format(jogador))
else:
    jogador = lista[escolha-1]
    print('Você escolheu {}'.format(jogador))
sleep(1)
print('O computador está escolhendo...')
computador = choice(lista)
sleep(2)
# Countdown chant before revealing both moves.
print('Pronto?')
sleep(1)
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO')
sleep(1)
print('Você: {}'.format(jogador))
print('Computador: {}'.format(computador))
sleep(3)
# Win table: rock beats scissors, paper beats rock, scissors beats paper.
if jogador == 'pedra' and computador == 'tesoura' or jogador == 'papel' and computador == 'pedra' or jogador == 'tesoura' and computador == 'papel':
    print('Você ganhou!')
elif jogador==computador:
    print('Empatou.')
else:
    print('Você perdeu...')
|
18,091 | 4482f92401db0c8101a97b3954185a4ac0544786 | # 케라스(Keras) 기본 개념
# - The most central data structure in Keras is the "model".
# - With the Sequential model Keras provides, layers can easily be
#   stacked in order.
# Example) classify the iris dataset by species
import tensorflow as tf
import keras
import numpy as np
from sklearn import datasets
# Load iris: X holds 4 features per sample, Y the class label (0-2).
iris = datasets.load_iris()
X = iris.data
Y = iris.target
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder()
# One-hot encode the labels, e.g. 0 -> [1, 0, 0].
Y_1hot = enc.fit_transform(Y.reshape(-1, 1)).toarray()
print(Y[0], " -- one hot enocding --> ", Y_1hot[0])
print(Y[50], " -- one hot enocding --> ", Y_1hot[50])
print(Y[100], " -- one hot enocding --> ", Y_1hot[100])
from keras.models import Sequential
from keras.layers import Dense
# Small fully-connected net: 4 inputs -> 4 relu units -> 3-way softmax.
model = Sequential()
model.add(Dense(4, input_dim=4, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, Y_1hot, epochs=300, batch_size=10)
#print(model.evaluate(X, Y_1hot))
# NOTE(review): predict_classes was removed in newer Keras/TF releases --
# confirm the installed version still provides it.
Y_pred = model.predict_classes(X)
print('예측값 :', Y_pred)
print('실제값 :', Y)
print('분류 실패 수:', (Y != Y_pred).sum())
# Classify two synthetic samples to sanity-check the model.
new_x = np.array([[1.1, 1.1, 1.1, 1.1],[5.5, 5.5, 5.5, 5.5]])
new_pred = model.predict_classes(new_x)
print(new_pred)
|
18,092 | a350b0b6a0339e209eb62240509472e0cceea892 | #https://realpython.com/face-detection-in-python-using-a-webcam/
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
import cv2
import os
import cvlib as cv
# model path
model_path = "model.h5"
model_weights_path = "weights.h5"
# load model
model = load_model(model_path)
model.load_weights(model_weights_path)
# Open the default webcam (device 0).
video_capture = cv2.VideoCapture(0)
# Main loop: grab a frame, detect faces, classify each and draw the label.
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    # NOTE(review): `gray` is computed but never used below.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces, confidences = cv.detect_face(frame)
    for face in faces:
        (startX,startY) = face[0],face[1]
        (endX,endY) = face[2],face[3]
        # draw rectangle over face
        cv2.rectangle(frame, (startX,startY), (endX,endY), (0,100,0), 2)
        # preprocessing for gender detection model
        cropped_face = frame[startY:endY,startX:endX]
        # Skip detections too small to resize meaningfully.
        if (cropped_face.shape[0]) < 10 or (cropped_face.shape[1]) < 10:
            continue
        cropped_face = cv2.resize(cropped_face, (150,150))
        cropped_face = cropped_face.astype("float32") / 255
        cropped_face = img_to_array(cropped_face)
        cropped_face = np.expand_dims(cropped_face, axis=0)
        # apply prediction
        conf = model.predict(cropped_face)[0]
        # conf[0]/conf[1] are the two class scores -- assumed (male, female);
        # TODO confirm against the training labels.
        if conf[0] > conf[1]:
            label = "Male"
        else:
            label = "Female"
        print(conf)
        cv2.putText(frame, label, (startX, startY-5), cv2.FONT_HERSHEY_SIMPLEX,0.8, (0,100,0), 2)
    # Show result
    cv2.imshow('Video', frame)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
18,093 | 8fec92188a78be18b4870a1274dd840a223a2699 | import datetime
import pandas as pd
from GoogleNews import GoogleNews
def get_df_history_google_news(ticker, initial_date, final_date, add_stock=False):
    """Fetch Google News items for *ticker* between two dates as a DataFrame.

    :param ticker: search term (typically a stock ticker symbol)
    :param initial_date: first date to include (a ``datetime.date``)
    :param final_date: last date to include (a ``datetime.date``)
    :param add_stock: when True, append " stock" to the query
    :return: DataFrame with columns ticker, date, time, title, trimmed to
        the requested date range
    """
    googlenews = GoogleNews(lang='en',
                            start=datetime.date.strftime(initial_date, "%m/%d/%Y"),
                            end=datetime.date.strftime(final_date, "%m/%d/%Y"),
                            encode='utf-8')
    if add_stock:
        googlenews.get_news(ticker + ' stock')
        # googlenews.search(ticker + ' stock')
    else:
        googlenews.get_news(ticker)
        # googlenews.search(ticker)
    result = googlenews.results()
    # parse data
    parsed_data = []
    for row in result:
        # Merge headline and description into one title field.
        title = row['title'] + ". " + row['desc']
        date = row['datetime'].strftime("%b-%d-%Y")
        time = row['datetime']
        parsed_data.append([ticker, date, time, title])
    df = pd.DataFrame(parsed_data, columns=['ticker', 'date', 'time', 'title'])
    df['date'] = pd.to_datetime(df.date).dt.date
    # Google may return items outside the window; keep [initial, final] only.
    df = df[(df.date >= initial_date) & (final_date >= df.date)]
    return df
|
18,094 | 46195d490d8f136a170881d2d06e38153cd9c06f | from django.contrib import admin
from django.apps import apps
from django.contrib.auth.models import User, Group
app = apps.get_app_config('projeto')
# Remove the stock User/Group admin pages.
admin.site.unregister(User)
admin.site.unregister(Group)
# Register every model of the 'projeto' app with the default ModelAdmin.
for model_name, model in app.models.items():
    admin.site.register(model)
|
18,095 | 24c45bb31ff68eeaa3991fac302ee7f35bc66722 | import numpy as np
import talib as ta
from jesse.helpers import get_candle_source
from collections import namedtuple
# Result container for the WaveTrend oscillator values and signal flags.
wavetrend = namedtuple('Wavetrend', ['wt1', 'wt2', 'wtCrossUp', 'wtCrossDown', 'wtOversold', 'wtOverbought', 'wtVwap'])
# Wavetrend indicator ported from: https://www.tradingview.com/script/Msm4SjwI-VuManChu-Cipher-B-Divergences/
# https://www.tradingview.com/script/2KE8wTuF-Indicator-WaveTrend-Oscillator-WT/
#
# buySignal = wtCross and wtCrossUp and wtOversold
# sellSignal = wtCross and wtCrossDown and wtOverbought
#
# See https://github.com/ysdede/lazarus3/blob/partialexit/strategies/lazarus3/__init__.py for working jesse.ai example.
def wt(candles: np.ndarray,
       wtchannellen: int = 9,
       wtaveragelen: int = 12,
       wtmalen: int = 3,
       oblevel: int = 53,
       oslevel: int = -53,
       source_type="hlc3",
       sequential=False) -> wavetrend:
    """WaveTrend oscillator.

    :param candles: candle array (jesse format)
    :param wtchannellen: EMA length for the price channel (esa/de)
    :param wtaveragelen: EMA length smoothing the channel index into wt1
    :param wtmalen: SMA length smoothing wt1 into wt2
    :param oblevel: overbought threshold applied to wt2
    :param oslevel: oversold threshold applied to wt2
    :param source_type: candle price source, e.g. "hlc3"
    :param sequential: when True return full arrays, otherwise last values
    :return: a ``wavetrend`` namedtuple
    """
    if not sequential and len(candles) > 240:
        # Only the tail is needed when a single (latest) value is returned.
        candles = candles[-240:]
    src = get_candle_source(candles, source_type=source_type)
    # wt
    esa = ta.EMA(src, wtchannellen)
    de = ta.EMA(abs(src - esa), wtchannellen)
    # Normalised channel index (price distance from esa in units of 1.5% de).
    ci = (src - esa) / (0.015 * de)
    wt1 = ta.EMA(ci, wtaveragelen)
    wt2 = ta.SMA(wt1, wtmalen)
    wtVwap = wt1 - wt2
    wtOversold = wt2 <= oslevel
    wtOverbought = wt2 >= oblevel
    # Element-wise cross state: wt1 at/above wt2 (up) or at/below (down).
    wtCrossUp = wt2 - wt1 <= 0
    wtCrossDown = wt2 - wt1 >= 0
    if sequential:
        return wavetrend(wt1, wt2, wtCrossUp, wtCrossDown, wtOversold, wtOverbought, wtVwap)
    else:
        return wavetrend(wt1[-1], wt2[-1], wtCrossUp[-1], wtCrossDown[-1], wtOversold[-1], wtOverbought[-1], wtVwap[-1])
|
18,096 | b37c54d70b8e6e5a6ffcfc1b36db1ce0c7fd25ef | from dash.dependencies import Input, Output, State
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
METALLINVEST_LOGO = "https://www.metalloinvest.com/_v/_i/152.png" # Metalloinvest logo
theme = {
    'navcolor': '#ffffff', # background color of the navigation menu
}
def Navbar():
    """Build the top navigation bar with the company logo and brand text."""
    navbar = dbc.Navbar(
        [
            dcc.Link(
                # Use row and col to control vertical alignment of logo / brand
                dbc.Row(
                    [
                        dbc.Col(html.Img(src=METALLINVEST_LOGO, height="52px"),width=3),
                        dbc.Col([
                            html.H3("Металлоинвест", className="brandName"),
                            html.P("Ресурсы создают возможности", className="brandSlogan"),
                        ],width=9)
                    ],
                    align="center",
                ),
                href="/",
            ),
            dbc.NavbarToggler(id="navbar-toggler")
            #dbc.Collapse(search_bar, id="navbar-collapse", navbar=True)
        ],
        className="navbar_style",
        color=theme['navcolor'] # navbar background color from the theme
    )
    return navbar
def Sidebar():
    """Build the vertical navigation sidebar linking the four app pages."""
    sidebar = html.Div(
        [
            #html.H2("Sidebar", className="display-4"),
            #html.Hr(),
            #html.P(
            # "A simple sidebar layout with navigation links", className="lead"
            #),
            dbc.Nav(
                [
                    dbc.NavLink("Мониторинг", href="/page-1", id="page-1-link"),
                    dbc.NavLink("Анализ", href="/page-2", id="page-2-link"),
                    dbc.NavLink("Планирование и прогнозирование", href="/page-3", id="page-3-link"),
                    dbc.NavLink("Мнемосхема", href="/page-4", id="page-4-link"),
                ],
                vertical=True,
                pills=True
            ),
        ],
        className="sidebar_style"
    )
    return sidebar
|
18,097 | 163104d6acc93874719e405696e70501795cec7e | import tarfile
import os
from argparse import ArgumentParser
# Command-line interface. NOTE(review): arguments are parsed at import
# time, so importing this module without --input/--output will exit.
parser = ArgumentParser(description="Author: xzhou15@cs.stanford.edu\n liuyichen@std.uestc.edu.cn\n",usage='use "python3 %(prog)s --help" for more information')
parser.add_argument('--input','-i',help="Input fp,fn or tp result vcf",required = True)
parser.add_argument('--output','-o',help="Output dir",required=True)
args = parser.parse_args()
def takeFirst(element):
    """Sort key: the first item of *element* (a block's start coordinate)."""
    return element[0]
def find_pos(blocks, pos, start, end):
    """Binary-search *blocks* (sorted, disjoint (begin, end) intervals)
    for position *pos* within indices start..end inclusive.

    Returns a triple ``(i, j, where)``:
      * ``(i, i, "in")`` when pos lies inside block i;
      * ``(i, i + 1, "out")`` when pos falls in the gap between blocks i
        and i + 1 (``start - 1`` / ``end + 1`` are used for positions
        before the first or after the last searched block).
    """
    mid = (start + end) // 2
    # Past the searched range on either side.
    if pos > blocks[end][1]:
        return (end, end + 1, "out")
    if pos < blocks[start][0]:
        return (start - 1, start, "out")
    # Inside the middle block.
    if blocks[mid][0] <= pos <= blocks[mid][1]:
        return (mid, mid, "in")
    if pos < blocks[mid][0]:
        # Narrow the search to the left half.
        return find_pos(blocks, pos, start, mid)
    # pos lies beyond block mid: check the gap and the next block before
    # recursing into the right half.
    if pos < blocks[mid + 1][0]:
        return (mid, mid + 1, "out")
    if pos <= blocks[mid + 1][1]:
        return (mid + 1, mid + 1, "in")
    return find_pos(blocks, pos, mid, end)
def TRgt100(vcf,TRmask,outdir):
    """Split *vcf* records by their overlap with tandem-repeat (TR) regions.

    Records whose REF-allele span overlaps TR intervals by >= 20% of its
    length are written to ``<outdir>TR100.vcf``; all others go to
    ``<outdir>noTR100.vcf``. Header lines ('#') are copied to both files.

    :param vcf: path of the input VCF
    :param TRmask: tab-separated file of TR regions (chrom, start, end)
    :param outdir: output directory/prefix (must end with a separator)
    """
    # chrom -> sorted list of (start, end) TR intervals
    mask = {}
    with open(TRmask,"r") as ftr:
        for line in ftr:
            line = line.split("\t")
            if line[0] not in mask:
                mask[line[0]] = [(int(line[1]),int(line[2]))]
            else:
                mask[line[0]].append((int(line[1]),int(line[2])))
    for key in mask.keys():
        mask[key].sort(key=takeFirst)
    with open(outdir+"TR100.vcf","w") as fw:
        with open(outdir+"noTR100.vcf","w") as nfw:
            with open(vcf,"r") as fv:
                for line in fv:
                    if line[0] == "#":
                        # Header: copy into both outputs.
                        nfw.write(line)
                        fw.write(line)
                    else:
                        overlap = 0
                        line_split = line.split("\t")
                        CHROM = line_split[0]
                        start = int(line_split[1])
                        # End of the REF allele span on the chromosome.
                        end = start+len(line_split[3])-1
                        blocks = mask[CHROM]
                        lenth = end-start+1
                        # Locate which TR block (or gap) each end falls into.
                        start_block = find_pos(blocks,start,0,len(blocks)-1)
                        end_block = find_pos(blocks,end,0,len(blocks)-1)
                        if start_block[2] == "in" and end_block[2] == "in":
                            if start_block[0] == end_block[0]:
                                # Fully inside one TR block: always TR output.
                                fw.write(line)
                            else:
                                # Partial cover of the first and last block
                                # plus all blocks strictly between them.
                                overlap = overlap+blocks[start_block[0]][1]-start+end-blocks[end_block[0]][0]+2
                                for i in range(start_block[0]+1,end_block[0]):
                                    overlap = overlap+blocks[i][1]-blocks[i][0]+1
                                if overlap/lenth >= 0.2:
                                    #if overlap > 0:
                                    fw.write(line)
                                else:
                                    nfw.write(line)
                        elif start_block[2] == "out" and end_block[2] == "in":
                            # Start lies in a gap; count the tail of the end
                            # block plus every fully covered block before it.
                            overlap = overlap+end-blocks[end_block[0]][0]+1
                            for i in range(start_block[1],end_block[0]):
                                overlap = overlap+blocks[i][1]-blocks[i][0]+1
                            if overlap/lenth >= 0.2:
                                #if overlap > 0:
                                fw.write(line)
                            else:
                                nfw.write(line)
                        elif start_block[2] == "in" and end_block[2] == "out":
                            # End lies in a gap; mirror-image of the case above.
                            overlap = overlap+blocks[start_block[0]][1]-start+1
                            for i in range(start_block[0]+1,end_block[1]):
                                overlap = overlap+blocks[i][1]-blocks[i][0]+1
                            if overlap/lenth >= 0.2:
                                #if overlap > 0:
                                fw.write(line)
                            else:
                                nfw.write(line)
                        elif start_block[2] == "out" and end_block[2] == "out":
                            if start_block[0] == end_block[0]:
                                # Both ends in the same gap: no TR overlap.
                                nfw.write(line)
                            else:
                                # Sum the fully covered blocks between the gaps.
                                for i in range(start_block[1],end_block[1]):
                                    overlap = overlap+blocks[i][1]-blocks[i][0]+1
                                if overlap/lenth >= 0.2:
                                    #if overlap > 0:
                                    fw.write(line)
                                else:
                                    nfw.write(line)
if __name__ == "__main__":
    vcf = args.input
    outdir = args.output
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    # Unpack the bundled TR mask next to this script on first run.
    script_path = os.path.dirname(os.path.abspath( __file__ ))
    code_path = script_path + "/"
    if not os.path.exists(code_path+"TRmask.bed"):
        trmask = tarfile.open(code_path+"TRmask.bed.tar.gz","r")
        for ti in trmask:
            trmask.extract(ti,code_path)
        trmask.close()
    TRgt100(vcf,code_path+"TRmask.bed",outdir)
|
18,098 | 45c9a7b41d22f471cdfa0a92f39e5a8b1cf81fa5 | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from math import ceil
import imaplib
import email
from email.header import decode_header
import webbrowser
import os
import smtplib
from email import encoders
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from bs4 import BeautifulSoup as bs
# Module-wide IMAP connection, (re)initialised by first()/logchecker().
imap = None
def first(request):
    """Entry point: restore the IMAP session from the Django session.

    If credentials are stored, reconnect to IMAP and open the first
    mailbox; otherwise send the visitor to the login page.
    """
    global imap
    if 'user_email' in request.session:
        ee=request.session.get('user_email')
        pe=request.session.get('user_pass')
        # NOTE(review): the password is kept in the session in plain text
        # (see logchecker) -- consider a safer credential store.
        imap = imaplib.IMAP4_SSL("imap.zoho.in",port=993)
        imap.login(ee, pe)
        return HttpResponseRedirect('/mailBox/0/0')
    return HttpResponseRedirect('login')
def getMail(request,mbn,mid):
    """Render message *mid* from mailbox number *mbn*.

    Fetches the raw RFC822 message, decodes subject/sender/date and renders
    the text/html part (or the plain-text body for non-multipart mail).
    NOTE(review): may implicitly return None when no suitable part exists.
    """
    boxes=getMailBoxes()
    imap.select(boxes[mbn])
    msg=imap.fetch(str(mid),"(RFC822)")[1]
    # The template receives False (not an empty list) when no attachments.
    attachments = getattachment(mid,boxes[mbn])
    if (len(attachments)==0):
        attachments=False
    for r in msg:
        if(isinstance(r,tuple)):
            c=email.message_from_bytes(r[1])
            s=decode_header(c["Subject"])[0][0] #Subject o Email
            f=c["From"] #Sender Name n Email
            n=""
            e=""
            if(len(f.split("<"))>1):
                # "Name <addr>" form: split display name and address.
                n=f.split("<")[0].strip("\" ")
                e=f.split("<")[1].strip("\" >")
            else:
                e=f.strip("\" >")
            d=c["Date"]
            if c.is_multipart() :
                for p in c.walk():
                    ct=p.get_content_type()
                    cd=str(p.get("Content-Disposition"))
                    try:
                        b=p.get_payload(decode=True).decode()
                    except:
                        # Container or undecodable part: keep the last body.
                        pass
                    if ct == "text/html" and "attachment" not in cd:
                        return render(request,"client/MailView.html",{"sub":s,"from":f,"con":b,"name":n,"fmail":e,"date":d,"boxes":boxes,"boxnum":mbn,"mid":mid,"atlist":attachments})
            else:
                ct = c.get_content_type()
                b = c.get_payload(decode=True).decode()
                if ct == "text/plain":
                    return render(request,"client/MailView.html",{"sub":s,"from":f,"con":b,"boxes":boxes,"boxnum":mbn,"mid":mid,"atlist":attachments})
def getSubjects(total,page,search=False,searchList=None):
    """Collect subject/sender/date/flag info for one page of messages.

    :param total: number of messages in the currently selected mailbox
    :param page: 1-based page number (10 messages per page)
    :param search: when True, iterate *searchList* instead of paging
    :param searchList: message sequence numbers from an IMAP SEARCH
    :return: dict mapping (subject, from, date, flagged) -> message number
    """
    subjectList={}
    if search:
        if searchList!=None:
            for i in searchList:
                # FLAGS fetch tells us whether the message is starred.
                flag=False
                response=imap.fetch(str(i),'(FLAGS)')[1][0]
                response=response.decode()
                if('Flagged' in response):
                    flag=True
                response=imap.fetch(str(i),"(RFC822)")[1][0]
                msg=email.message_from_bytes(response[1])
                s=decode_header(msg["Subject"])[0][0]
                if isinstance(s,bytes):
                    s=s.decode()
                f=msg["From"]
                #n=f.split("<")[0].strip("\" ")
                d=msg["Date"][:16]
                subjectList[(s,f,d,flag)]=i
        return subjectList
    on1page=10
    # Newest first: message numbers count down from the top of the page.
    start=total-((page-1)*on1page)
    end=start-10
    while(start>end and start>0):
        flag=False
        response=imap.fetch(str(start),'(FLAGS)')[1][0]
        response=response.decode()
        if('Flagged' in response):
            flag=True
        response=imap.fetch(str(start),"(RFC822)")[1][0]
        msg=email.message_from_bytes(response[1])
        s=decode_header(msg["Subject"])[0][0]
        if isinstance(s,bytes):
            s=s.decode()
        f=msg["From"]
        #n=f.split("<")[0].strip("\" ")
        d=msg["Date"][:16]
        subjectList[(s,f,d,flag)]=start
        start-=1
    return subjectList
def clearBox(raw_entry):
    """Extract the mailbox name from a decoded IMAP LIST response line,
    e.g. ``'(\\\\HasNoChildren) "/" "INBOX"'`` -> ``'INBOX'``."""
    pieces = raw_entry.split("/")
    return pieces[1].strip("\" ")
def getMailBoxes():
    """Return the names of all mailboxes on the active IMAP connection."""
    global imap
    # imap.list()[1] yields raw bytes entries; decode and strip each down
    # to the bare mailbox name.
    return [clearBox(raw.decode("utf-8")) for raw in imap.list()[1]]
def login(request):
    """Render the login page."""
    return render(request,"client/Login.html")
def logchecker(request):
    """Log in to IMAP with the posted credentials and open mailbox 0, page 0.

    NOTE(review): the raw password is written into the Django session; if
    the session backend is not fully trusted this leaks credentials.
    """
    global imap
    ee=request.POST['email']
    pe=request.POST['pass']
    request.session['user_email']=ee
    request.session['user_pass']=pe
    imap = imaplib.IMAP4_SSL("imap.zoho.in",port=993)
    imap.login(ee, pe)
    selected=0
    page=0
    return HttpResponseRedirect("/mailBox/"+str(selected)+"/"+str(page))
def mailBox(request,mn,page,searchEnb=None):
    """Render one page of mailbox *mn* (0-based index), optionally filtered.

    :param mn: index into the mailbox list
    :param page: 0-based page number (10 messages per page)
    :param searchEnb: when given, restrict to messages FROM this sender
    """
    boxes=getMailBoxes()
    imap.select(boxes[mn])
    search=False
    searchList=None
    if (searchEnb!=None):
        typ,msgno = imap.search(None, 'FROM', searchEnb)
        searchList = [int(x) for x in msgno[0].split()]
        search=True
    # Re-select to read the total message count for this mailbox.
    total = int(imap.select(boxes[mn])[1][0])
    subjects=getSubjects(total,page+1,search,searchList)
    totalpages=ceil(total/10)-1
    # Message-number range shown on this page (counting down from newest).
    ms=total-(page*10)
    me=ms-10
    if me<0:
        me=0
    return render(request,"client/InboxView.html",{"Box":boxes,"Sub":subjects,"currBox":mn,
    "currPage":page,"Totpage":totalpages,"startmail":ms,"endMail":me})
def logout(request):
    """Drop the stored credentials, close the IMAP session and go home."""
    del request.session['user_email']
    del request.session['user_pass']
    imap.logout()
    return HttpResponseRedirect("/")
def composeMail(request):
    """Render the compose-mail form."""
    return render(request,"client/ComposeMail.html")
def packmsg(from1,to,subject,body,cc):
    """Build a multipart/alternative email with plain-text and HTML parts.

    The plain-text part is derived from the HTML with BeautifulSoup so both
    renderings carry the same content.
    """
    msg = MIMEMultipart("alternative")
    msg["From"] = from1
    msg["To"] = to
    msg['Cc']=cc
    msg["Subject"] = subject
    html = "<div>"+body+"</div>"
    text = bs(html, "html.parser").text
    text_part = MIMEText(text, "plain")
    html_part = MIMEText(html, "html")
    # Attach plain text first: clients prefer the last alternative (HTML).
    msg.attach(text_part)
    msg.attach(html_part)
    return msg
def mailfunction(email,password,FROM,TO,CC,msg):
    """Deliver *msg* over SMTPS (Zoho, port 465) to the TO + CC lists."""
    server = smtplib.SMTP_SSL("smtp.zoho.in", 465)
    server.login(email, password)
    server.sendmail(FROM, (TO+CC) , msg.as_string())
    server.quit()
def getlist(a):
    """Normalize a comma-separated address string.

    :param a: e.g. ``"a@x.com , b@y.com"``
    :return: tuple of (``", "``-joined display string, list of addresses)
    """
    # Comprehension instead of the old split/strip/append loop.
    g = [part.strip(" ") for part in a.split(",")]
    return ((", ".join(g)), g)
def sendMail(request):
    """Send a composed message using the credentials stored in the session."""
    to=request.POST['sendto']
    sub=request.POST['subject']
    cc=request.POST['cc']
    body=request.POST['mailBody']
    # getlist returns (display string, address list) pairs.
    reclist=getlist(to)
    cclist=getlist(cc)
    ue=request.session['user_email']
    up=request.session['user_pass']
    msg=packmsg(ue,reclist[0],sub,body,cclist[0])
    mailfunction(ue,up,ue,reclist[1],cclist[1],msg)
    return HttpResponseRedirect("/")
def deletethis(dl):
    """Flag every message number in *dl* as Deleted, then expunge them."""
    for curr in dl:
        imap.store(str(curr), '+FLAGS', '\Deleted')
    imap.expunge()
def deleteMail(request):
    """Delete every message whose checkbox was ticked in the posted form."""
    dl=request.POST.getlist('checks')
    deletethis(dl)
    return HttpResponseRedirect("/")
def createnewbox(a):
    """Create a new IMAP mailbox named *a*."""
    imap.create(a)
    return
def createbox(request):
    """Create the mailbox named in the posted form and return home."""
    newbox=request.POST['newbox']
    createnewbox(newbox)
    return HttpResponseRedirect("/")
def flagmail(request,mid):
    """Mark message *mid* with the IMAP Flagged (starred) flag."""
    imap.store(str(mid),'+FLAGS','\\Flagged')
    return HttpResponseRedirect("/")
def removeflag(request,mid):
    """Remove the IMAP Flagged (starred) flag from message *mid*."""
    imap.store(str(mid),'-FLAGS','\\Flagged')
    return HttpResponseRedirect("/")
def movemail(request,mbn,mid,box):
    """Copy message *mid* into mailbox index *box*, then delete the original.

    NOTE(review): the *mbn* (source mailbox) parameter is never used --
    the copy operates on whichever mailbox is currently selected.
    """
    boxes=getMailBoxes()
    result = imap.copy(str(mid),boxes[box])
    deletethis([mid])
    return HttpResponseRedirect("/")
def getattachment(mailno,box):
    """Return the filenames of all attachments of message *mailno*.

    :param mailno: message sequence number in the selected mailbox
    :param box: mailbox name (unused here; selection happens in the caller)
    """
    res, msg = imap.fetch(str(mailno), "(RFC822)")
    response = msg[0]
    msg = email.message_from_bytes(response[1])
    attach = []
    for part in msg.walk():
        if 'attachment' in str(part.get("Content-Disposition")):
            filename = part.get_filename()
            if filename:
                attach.append(filename)
    return attach
def downloadAttached(request, mid):
    """Save every attachment of message *mid* into a directory named after it.

    Files are written relative to the process working directory, in a
    subdirectory named ``str(mid)`` which is created on first use.
    """
    res, msg = imap.fetch(str(mid), "(RFC822)")
    response = msg[0]
    msg = email.message_from_bytes(response[1])
    for part in msg.walk():
        if 'attachment' in str(part.get("Content-Disposition")):
            filename = part.get_filename()
            if filename:
                print('attachment_found : ',filename)
                if not os.path.isdir(str(mid)):
                    os.mkdir(str(mid))
                filepath = os.path.join(str(mid), filename)
                # Context manager instead of the old bare
                # open(...).write(...): the handle is now closed (and the
                # data flushed) deterministically instead of whenever the
                # orphaned file object happens to be garbage-collected.
                with open(filepath, "wb") as fh:
                    fh.write(part.get_payload(decode=True))
    return HttpResponseRedirect('#')
#def
def searchThis(request):
    """Search the first mailbox for messages from the posted sender."""
    q=request.POST['query']
    return mailBox(request,0,0,q)
def Feedback(request):
    """Render the help/feedback page."""
    return render(request,"client/help.html")
|
18,099 | 64617424188427605042ce4368b273ff80935c38 | class Node:
# Contructor to create a new node
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def helper(node):
    """Recursive worker for the maximum-path-sum computation.

    Returns the best downward path sum starting at *node*, while tracking
    the best "bent" path seen anywhere in the function attribute
    ``helper.res`` (callers must initialise it before the first call).
    """
    if node is None:
        return 0
    best_left = helper(node.left)
    best_right = helper(node.right)
    # Best path that starts here and continues down one side (or stops here).
    down = max(node.data, node.data + max(best_left, best_right))
    # Best path that may bend through this node, joining both subtrees.
    through = max(down, best_left + best_right + node.data)
    if through > helper.res:
        helper.res = through
    return down
def maxPathSum(root):
    """Return the maximum path sum in the binary tree rooted at *root*.

    A path may start and end at any nodes. The recursive ``helper``
    accumulates the best candidate into the ``helper.res`` function
    attribute, which is reset here before each computation.
    """
    helper.res = float("-inf")
    helper(root)
    return helper.res
# Demo: build a small sample tree and print its maximum path sum.
root = Node(10)
root.left = Node(2)
root.right = Node(10)
root.left.left = Node(20)
root.left.right = Node(1)
root.right.right = Node(-25)
root.right.right.left = Node(3)
root.right.right.right = Node(4)
print(maxPathSum(root))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.