index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
6,100 | ac033e45ea61770c302be677f4dfc95945e2cca5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Calcu.py
#
import os, sys
def menuCalc():
    """Clear the screen and print the calculator option menu."""
    os.system('clear')
    opciones = [
        "Esto parece un menu:",
        "\t1 - Suma",
        "\t2 - Resta",
        "\t3 - Multiplicacion",
        "\t4 - Division",
        "\tq - Para salir",
    ]
    for linea in opciones:
        print(linea)
def calculadora(calcu,):
    """Run one calculator operation selected by *calcu*.

    Args:
        calcu: Menu choice as a string: "1" add, "2" subtract,
            "3" multiply, "4" divide, "q" quit (calls exit(), raising
            SystemExit). Any other value prints an error message.
    """
    def _leer_numeros():
        # Every operation reads its two integer operands the same way.
        a = int(input("Ingrese un numero\n"))
        b = int(input("Ingrese otro\n"))
        return a, b

    if calcu == "1":
        os.system('clear')
        s1, s2 = _leer_numeros()
        os.system('clear')
        print(f"{s1} + {s2} = {s1+s2}")
        input("\nPresione una tecla para continuar.")
    elif calcu == "2":
        os.system('clear')
        s1, s2 = _leer_numeros()
        os.system('clear')
        print(f"{s1} - {s2} = {s1-s2}")
        input("\nPresione una tecla para continuar.")
    elif calcu == "3":
        os.system('clear')
        s1, s2 = _leer_numeros()
        os.system('clear')
        print(f" {s1} x {s2} = {s1*s2}")
        input("\nPresione una tecla para continuar.")
    elif calcu == "4":
        os.system('clear')
        s1, s2 = _leer_numeros()
        os.system('clear')
        if s2 == 0:
            # Bug fix: dividing by zero used to crash the whole program
            # with an unhandled ZeroDivisionError.
            print("No se puede dividir por cero!")
        else:
            print(f"{s1} / {s2} = {s1 / s2}")
        input("\nPresione una tecla para continuar.")
    elif calcu == "q":
        print("Gracias, Vuelva Prontoss")
        exit()
    else:
        os.system('clear')
        print("Lo siento no es un numero valido!")
# Main loop: show the menu, read the user's choice and dispatch it.
# Runs until the user picks "q" (calculadora() then calls exit()).
while True:
    menuCalc()
    calc = input("Ingrese su opcion: ")
    calculadora(calc)
|
6,101 | 21050d66120787c1260efd42bb6456d7131fcc6b | N=input()
l=map(int,raw_input().split())
l.sort()
flag=0
if l[0]<0:
print 'False'
else:
for i in l:
if str(i)==str(i)[::-1]:
flag=flag+1
if flag>=1:
print 'True'
else:
print 'False'
|
6,102 | 6b6fac3bfb1b1478dd491fc4dd9c45a19aeb7bd8 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import pygtk
pygtk.require("2.0")
import gtk
from testarMsg import *
class tgApp(object):
    """GTK window for the sentiment-analysis demo.

    Loads the Glade layout, connects the button handlers and keeps the
    currently selected topic in ``self.opcao``.
    """

    def __init__(self):
        builder = gtk.Builder()
        builder.add_from_file("../tg.glade")
        self.window = builder.get_object("window1")
        self.text_area = builder.get_object("text_entry")
        self.window.show()
        # No topic selected until one of the topic buttons is pressed.
        self.opcao = ""
        builder.connect_signals({"gtk_main_quit": gtk.main_quit,
            "on_button_analisar_clicked": self.analisar_frase,
            "on_button_clear_clicked": self.clear_text,
            "on_button_dilma_clicked": self.opcao_dilma,
            "on_button_copa_clicked": self.opcao_copa,
            "on_button_palmeiras_clicked": self.opcao_palmeiras,
            "on_button_fatec_clicked": self.opcao_fatec,
            "on_sad_show": self.sad_show,
            })

    def analisar_frase(self, widget):
        """Analyze the sentence entered by the user."""
        frase = self.text_area.get_text()
        if ( frase != ""):
            frase_proc= normalizar(frase)
            self.text_area.set_text(frase)
            if (self.opcao == 'dilma' or self.opcao == 'copa' or self.opcao == 'palmeiras' or self.opcao == 'fatec'):
                print("Opcao: %s "%self.opcao)
                # Build the feature data for the chosen topic and classify
                # the sentence's sentiment (helpers come from testarMsg).
                # NOTE(review): featureList is assigned but never used.
                featureList = gera_lista_features(self.opcao)
                lista_feature_fell = get_lista_feature_fell()
                features_msg = getFeatureVector(frase_proc)
                training_set = apply_features(extract_features,lista_feature_fell)
                fell = avaliar_Sentimento(features_msg,training_set)
                print ("Sentimento: %s "%fell)

    def clear_text(self, widget):
        """Erase the contents of the text area."""
        self.text_area.set_text("")

    def opcao_dilma(self, widget):
        """Select the 'dilma' topic."""
        self.opcao="dilma"

    def opcao_copa(self, widget):
        """Select the 'copa' topic."""
        self.opcao="copa"

    def opcao_palmeiras(self, widget):
        """Select the 'palmeiras' topic."""
        self.opcao="palmeiras"

    def opcao_fatec(self, widget):
        """Select the 'fatec' topic."""
        self.opcao="fatec"

    def sad_show(self,widget):
        """Mark the Sad image as visible."""
        self.visible=True
# Entry point: build the window and hand control to the GTK main loop.
if __name__ == "__main__":
    app = tgApp()
    gtk.main()
|
6,103 | 3be7183b5c1d86ee0ebfdea89c6459efe89510f8 | from data import constants
from data.action import Action
from data.point import Point
class MoveActorsAction(Action):
    """Moves every actor in the cast whose velocity is non-zero.

    Stereotype:
        Controller
    """

    def execute(self, cast):
        """Apply one movement step to every moving actor.

        Args:
            cast (dict): The game actors {key: tag, value: list}.
        """
        moving = (actor
                  for group in cast.values()
                  for actor in group
                  if actor.change_x != 0 or actor.change_y != 0)
        for actor in moving:
            self._move_actor(actor)

    def _move_actor(self, actor):
        """Advance the actor's position by its velocity.

        Args:
            actor (Actor): The actor to move.
        """
        actor.center_x += actor.change_x
        actor.center_y += actor.change_y
|
6,104 | c38aff77a7beebc13e7486150d549b876c830db8 |
class Pwm():
    """One PWM channel backed by a feature dictionary.

    `number` is kept as a string so it can be concatenated into both
    the feature-dictionary keys and the sysfs-style path.
    """

    def __init__(self, number, path, features):
        self.id = number
        self.path = path + 'pwm' + number
        self.features = features
        # Cache the duty-cycle and enable values at construction time.
        self.duty = self.get_feature('')
        self.enable = self.get_feature('_enable')

    def get_feature(self, feature):
        """Look up the value stored for this channel's *feature* suffix."""
        key = 'pwm' + self.id + feature
        return self.features[key]

    def set_feature(self, feature, value=0):
        """Writing features is not implemented yet."""
        pass

    def __str__(self):
        return 'pwm{}'.format(self.id)
6,105 | 6fdfcbcfdf2b680a1fbdb74f77fd5d1a9f7eac0b | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import requests
"""
This example exposes the VOLTTRON web API
through a python class that that does not depend
on VOLTTRON proper. A VOLTTRON Central Agent must
be running on the url passed to the constructor.
"""
class VolttronWebRPC(object):
    """Thin JSON-RPC 2.0 client for the VOLTTRON Central web API."""

    def __init__(self, url, username='admin', password='admin'):
        """
        :param url: Jsonrpc endpoint for posting data.
        :param username: Account used to authenticate.
        :param password: Password for *username*.
        """
        self._url = url
        self._username = username
        self._password = password
        # Must exist (even as None) before get_auth_token() runs,
        # because do_rpc() reads it when building the request payload.
        self._auth_token = None
        self._auth_token = self.get_auth_token()

    def do_rpc(self, method, **params):
        """
        Generic method to request data from Volttron Central.

        :param method: Method to call.
        :param params: Any method specific keyword arguments.
        :return: The 'result' field of the JSON-RPC response.
        """
        data = {
            'jsonrpc': '2.0',
            'method': method,
            'params': params,
            'authorization': self._auth_token,
            'id': '1'
        }
        r = requests.post(self._url, json=data)
        validate_response(r)
        return r.json()['result']

    def get_auth_token(self):
        """
        Get an authorization token from Volttron Central;
        automatically called when the object is created.
        """
        return self.do_rpc('get_authorization',
                           username=self._username,
                           password=self._password)

    def register_instance(self, addr, name=None):
        """
        Register a platform with Volttron Central.

        :param addr: Platform's discovery address that will be registered.
        :param name: Optional display name for the platform.
        """
        return self.do_rpc('register_instance', discovery_address=addr,
                           display_name=name)

    def list_platforms(self):
        """
        Get a list of registered platforms from Volttron Central.
        """
        return self.do_rpc('list_platforms')

    def install_agent(self, platform_uuid, fileargs):
        """
        Install an agent on a platform.

        :param platform_uuid: uuid of platform where agent will be installed.
        :param fileargs: arguments for installing the agent.
        """
        rpc = 'platforms.uuid.{}.install'.format(platform_uuid)
        return self.do_rpc(rpc, files=[fileargs])

    def list_agents(self, platform_uuid):
        """
        List agents installed on a platform.
        """
        return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')

    def unregister_platform(self, platform_uuid):
        """
        Unregister a platform with Volttron Central.
        """
        return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)

    def store_agent_config(self, platform_uuid, agent_identity, config_name,
                           raw_contents, config_type="json"):
        """
        Add a file to an agent's config store.

        :param platform_uuid: uuid of platform where agent is installed.
        :param agent_identity: VIP identity of agent that will own the config.
        :param config_name: name of the configuration file.
        :param raw_contents: file data.
        :param config_type: format of the config file (default "json").
        """
        params = dict(platform_uuid=platform_uuid,
                      agent_identity=agent_identity,
                      config_name=config_name,
                      raw_contents=raw_contents,
                      config_type=config_type)
        return self.do_rpc("store_agent_config", **params)

    def list_agent_configs(self, platform_uuid, agent_identity):
        """
        List the configuration files stored for an agent.

        :param platform_uuid: uuid of platform where agent is installed.
        :param agent_identity: VIP identity of agent that owns the configs.
        """
        params = dict(platform_uuid=platform_uuid,
                      agent_identity=agent_identity)
        return self.do_rpc("list_agent_configs", **params)

    def get_agent_config(self, platform_uuid, agent_identity, config_name,
                         raw=True):
        """
        Get a config file from an agent's Configuration Store.

        :param platform_uuid: uuid of platform where agent is installed.
        :param agent_identity: VIP identity of agent that owns the config.
        :param config_name: name of the configuration file.
        :param raw: return raw file contents rather than parsed data.
        """
        params = dict(platform_uuid=platform_uuid,
                      agent_identity=agent_identity,
                      config_name=config_name,
                      raw=raw)
        return self.do_rpc("get_agent_config", **params)

    def set_setting(self, setting, value):
        """
        Assign a value to a setting in Volttron Central.

        :param setting: Name of the setting to set.
        :param value: Value to assign to setting.
        """
        # Bug fix: previously passed `key=key`, but `key` was an
        # undefined name (NameError); the parameter is `setting`.
        return self.do_rpc("set_setting", key=setting, value=value)

    def get_setting(self, setting):
        """
        Get the value of a setting in Volttron Central.

        :param setting: Name of the setting to get.
        """
        # Bug fix: same undefined-`key` NameError as set_setting().
        return self.do_rpc("get_setting", key=setting)

    def get_setting_keys(self):
        """
        Get a list of settings in Volttron Central.
        """
        return self.do_rpc("get_setting_keys")
def validate_response(response):
    """
    Validate that the message is a json-rpc response.

    :param response: a requests-style response object.
    :return: None; raises AssertionError on an invalid response.
    """
    assert response.ok
    body = response.json()
    assert body['jsonrpc'] == '2.0'
    assert body['id']
    assert 'error' in body or 'result' in body
|
6,106 | 9b6d30a40bafa0e9e4760843d6a2f750f0f88a57 | from datetime import date
def diff_in_date(first, second):
    """Return the difference ``second - first`` as a short string.

    ``str(timedelta)`` for a day or more looks like ``'9 days, 0:00:00'``;
    only the day part before the comma is kept.  Sub-day differences
    (no comma) are returned unchanged, e.g. ``'0:00:00'``.

    Args:
        first: Start date/datetime.
        second: End date/datetime.
    """
    value = str(second - first)
    # Idiom fix: `value.__contains__(',')` replaced with partition(),
    # which also avoids building a throwaway list via split().
    days, sep, _ = value.partition(',')
    return days if sep else value
# Demo: difference between two fixed dates, then between the second
# date and today (the second output changes from day to day).
first_date = date(2014, 7, 2)
second_date = date(2014, 7, 11)
current_date = date.today()
val = diff_in_date(first_date, second_date)
print(val)
newVal = diff_in_date(second_date, current_date)
print(newVal)
|
6,107 | e0541c377eb6631e4ef5eb79b1204612ce8af48c | import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from uraeus.nmbd.python import simulation
from uraeus.nmbd.python.engine.numerics.math_funcs import A, B
database_directory = os.path.abspath('../../')
sys.path.append(database_directory)
from uraeus_fsae.simenv.assemblies import asurt_FS17_v1 as num_assm
from controllers import speed_controller, stanley_controller
# Numerical model and integration step from the assembled vehicle.
num_model = num_assm.num_model
dt = num_assm.dt
# presumably tyre radius in mm — used below as the torque lever arm;
# TODO confirm units against the model.
TR = 254
def generate_circular_path(radius, offset):
    """Sample 360 points on a circle of *radius* centred at *offset*.

    Returns:
        (x, y, radii) arrays of shape (360,); radii is constant.
    """
    angles = np.deg2rad(np.linspace(0, 360, 360))
    xs = radius * np.sin(angles) + offset[0]
    ys = radius * np.cos(angles) + offset[1]
    return xs, ys, radius * np.ones((360,))
# Build a constant-radius (skid-pad style) path: a 10.5 circle centred
# at (0, -10.5); scaled by 1e3 below — presumably metres to mm.
x_data, y_data, radii = generate_circular_path(10.5, (0, -10.5))
path_data = np.zeros((360, 3))
path_data[:, 0] = -1e3 * x_data
path_data[:, 1] = 1e3 * y_data
path_data[:, 2] = 1e3 * radii
# Quick visual sanity check of the target path (blocks until closed).
plt.figure(figsize=(10, 5))
plt.plot(path_data[:, 0], path_data[:, 1])
plt.grid()
plt.show()
# Speed setpoint 35 and lateral-controller gain 25 — TODO confirm units.
logitudinal_controller = speed_controller(35, dt)
lateral_controller = stanley_controller(path_data, 25)
def terrain_state(x, y):
    """Flat-terrain model: unit z normal and zero height everywhere."""
    normal = np.array([[0], [0], [1]], dtype=np.float64)
    height = 0
    return [normal, height]
def torque_function(t):
    """Torque factor from the speed controller for the current chassis state."""
    chassis = num_model.Subsystems.CH
    P_ch = chassis.P_rbs_chassis
    Rd = chassis.Rd_rbs_chassis
    return logitudinal_controller.get_torque_factor(P_ch, Rd)
def RR_Torque(t):
    """Drive torque applied at the rear-right wheel at time *t*."""
    # presumably weight * g scaled to model units, times tyre radius TR
    # — TODO confirm against the vehicle model.
    return -torque_function(t) * (70 * 9.81) * 1e6 * TR
def RL_Torque(t):
    """Drive torque applied at the rear-left wheel at time *t*."""
    # Same magnitude as RR_Torque: both rear wheels are driven equally.
    return -torque_function(t) * (70 * 9.81) * 1e6 * TR
def steering_function(t):
    """Rack travel command from the lateral (Stanley) controller."""
    chassis = num_model.Subsystems.CH
    R_ch = chassis.R_rbs_chassis
    P_ch = chassis.P_rbs_chassis
    Rd_ch = chassis.Rd_rbs_chassis
    Pd_ch = chassis.Pd_rbs_chassis
    # Reference point fixed in the chassis frame (front axle area).
    point_local = np.array([[-800], [0], [0]], dtype=np.float64)
    point_global = R_ch + A(P_ch) @ point_local
    # Velocity of that point expressed in the chassis frame; take the
    # longitudinal (x) component.
    speed = (A(P_ch).T @ (Rd_ch + B(P_ch, point_local) @ Pd_ch))[0, 0]
    steer = lateral_controller.get_steer_factor(point_global, P_ch, Pd_ch, speed)
    return steer * 18
def zero_func(t):
    """Time-independent zero 3x1 vector (used to disable aero forces)."""
    return np.zeros((3, 1), dtype=np.float64)
# Wire the model callbacks: terrain, steering and rear drive torques.
num_assm.terrain_data.get_state = terrain_state
num_assm.ST1_config.UF_mcs_rack_act = steering_function
num_assm.AX1_config.UF_far_drive = RR_Torque
num_assm.AX1_config.UF_fal_drive = RL_Torque
#num_assm.DR2_config.UF_far_drive = RR_Torque
#num_assm.DR2_config.UF_fal_drive = RL_Torque
# Aerodynamic drag force/torque disabled for this run.
num_assm.CH_config.UF_fas_aero_drag_F = zero_func
num_assm.CH_config.UF_fas_aero_drag_T = zero_func
# =============================================================================
# Setting and Starting Simulation
# =============================================================================
sim = simulation('sim', num_model, 'dds')
sim.set_time_array(15, dt)
# Getting Equilibrium results as initial conditions to this simulation
# ====================================================================
sim.set_initial_states('results/equilibrium_v4.npz')
sim.solve()
sim.save_as_csv('results', 'constant_radius_v8', 'pos')
sim.save_as_npz('results', 'constant_radius_v8')
#=============================================================================
# Plotting Simulation Results
# =============================================================================
import matplotlib.pyplot as plt
# Trajectory in the ground plane, then chassis kinematics over time.
sim.soln.pos_dataframe.plot(x='CH.rbs_chassis.x', y='CH.rbs_chassis.y', grid=True)
sim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.x', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)
sim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)
sim.soln.acc_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)
# e0..e3 — presumably chassis orientation quaternion components.
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e0', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e1', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e2', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e3', grid=True)
plt.show()
|
6,108 | 96bf6220bfc884e3a19f70a63d9ecba449e2e7e2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# staticbox.py
import wx
class StaticBox(wx.Dialog):
    """Modal dialog demonstrating wx.StaticBox grouping of controls.

    The dialog shows itself modally from __init__ and destroys itself
    once dismissed.
    """

    def __init__(self, parent, id, title):
        wx.Dialog.__init__(self, parent, id, title, size = (250, 230))
        # Group box that visually frames the controls below it.
        wx.StaticBox(self, -1, 'Personal Info', (5, 5), size = (240, 170))
        wx.CheckBox(self, -1, 'Male', (15, 30))
        wx.CheckBox(self, -1, 'Married', (15, 55))
        wx.StaticText(self, -1, 'Age', (15, 95))
        wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min = 1, max = 120)
        # Widget id 1 is bound to OnClose below.
        wx.Button(self, 1, 'Ok', (90, 185), (60, -1))
        self.Bind(wx.EVT_BUTTON, self.OnClose, id = 1)
        self.Center()
        self.ShowModal()
        self.Destroy()

    def OnClose(self, event):
        # Ends the modal loop started in __init__.
        self.Close()
# Entry point: the dialog runs modally inside its own constructor.
if __name__ == '__main__':
    app = wx.App()
    StaticBox(None, -1, 'staticbox.py')
    app.MainLoop()
|
6,109 | 6682c864a3da6f2c894a3a40359726b4eb97d040 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# author: MSJ
# date: 2021/3/11
# desc:冒泡排序
def bubble_sort(arr):
    """Sort *arr* in place with bubble sort and return it.

    Args:
        arr: Mutable sequence of mutually comparable items.

    Returns:
        The same list object, sorted in ascending order.
    """
    for end in range(1, len(arr)):
        swapped = False
        for j in range(0, len(arr) - end):
            if arr[j] > arr[j + 1]:
                # Idiom: tuple swap replaces the temporary variable.
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        # Optimization: a pass with no swaps means the list is sorted.
        if not swapped:
            break
    return arr
# Quick smoke test: sort a sample list and print the result.
if __name__ == '__main__':
    r1 = bubble_sort([0, 5, 3, 2, 9, 20, 6, 7, 3])
    print(r1)
|
6,110 | 61e38ae6ae2a1ed061f9893742f45b3e44f19a68 | from tkinter import *
from tkinter import messagebox
# Top-level window; must exist before any widgets are created.
root = Tk()
def hello():
    """Pop up an information box greeting the world."""
    messagebox.showinfo("Say Hello", "Hello World")
# Single button that triggers the hello() dialog; mainloop() blocks
# until the window is closed.
B1 = Button(root, text = "Say Hello", command = hello, font='arial 20')
B1.pack()
mainloop()
|
6,111 | f925b3b2f55c3f8daf57438d8d20b60446ae39af | from torchsummary import summary
import torch
import torch.nn as nn
import torch.nn.functional as F
from eva4modeltrainer import ModelTrainer
class Net(nn.Module):
    """
    Base network with helpers for building conv blocks, plus summary,
    training and stats plumbing.
    """

    def conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, groups=1, padding=1, bias=False, padding_mode="zeros"):
        """Single Conv2d wrapped in a list so activate() can extend it."""
        conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                         kernel_size=kernel_size, groups=groups,
                         dilation=dilation, padding=padding, bias=bias,
                         padding_mode=padding_mode)
        return [conv]

    def separable_conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, padding=1, bias=False, padding_mode="zeros"):
        """Depthwise conv followed by a 1x1 pointwise conv, as a list."""
        depthwise = nn.Conv2d(in_channels=in_channels, out_channels=in_channels,
                              kernel_size=kernel_size, groups=in_channels,
                              dilation=dilation, padding=padding, bias=bias,
                              padding_mode=padding_mode)
        pointwise = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                              kernel_size=(1,1), bias=bias)
        return [depthwise, pointwise]

    def activate(self, l, out_channels, bn=True, dropout=0, relu=True, max_pooling=0):
        """Append (in this order) optional MaxPool, BatchNorm, Dropout, ReLU."""
        if max_pooling > 0:
            l.append(nn.MaxPool2d(2, 2))
        if bn:
            l.append(nn.BatchNorm2d(out_channels))
        if dropout > 0:
            l.append(nn.Dropout(dropout))
        if relu:
            l.append(nn.ReLU())
        return nn.Sequential(*l)

    def create_conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, groups=1, padding=1, bias=False, bn=True, dropout=0, relu=True, padding_mode="zeros", max_pooling=0):
        """Full conv block: Conv2d plus the activation stack from activate()."""
        layers = self.conv2d(in_channels=in_channels, out_channels=out_channels,
                             kernel_size=kernel_size, groups=groups,
                             dilation=dilation, padding=padding, bias=bias,
                             padding_mode=padding_mode)
        return self.activate(layers, out_channels, bn, dropout, relu, max_pooling)

    def create_depthwise_conv2d(self, in_channels, out_channels, kernel_size=(3,3), dilation=1, padding=1, bias=False, bn=True, dropout=0, relu=True, padding_mode="zeros"):
        """Depthwise-separable conv block plus the activation stack."""
        layers = self.separable_conv2d(in_channels=in_channels,
                                       out_channels=out_channels,
                                       kernel_size=kernel_size,
                                       dilation=dilation, padding=padding,
                                       bias=bias, padding_mode=padding_mode)
        return self.activate(layers, out_channels, bn, dropout, relu)

    def __init__(self, name="Model"):
        super(Net, self).__init__()
        self.trainer = None
        self.name = name

    def summary(self, input_size):
        """Print a torchsummary report, e.g. input_size=(1, 28, 28)."""
        summary(self, input_size=input_size)

    def gotrain(self, optimizer, train_loader, test_loader, epochs, statspath, scheduler=None, batch_scheduler=False, L1lambda=0):
        """Create a ModelTrainer for this model and run it for *epochs*."""
        self.trainer = ModelTrainer(self, optimizer, train_loader, test_loader,
                                    statspath, scheduler, batch_scheduler,
                                    L1lambda)
        self.trainer.run(epochs)

    def stats(self):
        """Training statistics, or None before gotrain() has run."""
        return self.trainer.stats if self.trainer else None
#implementation of the new resnet model
#implementation of the new resnet model
class newResnetS11(Net):
    """Custom ResNet-style network with a 10-way log-softmax head."""

    def __init__(self, name="Model", dropout_value=0):
        super(newResnetS11, self).__init__(name)
        # Prep layer: 3 -> 64 channels, spatial size unchanged.
        self.prepLayer = self.create_conv2d(3, 64, dropout=dropout_value)
        # Layer 1: conv + pool to 128 channels, plus a residual block.
        self.layer1Conv1 = self.create_conv2d(64, 128, dropout=dropout_value, max_pooling=1)
        self.layer1resnetBlock1 = self.resnetBlock(128, 128)
        # Layer 2: plain conv + pool to 256 channels.
        self.layer2Conv1 = self.create_conv2d(128, 256, dropout=dropout_value, max_pooling=1)
        # Layer 3: conv + pool to 512 channels, plus a residual block.
        self.layer3Conv1 = self.create_conv2d(256, 512, dropout=dropout_value, max_pooling=1)
        self.layer3resnetBlock1 = self.resnetBlock(512, 512)
        # Head: 4x4 max pool (collapses the feature map for 32x32 inputs
        # — TODO confirm input size), then a 1x1 conv as classifier.
        self.maxpool = nn.MaxPool2d(4, 1)
        self.fc_layer = self.create_conv2d(512, 10, kernel_size=(1,1), padding=0, bn=False, relu=False)

    def resnetBlock(self, in_channels, out_channels):
        """Two 3x3 conv-BN-ReLU stages with no downsampling."""
        stages = []
        for _ in range(2):
            stages.append(nn.Conv2d(in_channels, out_channels, (3, 3), padding=1, bias=False))
            stages.append(nn.BatchNorm2d(out_channels))
            stages.append(nn.ReLU())
        return nn.Sequential(*stages)

    def forward(self, x):
        x = self.prepLayer(x)
        # Residual add around the first resnet block.
        x = self.layer1Conv1(x)
        x = torch.add(x, self.layer1resnetBlock1(x))
        x = self.layer2Conv1(x)
        # Residual add around the second resnet block.
        x = self.layer3Conv1(x)
        x = torch.add(x, self.layer3resnetBlock1(x))
        x = self.maxpool(x)
        x = self.fc_layer(x)
        x = x.view(-1, 10)
        return F.log_softmax(x, dim=-1)
|
6,112 | 7e58fe636e6d835d7857a49900bbc127b52f63d9 | class HashTableEntry:
"""
Hash Table entry, as a linked list node.
"""
def __init__(self, key, value):
self.key = key
self.value = value
self.next = None
class HashTable:
"""
A hash table that with `capacity` buckets
that accepts string keys
Implement this.
"""
def __init__(self, capacity):
self.capacity = capacity
self.storage = [None] * capacity
self.numberOfItems = 0
def fnv1(self, key):
"""
FNV-1 64-bit hash function
Implement this, and/or DJB2.
"""
# hash = 0xff
hash = 0xcbf29ce484222325
for n in key.encode():
# print(n)
hash = hash ^ n
hash = hash * 0x100000001b3
# print(hash)
return hash
def djb2(self, key):
"""
DJB2 32-bit hash function
Implement this, and/or FNV-1.
"""
hash = 5381
for n in key.encode():
# hash = ((hash << 5) + hash) + n
hash = hash * 33 + n
return hash
# return hash & 0xFFFFFFFF
def hash_index(self, key):
"""
Take an arbitrary key and return a valid integer index
between within the storage capacity of the hash table.
"""
# return self.fnv1(key) % self.capacity
return self.djb2(key) % self.capacity
def put(self, key, value):
"""
Store the value with the given key.
Hash collisions should be handled with Linked List Chaining.
Implement this.
"""
hi = self.hash_index(key)
if self.storage[hi]:
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
if current.key == key:
current.value = value
else:
current.next = HashTableEntry(key, value)
self.numberOfItems += 1
else:
self.storage[hi] = HashTableEntry(key, value)
self.numberOfItems += 1
self.calculateLoad()
def delete(self, key):
"""
Remove the value stored with the given key.
Print a warning if the key is not found.
Implement this.
"""
hi = self.hash_index(key)
# if that hi is empty ignore
# if self.storage[hi] is None:
# print("WARNING: no key")
# return
current = self.storage[hi]
prev = self.storage[hi]
while current and current.key != key:
prev = current
current = current.next
if (current and current.key == key):
# if its the first link in the list
if (current == self.storage[hi]):
self.storage[hi] = current.next
else:
prev.next = current.next
self.numberOfItems -= 1
else:
print("WARNING: no key")
self.calculateLoad()
def get(self, key):
"""
Retrieve the value stored with the given key.
Returns None if the key is not found.
Implement this.
"""
hi = self.hash_index(key)
if (self.storage[hi]):
if(self.storage[hi].next):
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
return current.value
else:
return self.storage[hi].value
return None
def resize(self, factor=2):
"""
Doubles the capacity of the hash table and
rehash all key/value pairs.
Implement this.
"""
self.capacity = round(self.capacity*factor)
newarr = [None] * self.capacity
for i, v in enumerate(self.storage):
while v:
hi = self.hash_index(v.key)
if newarr[hi]:
current = newarr[hi]
while current.next:
current = current.next
current.next = HashTableEntry(v.key, v.value)
else:
newarr[hi] = HashTableEntry(v.key, v.value)
v = v.next
self.storage = newarr
# Solution 2 - Much cleaner
# newHashTable = HashTable(round(self.capacity*factor))
# for i, v in enumerate(self.storage):
# while v:
# newHashTable.put(v.key, v.value)
# v = v.next
# self.capacity = newHashTable.capacity
# self.storage = newHashTable.storage
def calculateLoad(self):
load = self.numberOfItems/len(self.storage)
# print("Items:\t", ht.numberOfItems)
# print("Storage:", len(ht.storage))
# print("LOAD:\t", load)
# comment code bellow to pass tests
if load > 0.7:
self.resize(2)
elif load < 0.2:
self.resize(0.5)
pass
# Manual smoke test: exercise put/get/delete and explicit resizing.
if __name__ == "__main__":
    # Deliberately tiny so chaining and auto-resize kick in early.
    ht = HashTable(2)
    ht.put("line_1", "111")
    ht.put("line_2", "222")
    ht.put("line_3", "333")
    ht.put("line_4", "sss")
    ht.put("line_5", "ddd")
    ht.put("line_6", "ggg")
    ht.put("line_7", "hhh")
    ht.put("line_12", "jjj")
    print("")
    # Test storing beyond capacity
    # print(ht.get("line_1"))
    # print(ht.get("line_2"))
    # print(ht.get("line_3"))
    # print(ht.get("line_4"))
    # print(ht.get("line_5"))
    # print(ht.get("line_6"))
    # print(ht.get("line_7"))
    # Test resizing
    old_capacity = len(ht.storage)
    ht.resize()
    new_capacity = len(ht.storage)
    print(f"\nResized from {old_capacity} to {new_capacity}.\n")
    # print("1: ", ht.storage[1].value)
    # print("1: ", ht.storage[1].next.value)
    # print("3: ", ht.storage[3].value)
    # print("3: ", ht.storage[3].next.value)
    # print("3: ", ht.storage[3].next.next.value)
    print("")
    # Dump every chain so collisions are visible.
    for i, v in enumerate(ht.storage):
        while v:
            print(i, v.value)
            v = v.next
    print("")
    ht.delete("line_3")
    print("")
    for i, v in enumerate(ht.storage):
        while v:
            print(i, v.value)
            v = v.next
    print("")
    # Test if data intact after resizing
    # print(ht.get("line_1"))
    # print(ht.get("line_2"))
    # print(ht.get("line_3"))
    # print(ht.get("line_4"))
    # print(ht.get("line_5"))
    # print(ht.get("line_6"))
    # print(ht.get("line_7"))
    print("")
|
6,113 | dc2b074d7d0e87105b2479bb60b46c73dce6c069 | # -*-coding:utf-8 -*-
#
# Created on 2016-04-01
# __ __
# - /__) _ /__) __/
# / / ( (/ / ( /
# /
from core.views import BaseView
class TestView(BaseView):
    """
    Test page: renders a static demo template for previewing layouts.
    """
    # Switch the template here to preview a different demo page.
    # template_name = 'test/blog-1.html'
    template_name = 'test/music-1.html'
|
6,114 | bfa5739949c26758e3762fcff8347d23ad70f704 | # 데이터 출처: kaggle
# 데이터 개요: 511, 유리를 위한 다양한 속성(화학원소)들로부터 type 구별
# 데이터 예측 모델: 이진클래스
# 적용 머신러닝 모델: 깊은 다층 퍼셉트론 신경망
# 훈련 데이터셋: 160건
# 검증 데이터셋: 건
# 시험 데이터셋: 수집데이터로서 시험셋을 확보할 수 없으므로 고려하지 않음
# 입력 데이터: 10개 항목의 데이터
# 은닉층: 2개
# 사용한 활성화 함수
# - 제1 은닉층: Relu
# - 제2 은닉층: Relu
# - Output Layer: Softmax
# 사용한 손실함수: categorical_crossentropy
# 사용한 Optimizer: rmsprop
# Tensorflow 버전: 2.0.0
# 파이썬버전: 3.7.4
import pandas as pd
from datetime import datetime
from sklearn.model_selection import train_test_split
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical

# Fixed seed for reproducible splits and weight initialisation.
np.random.seed(5)
# NOTE(review): match_dic is never used below.
match_dic={}
zoo_class = pd.read_csv('zoo.csv',sep=',',header=0)
# Normalise column names: replace spaces with underscores.
zoo_class.columns = zoo_class.columns.str.replace(' ','_')
# Identify all independent variables (everything but name and target).
input_data_header = list(zoo_class.columns.difference(["animal_name","class_type"]))
input_data_number = len(input_data_header)
label = zoo_class["class_type"]
start_time = datetime.now()
train_data, test_data, train_label, test_label = train_test_split(zoo_class[input_data_header],label)
# One-hot encode the 7 animal classes.
# NOTE(review): the file header describes a binary classifier, but the
# model below is clearly 7-class softmax — header looks stale.
train_label = to_categorical(train_label, num_classes=7)
test_label = to_categorical(test_label, num_classes=7)
# Load training and test sets
# x_train = x_train.reshape(60000, width * height).astype('float32') / 255.0
# x_test = x_test.reshape(10000, width * height).astype('float32') / 255.0
# Build the model: two ReLU hidden layers, softmax output.
model = Sequential()
model.add(Dense(64, input_dim=input_data_number, activation='relu'))
model.add(Dense(64, activation='relu'))
# model.add(Dense(6, activation='sigmoid'))
model.add(Dense(7, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
# 4. Train the model.
# NOTE(review): 20000 epochs on ~100 rows is very long and likely
# overfits; the test set also doubles as the validation set here.
hist = model.fit(train_data, train_label, epochs=20000, batch_size=64, validation_data=(test_data, test_label))
# hist = model.fit(train_data, train_label, epochs=1000, batch_size=64)
end_time = datetime.now()
# 5. Inspect the training history (loss and accuracy on shared x axis).
import matplotlib.pyplot as plt
fig, loss_ax = plt.subplots()
acc_ax = loss_ax.twinx()
loss_ax.plot(hist.history['loss'], 'y', label='train loss')
loss_ax.plot(hist.history['val_loss'], 'r', label='val loss')
# acc_ax.plot(hist.history['acc'], 'b', label='train acc')
acc_ax.plot(hist.history['accuracy'], 'b', label='train acc')
# acc_ax.plot(hist.history['val_acc'], 'g', label='val acc')
acc_ax.plot(hist.history['val_accuracy'],'g', label='val acc')
loss_ax.set_xlabel('epoch')
loss_ax.set_ylabel('loss')
acc_ax.set_ylabel('accuray')
loss_ax.legend(loc='upper left')
acc_ax.legend(loc='lower left')
plt.show()
# 6. Evaluate the model on the held-out data.
loss_and_metrics = model.evaluate(test_data, test_label, batch_size=32)
print('loss_and_metrics : ' + str(loss_and_metrics))
scores = model.evaluate(test_data, test_label)
print("%s: %.2f%%"%(model.metrics_names[1],scores[1]*100))
6,115 | 3f2c1a83ae0dfdba202038a209b90162ccddee36 | #!/usr/bin/python3
"""City Module"""
from models.base_model import BaseModel
class City(BaseModel):
    """City Class

    Public class attributes:
        state_id: type string, id of the State this city belongs to
        name: type string, name of the city
    """
    state_id = ""
    name = ""
|
6,116 | cb903f3f7fd3c4f3ba5f8ff2ce12aac9c680aa15 | from pyramid.request import Request
from pyramid.response import Response
from pyramid.view import view_config
from svc1_first_auto_service.data.repository import Repository
@view_config(route_name='autos_api',
             request_method='GET',
             renderer='json')
def all_autos(_):
    """Return up to 25 cars as JSON; the request object is unused."""
    return Repository.all_cars(limit=25)
@view_config(route_name='auto_api',
             request_method='GET',
             renderer='json')
def single_auto(request: Request):
    """Return the car whose string id is in the URL, or a 404 JSON error."""
    car_id = request.matchdict.get('car_id')
    car = Repository.car_by_id(car_id)
    if car:
        return car
    msg = "The car with id '{}' was not found.".format(car_id)
    return Response(status=404, json_body={'error': msg})
@view_config(route_name='auto',
             request_method='GET',
             renderer='json')
def auto_by_id(request: Request):
    """Return the car with integer id `cid`, or a 404 JSON error.

    Bug fix: the original called int(cid) before the None check, so a
    missing cid crashed with TypeError (and a non-numeric cid with
    ValueError) instead of producing the 404 response.
    """
    cid = request.matchdict.get('cid')
    if cid is None:
        msg = f"The cid is None"
        return Response(status=404, json_body={'error': msg})
    try:
        cid = int(cid)
    except ValueError:
        # Non-numeric id: treat exactly like an unknown car.
        msg = f"The car with id '{cid}' was not found."
        return Response(status=404, json_body={'error': msg})
    car = Repository.car_by_cid(cid)
    if not car:
        msg = f"The car with id '{cid}' was not found."
        return Response(status=404, json_body={'error': msg})
    return car
|
6,117 | 9a2002b5ff0fe41f2b5b568f4c278d4376bf4fb1 | import pandas as pd
from bokeh.models import ColumnDataSource, LinearColorMapper, HoverTool
from bokeh.plotting import figure
from bokeh.transform import transform
from sklearn.metrics import confusion_matrix
from reporter.settings import COLORS
from reporter.metrics import Metric
class ConfusionMatrix(Metric):
    """Confusion-matrix metric rendered as a Bokeh heat map."""

    def __init__(self):
        super().__init__('confusion-matrix')

    def generate_data(self):
        """Return the confusion matrix of self.y vs self.y_pred in long format.

        One row per (Predicted, Actual) label pair with a 'Value' count column.
        NOTE(review): sklearn's confusion_matrix puts *true* labels on rows,
        but the row index is named 'Predicted' here -- the axis naming may be
        swapped; confirm against how callers read the plot.
        """
        matrix = confusion_matrix(self.y, self.y_pred)
        matrix = pd.DataFrame(matrix, index=self.labels, columns=self.labels)
        matrix.index.name = 'Predicted'
        matrix.columns.name = 'Actual'
        return pd.DataFrame(matrix.stack(), columns=['Value']).reset_index()

    def draw(self, size=400):
        """Build a size x size heat-map figure, store it on self.plot, and return it."""
        index_label = 'Predicted'
        column_label = 'Actual'
        matrix = self.generate_data()
        min_val, max_val = matrix.Value.min(), matrix.Value.max()
        source = ColumnDataSource(matrix)
        mapper = LinearColorMapper(palette=COLORS, low=min_val, high=max_val)
        # '@Value' is a Bokeh field reference, not a Python placeholder, so a
        # plain string literal is correct (the original f-string had no fields).
        hover = HoverTool(tooltips=[
            ('Number', "@Value")
        ])
        p = figure(plot_width=size,
                   plot_height=size,
                   title='Confusion Matrix',
                   tools=[hover],
                   toolbar_location=None,
                   x_range=self.labels,
                   y_range=list(reversed(self.labels)))
        p.yaxis.axis_label = index_label
        p.xaxis.axis_label = column_label
        p.rect(x=column_label,
               y=index_label,
               width=1,
               height=1,
               source=source,
               fill_color=transform('Value', mapper))
        self.plot = p
        return p
|
6,118 | 9fd33089a9dc919ef2fb2698059e60a24a0e05e6 | import mechanicalsoup
from bs4 import BeautifulSoup
import re
import json
def extract_title(page):
    """Return the text of the first <h1> inside the page's <header>."""
    heading = page.find("header").find("h1")
    return heading.contents[0]
def extract_colours(page):
    """Return the unique hex colour codes (e.g. '#aabbcc') in the page's first <ul>.

    First-appearance order is preserved via dict.fromkeys de-duplication.
    """
    color_list = page.find("ul")
    # raw string avoids the invalid '\w' escape warning of the original pattern
    return list(dict.fromkeys(re.findall(r"#\w+", str(color_list.contents))))
def get_colours_from_page(browser, baseurl, target_page):
    """Fetch one flag page and return {'title': ..., 'colours': [...]}."""
    response = browser.open(baseurl + target_page)
    item = BeautifulSoup(response.text, 'lxml').find("section", {"id": "item"})
    return {
        "title": extract_title(item),
        "colours": extract_colours(item),
    }
def get_links_from_article(articles):
    """Return each article's first <a> href, in input order."""
    return [article.find("a").attrs['href'] for article in articles]
def scrape_flag_pagination_page(browser, baseurl, pageCount):
    """Return the article links found on one /flags pagination page."""
    page_url = baseurl + "/flags?page={0}".format(pageCount)
    soup = BeautifulSoup(browser.open(page_url).text, 'lxml')
    return get_links_from_article(soup.findAll("article"))
baseurl = "https://encycolorpedia.com"
browser = mechanicalsoup.StatefulBrowser(raise_on_404=True)
list_of_urls = []
flag_count = 0  # NOTE(review): never updated or read afterwards
pageCount = 1
# Walk pagination until the site 404s on a missing page number.
while(True):
    try:
        list_of_urls += scrape_flag_pagination_page(browser, baseurl, pageCount)
    except mechanicalsoup.utils.LinkNotFoundError:
        break
    pageCount += 1
# Fetch every flag page and collect its title + colour palette.
package = []
for url in list_of_urls:
    package.append(get_colours_from_page(browser, baseurl, url))
with open('flag_colours.json', 'w', encoding='utf-8') as f:
    json.dump(package, f, ensure_ascii=False, indent=4)
6,119 | 7502e28197cb40044303a0a2163546f42375aeb6 | #!/usr/bin/env python
import os, time, sys
fifoname = '/dev/pi-blaster' # must open same name
def child( ):
    """Writer process: feed 'Spam NNN' lines into the named pipe forever.

    Python 2 code: os.write with a str payload.
    """
    pipeout = os.open(fifoname, os.O_WRONLY) # open fifo pipe file as fd
    zzz = 0
    while 1:
        time.sleep(zzz)  # pause 0-4 seconds between messages
        os.write(pipeout, 'Spam %03d\n' % zzz)
        zzz = (zzz+1) % 5  # cycle the counter 0..4
def parent( ):
    """Reader process: block on the fifo and echo each line with pid and timestamp."""
    pipein = open(fifoname, 'r', 0) # open fifo as stdio object, unbuffered
    while 1:
        line = pipein.readline( )[:-1] # blocks until data sent; strip the newline
        print 'Parent %d got "%s" at %s' % (os.getpid(), line, time.time( ))
# NOTE(review): the original __main__ guard below is commented out (and its
# dunder spellings are mangled); the script currently always runs the reader.
#if _ _name_ _ == '_ _main_ _':
#    if not os.path.exists(fifoname):
#        os.mkfifo(fifoname)                # create a named pipe file
#    if len(sys.argv) == 1:
#        parent( )                          # run as parent if no args
#    else:                                  # else run as child process
parent( )
|
def say_hi(argument):
    """Return a greeting string for *argument*."""
    return "Hello {}".format(argument)
def call_func(some_func, argument):
    """Invoke *some_func* with *argument* and return whatever it produces."""
    result = some_func(argument)
    return result
def main(argument):
    """Greet *argument* by dispatching say_hi through call_func."""
    return call_func(say_hi, argument)
if __name__ == "__main__":
    print(main(1))  # demo run: prints "Hello 1"
6,121 | 141e0f20ce912ecf21940f78e9f40cb86b91dc2b | #! /usr/bin/env python
"""
Normalizes a vidoe by dividing against it's background.
See: BackgroundExtractor.py to get the background of a video.
USING:
As a command line utility:
$ Normalizer.py input_video input_image output_video
As a module:
from Normalizer import Normalizer
norm = Normalizer("input_video.avi", input_image, "output_video.avi")
norm.normalize()
Author: Martin Humphreys
"""
from argparse import ArgumentParser
import numpy as np
import os
import cv2
class Normalizer:
    """Normalizes a video by dividing each frame against a background image.

    Python 2 code: the ``unicode`` isinstance checks below NameError on Python 3.
    """

    def __init__(self):
        pass

    def imageFromArg(self, image):
        """Accept a filename or a pre-loaded image; return a grayscale image."""
        if isinstance(image, (str, unicode)):
            return cv2.imread(image, 0)  # 0 -> load as grayscale
        else:
            return image

    def videoReaderFromArg(self, video):
        """Accept a filename or an opened capture; return a cv2.VideoCapture."""
        if isinstance(video, (str, unicode)):
            vc = cv2.VideoCapture(video)
        else:
            vc = video
        return vc

    def normalize(self, background, in_video, out_video):
        """Open in_video, mirror its properties into a writer, and normalize every frame."""
        vc = self.videoReaderFromArg(in_video)
        frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))  # NOTE(review): unused
        fps = float(vc.get(cv2.CAP_PROP_FPS))
        if fps == float('inf'):
            # some containers report no frame rate; fall back to a fixed value
            fps = 300
        width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))
        vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)  # False -> grayscale output
        self.normalizeVideo(background, vc, vw)

    def normalizeVideo(self, background, video_reader, video_writer):
        """Read frames until exhaustion, normalizing each and writing it out."""
        f = 1  # frame counter; incremented but never otherwise used
        while(True):
            ret, frame = video_reader.read()
            if not ret:
                break;
            else:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                f += 1
                normal_frame = self.normalizeFrame(background, frame)
                video_writer.write(normal_frame)

    def normalizeFrame(self, background, frame):
        """Divide *frame* by the background (callable or image) and clamp to uint8."""
        if callable(background):
            bg = background(frame)  # background may be computed per frame
        else:
            bg = self.imageFromArg(background)
        a = frame.astype('float')
        a = self.transformRange(a, 0, 255, 1, 255)  # shift range to start at 1 -- presumably to avoid zeros; confirm
        b = bg.astype('float')
        b = self.transformRange(b, 0, 255, 1, 255)
        c = a/((b+1)/256)  # divide frame by background rescaled to ~unity
        # Clamp to [0, 255]. NOTE(review): values exactly equal to 255 fall
        # through both masks and become 0 -- edge case worth confirming.
        d = c*(c < 255)+255*np.ones(np.shape(c))*(c > 255)
        return d.astype('uint8')

    def transformRange(self, value, oldmin, oldmax, newmin, newmax):
        """Linearly map *value* from [oldmin, oldmax] onto [newmin, newmax]."""
        return (((value - oldmin) * (newmax - newmin)) / (oldmax - oldmin)) + newmin
def build_parser():
    """Create the command-line parser for the normalizer tool."""
    parser = ArgumentParser()
    for arg_name, help_text in (
        ('input_video', 'video to process'),
        ('background', 'background image'),
        ('output_video', 'file to save normalized video to'),
    ):
        parser.add_argument(arg_name, help=help_text)
    return parser
def main():
    """CLI entry point: validate input paths, then run the normalizer."""
    parser = build_parser()
    opts = parser.parse_args()
    for path, kind in ((opts.input_video, "Video"), (opts.background, "Image")):
        if not os.path.isfile(path):
            parser.error("%s file %s does not exist." % (kind, path))
    Normalizer().normalize(opts.background, opts.input_video, opts.output_video)
# Command-line entry point.
if __name__ == '__main__':
    main()
|
def countdown(n):
    """Return a zero-argument callable yielding n, n-1, n-2, ... on successive calls.

    Renamed the inner function (originally ``next``) so it no longer shadows
    the builtin; behavior is unchanged.
    """
    def step():
        nonlocal n
        current = n
        n -= 1
        return current
    return step
# Demo: drain the counter; a() yields 12, 11, ..., 1, and the falsy 0 stops the loop.
a = countdown(12)
while True:
    v = a()
    if not v:break
|
6,123 | a40c87fe4b805495e5bd30155faa861cbe16c368 | from eboss_qso.fits.joint import run_joint_mcmc_fit
from eboss_qso.measurements.utils import make_hash
import os.path as osp
import os
from glob import glob
# (z_weighted, p) configurations; one is selected by the `argnum` CLI argument.
ARGS = [(False, 1.0),
        (False, 1.6),
        (True, 1.6),
        (True, 1.0)
        ]
# MCMC chain dimensions: steps per walker, and number of walkers.
ITERATIONS = 500
WALKERS = 100
def main(argnum, kmin):
    """Run a joint MCMC fit for the configuration selected by *argnum* and *kmin*.

    Builds the fit keyword dict, derives a hashed output directory under
    $EBOSS_FITS, picks the next free chain filename, and launches the fit.
    """
    z_weighted, p = ARGS[argnum]

    # the data configuration to load
    config = {
        'version': 'v1.9f',
        'krange': '%s-0.3' % kmin,
        'params': 'basemodel-N-fnl',
        'zrange': '0.8-2.2',
        'z_weighted': z_weighted,
        'p': p,
        'ells': [0],
    }
    hashstr = make_hash(config)

    # output directory, keyed by the configuration hash
    output = osp.join(os.environ['EBOSS_FITS'], 'data', config['version'],
                      config['krange'], config['params'], config['zrange'],
                      'QSO-N+S-%s' % hashstr)
    if not osp.exists(output):
        os.makedirs(output)

    # next free chain filename (numbered by how many .npz files already exist)
    existing = len(glob(osp.join(output, '*npz')))
    output = osp.join(output, 'chain_%dx%d_%d.npz' % (ITERATIONS, WALKERS, existing))
    print(output)

    # run the fit
    run_joint_mcmc_fit('data', ITERATIONS, WALKERS,
                       output, config, joint_params=['f_nl'])
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser()
    # argnum selects one (z_weighted, p) pair from ARGS above
    parser.add_argument("argnum", type=int, choices=[0, 1, 2, 3])
    parser.add_argument('kmin', type=str, choices=["0.0001", "0.005"])
    ns = parser.parse_args()
    main(ns.argnum, ns.kmin)
|
6,124 | fb9d639bca59ecb081e7d9f30f97bdcd35627d34 | # -*- coding: utf-8 -*-
class FizzBuzz:
    """Classic FizzBuzz converter."""

    def convert(self, number):
        """Return 'Fizz'/'Buzz'/'FizzBuzz' for multiples of 3/5/15, else str(number).

        Simplified from the original chain of compound conditions: checking
        divisibility by 15 first makes the remaining tests independent.
        """
        if number % 15 == 0:
            return "FizzBuzz"
        if number % 3 == 0:
            return "Fizz"
        if number % 5 == 0:
            return "Buzz"
        return str(number)
6,125 | 4c010f9d9e7813a4ae4f592ade60130933b51958 | #/usr/share/python3
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
import numpy as np
import seaborn as sb
import pandas as pd
from pmlb import fetch_data, classification_dataset_names
import util
# from os.path import exists, join
# from os import makedirs
# scores a model on the data [X y]
def score_model(X, y, model):
    """Fit *model* on a random split of (X, y) and return its held-out score."""
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
# returns dict of scores (keyed by names) after running each model on the provided data
@util.timeout(180)
def compare(X, y, model_list, model_names, n_times=10):
    """Score every model class in *model_list* n_times on (X, y).

    Returns {model_name: [mean score]} -- the value is a one-element list so
    results can be merged into a DataFrame downstream.
    Fixes the "Tring" typo in the progress message.
    """
    total = {}
    for i, m in enumerate(model_list):
        print("  Trying model {}: ".format(i), end="", flush=True)
        results = [score_model(X, y, m()) for _ in range(n_times)]
        mean = np.mean(results)
        print(mean)
        total[model_names[i]] = [mean]
    return total
def main():
    """Benchmark each classifier on every PMLB classification dataset, pickling results."""
    ds_names = classification_dataset_names
    models = [LogisticRegression, GradientBoostingClassifier]
    model_names = ["LogisticRegression", "GradientBoosting"]
    results = {}
    for i, n in enumerate(ds_names):
        try:
            print("Iteration: {}/{} '{}'".format(i+1, len(ds_names), n))
            X, y = fetch_data(n, return_X_y=True)
            results = util.merge_dicts(results,
                    compare(X, y, models, model_names)) # updates results
            # checkpoint after every dataset in case a later one crashes
            pd.DataFrame(results).to_pickle('labels.pkl')
        except util.TimeoutError:
            print("Timed Out!")
    print("Done!")
    # final write with human-readable dataset-name index
    df = pd.DataFrame(results)
    df = df.rename(index=util.list_to_idx_dict(ds_names))
    df.to_pickle("labels.pkl")
# Command-line entry point.
if __name__ == "__main__":
    main()
|
6,126 | ccfc78ae430f835244e0618afdeebe960c868415 | #!/usr/bin/env python
'''
Usage:
dep_tree.py [-h] [-v] [-p P] [-m component_map]
repos_root top_dir [top_depfile]
Parse design dependency tree and generate build scripts and other useful files
positional arguments:
repos_root repository root
top_dir top level design directory
top_depfile top level dep file
optional arguments:
-h, --help show this help message and exit
-v verbosity
-p P output product: x (xtclsh script); s (Modelsim script); c
(component list}; a (address table list); b (address
decoder script); f (flat file list)
-m component_map location of component map file
-D set or override script directives
default: nothing is done
---
Repository layout in each component / top-level area:
firmware/cfg: contains .dep files and project config files
firmware/hdl: contains source files
firmware/cgn: contains XCO core build files
/addr_table: contains uHAL address table XML files
---
.dep file format
# Comment line
common options:
-c component_name: look under different component to find referenced file
-d: descend a level in dir tree to find referenced file
-s dir: look in subdir path to find referenced file
include [dep_file_list]
default is to take file component_name.dep
setup [-z] [tcl_file_list]
default is to take file component_name.tcl
-z: coregen project configuration script
src [-l library] [-g] [-n] src_file_list
src_file_list under firmware/hdl by default; may contain glob patterns
-g: find 'generated' src in ipcore directory
-n: for XCO files, build but don't include
addrtab [-t] [file_list]
default is to reference file component_name.xml
-t: top-level address table file
---
component_map file format
logical_name physical_dir
The 'physical_dir' is relative to the trunk/
'''
from __future__ import print_function
import argparse
import sys
import os
import time
import glob
from dep_tree.DepFileParser import DepFileParser
from dep_tree.CommandLineParser import CommandLineParser
from dep_tree.Pathmaker import Pathmaker
from dep_tree.AddressTableGeneratorWriter import AddressTableGeneratorWriter
from dep_tree.AddressTableListWriter import AddressTableListWriter
from dep_tree.ComponentListWriter import ComponentListWriter
from dep_tree.IPCoreSimScriptWriter import IPCoreSimScriptWriter
from dep_tree.ModelsimScriptWriter import ModelsimScriptWriter
from dep_tree.SourceListWriter import SourceListWriter
from dep_tree.SourceListWriter2 import SourceListWriter2
from dep_tree.XtclshScriptWriter import XtclshScriptWriter
from dep_tree.VivadoScriptWriter import VivadoScriptWriter
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def main():
    """Parse the dep-file tree and dispatch to the writer selected by -p.

    Python 2 code (uses dict.iteritems); every path exits via SystemExit.
    """
    #--------------------------------------------------------------
    # Set up the three objects which do the real hardwork
    lCommandLineArgs = CommandLineParser().parse()
    lPathmaker = Pathmaker( lCommandLineArgs.root , lCommandLineArgs.top , lCommandLineArgs.componentmap , lCommandLineArgs.verbosity )
    lDepFileParser = DepFileParser( lCommandLineArgs , lPathmaker )
    #--------------------------------------------------------------

    #--------------------------------------------------------------
    # Assign the product handlers to the appropriate commandline flag and check we know how to handle the requested product
    lWriters = {
        "c":ComponentListWriter , # Output file lists
        "f":SourceListWriter , # Output file lists
        "f2":SourceListWriter2 , # Output file lists
        "a":AddressTableListWriter , # Output file lists
        "b":AddressTableGeneratorWriter , # Output address table generator file
        "s":ModelsimScriptWriter , # Output Modelsim script
        "ip":IPCoreSimScriptWriter , # Output IPSim script
        "x":XtclshScriptWriter , # Output xtclsh script
        "v":VivadoScriptWriter # Output vivado script
    }
    if lCommandLineArgs.product not in lWriters:
        raise SystemExit( "No handler for product option '{0}' supplied".format( lCommandLineArgs.product ) )
    #--------------------------------------------------------------

    #--------------------------------------------------------------
    # Set the entrypoint for depfile parsing
    lTopFile = lPathmaker.getpath( lCommandLineArgs.top , "include" , lCommandLineArgs.dep )
    #--------------------------------------------------------------

    #--------------------------------------------------------------
    # Debugging
    if lCommandLineArgs.verbosity > 0:
        print( "Top:" , lTopFile )
    #--------------------------------------------------------------

    #--------------------------------------------------------------
    # Parse the requested dep file
    lDepFileParser.parse( lTopFile , lCommandLineArgs.top )
    #--------------------------------------------------------------

    #--------------------------------------------------------------
    # Debugging: summarize commands, build settings, and any missing files
    if lCommandLineArgs.verbosity > 0:
        print( "-"*20 )
        for i,j in sorted( lDepFileParser.CommandList.iteritems() ):
            print( i , ":" , len( j ) , "files" )
        print( "-"*20 )
        print( "Build settings:" )
        for i,j in sorted( lDepFileParser.ScriptVariables.iteritems() ):
            print( " " , i , ":" , j )
        print( "-"*20 )
    if len( lDepFileParser.FilesNotFound ):
        print( "-"*20 )
        print( "Warning: Files not found" )
        for i in lDepFileParser.FilesNotFound:
            print ( ">" , i )
        print( "-"*20 )
    #--------------------------------------------------------------

    #--------------------------------------------------------------
    # Look up the Writer object in the dictionary, create an object of that type and call the write function
    try:
        lWriters[lCommandLineArgs.product]( lCommandLineArgs , lPathmaker ).write( lDepFileParser.ScriptVariables , lDepFileParser.ComponentPaths , lDepFileParser.CommandList , lDepFileParser.Libs, lDepFileParser.Maps )
    except Exception as e:
        import sys, traceback
        traceback.print_exc(file=sys.stdout)
        print('ERROR:', e)
        raise SystemExit(-1)
    #--------------------------------------------------------------

    raise SystemExit(0)
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Command-line entry point.
if __name__ == '__main__':
    main()
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
6,127 | a18fad746a1da3327d79ac0a61edd156c5fb8892 |
class TrieTree(object):
    """A 26-way trie over lowercase ASCII words.

    ``size`` on a node counts inserted words of which that node's prefix is a
    *proper* prefix (the counter is bumped before descending at each step).
    Attribute name ``childern`` [sic] is kept for compatibility.
    """

    def __init__(self):
        self.size = 0
        self.childern = [None] * 26  # one slot per letter 'a'..'z'

    def insert(self, word):
        """Insert *word*, bumping the pass-through count of each visited node."""
        node = self
        for ch in word:
            idx = ord(ch) - ord('a')
            node.size += 1
            child = node.childern[idx]
            if child is None:
                child = TrieTree()
                node.childern[idx] = child
            node = child

    def search(self, word):
        """Return how many inserted words have *word* as a proper prefix (0 if absent)."""
        node = self
        for ch in word:
            idx = ord(ch) - ord('a')
            node = node.childern[idx]
            if node is None:
                return 0
        return node.size
# Quick demo: counts of inserted words extending each prefix.
tt=TrieTree()
tt.insert('abc')
tt.insert('abbcc')
print(tt.search('ab'))  # 2
print(tt.search('a'))   # 2
6,128 | ef0c9f740f1ca0906aeb7a5c5e5d35baca189310 | # pylint: disable=missing-docstring,function-redefined
import uuid
from behave import given, then, when
import requests
from features.steps import utils
from testsuite.oauth import authorize
from testsuite import fhir
# Canned assertion messages used by the steps below.
ERROR_AUTHORIZATION_FAILED = 'Authorization failed.'
ERROR_BAD_CONFORMANCE = 'Could not parse conformance statement.'
ERROR_OAUTH_DISABLED = 'OAuth is not enabled on this server.'
# Formatted with (err.args[0] message, err.args[1] -- presumably a screenshot
# path, judging by the name -- and the vendor host base URL).
ERROR_SELENIUM_SCREENSHOT = '''
An authorization error occurred: {0}
For more information, see:
{2}{1}
'''
@given('OAuth is enabled')
def step_impl(context):
    """Fail fast unless the vendor config enables OAuth and the conformance parses."""
    assert context.vendor_config['auth']['strategy'] != 'none', \
        ERROR_OAUTH_DISABLED
    if context.conformance is None:
        assert False, ERROR_BAD_CONFORMANCE
    # Raises if the conformance statement lacks the OAuth URIs.
    fhir.get_oauth_uris(context.conformance)
@given('I am logged in')
def step_impl(context):
    """Require an authorized OAuth session with a live access token."""
    assert context.oauth is not None, ERROR_AUTHORIZATION_FAILED
    assert context.oauth.access_token is not None, \
        ERROR_AUTHORIZATION_FAILED
@given('I am not logged in')
def step_impl(context):
    """Drop the current access token to simulate a logged-out state."""
    context.oauth.access_token = None
@when('I log in')
def step_impl(context):
    """Run the full authorization flow, failing with a screenshot link on error."""
    try:
        context.oauth.authorize()
    except authorize.AuthorizationException as err:
        error = ERROR_SELENIUM_SCREENSHOT.format(
            err.args[0],
            err.args[1],
            context.vendor_config['host'],
        )
        assert False, error
@when('I ask for authorization without the {field_name} field')
def step_impl(context, field_name):
    """ A step 1 implementation with a named field missing.
    """
    fields = {
        'response_type': 'code',
        'client_id': context.vendor_config['auth']['client_id'],
        'redirect_uri': context.vendor_config['auth']['redirect_uri'],
        'scope': context.vendor_config['auth']['scope'],
        'state': uuid.uuid4(),
    }
    del fields[field_name]  # deliberately omit the field under test

    uris = fhir.get_oauth_uris(context.conformance)

    response = requests.get(uris['authorize'],
                            params=fields,
                            allow_redirects=False,
                            timeout=5)

    # Stash the raw response for later "then" steps to inspect.
    context.response = response
@when('I ask for authorization with the following override')
def step_impl(context):
    """Launch authorization with launch params overridden from the step table."""
    urls = fhir.get_oauth_uris(context.conformance)
    authorizer = authorize.Authorizer(config=context.vendor_config['auth'],
                                      authorize_url=urls['authorize'])
    with authorizer:
        parameters = authorizer.launch_params
        parameters.update(dict(context.table))  # table rows override defaults
        try:
            authorizer.ask_for_authorization(parameters)
            response = authorizer.provide_user_input()
        except authorize.AuthorizationException as err:
            error = ERROR_SELENIUM_SCREENSHOT.format(
                err.args[0],
                err.args[1],
                context.vendor_config['host'],
            )
            assert False, error
        # Stash what was sent/received for later "then" assertions.
        context.authorizer = authorizer
        context.authorization_sent = parameters
        context.authorization_received = response
@when('I ask for authorization')
def step_impl(context):
    """Request an authorization code, failing with a screenshot link on error."""
    try:
        context.code = context.oauth.request_authorization()
    except authorize.AuthorizationException as err:
        error = ERROR_SELENIUM_SCREENSHOT.format(
            err.args[0],
            err.args[1],
            context.vendor_config['host'],
        )
        assert False, error
@when('I exchange my authorization code')
def step_impl(context):
    """ A fully formed and correct step 3 implementation.
    """
    fields = {
        'grant_type': 'authorization_code',
        'code': context.code,
        'client_id': context.vendor_config['auth']['client_id'],
        'redirect_uri': context.vendor_config['auth']['redirect_uri'],
    }
    # Stash the token response for later "then" steps to inspect.
    context.response = token_request(fields,
                                     context.vendor_config['auth'],
                                     context.conformance)
@when('I exchange my authorization code without the {field_name} field')
def step_impl(context, field_name):
    """ A step 3 implementation missing a named field.
    """
    fields = {
        'grant_type': 'authorization_code',
        'code': context.code,
        'client_id': context.vendor_config['auth']['client_id'],
        'redirect_uri': context.vendor_config['auth']['redirect_uri'],
    }
    del fields[field_name]  # deliberately omit the field under test
    context.response = token_request(fields,
                                     context.vendor_config['auth'],
                                     context.conformance)
@when('I exchange my authorization code with the following override')
def step_impl(context):
    """ A step 3 implementation with a table specified override.
    """
    fields = {
        'grant_type': 'authorization_code',
        'code': context.code,
        'client_id': context.vendor_config['auth']['client_id'],
        'redirect_uri': context.vendor_config['auth']['redirect_uri'],
    }
    fields.update(dict(context.table))  # step-table rows override the defaults
    context.response = token_request(fields,
                                     context.vendor_config['auth'],
                                     context.conformance)
@then('the authorization response redirect should validate')
def step_impl(context):
    """Check the redirect's state and code; report a readable diff on failure."""
    try:
        response = context.authorization_received
        context.authorizer._validate_state(response)  # pylint: disable=protected-access
        context.authorizer._validate_code(response)  # pylint: disable=protected-access
    except AssertionError as err:
        assert False, utils.bad_redirect_assert(err,
                                                context.authorization_sent,
                                                response)
@when('I ask for a new access token')
def step_impl(context):
    """ A fully formed and correct implementation of step 5.
    """
    fields = {
        'grant_type': 'refresh_token',
        'refresh_token': context.oauth.refresh_token,
        'scope': context.vendor_config['auth']['scope'],
    }
    context.response = token_request(fields,
                                     context.vendor_config['auth'],
                                     context.conformance)
@when('I ask for a new access token without the {field_name} field')
def step_impl(context, field_name):
    """ A step 5 implementation missing a named field.
    """
    fields = {
        'grant_type': 'refresh_token',
        'refresh_token': context.oauth.refresh_token,
        'scope': context.vendor_config['auth']['scope'],
    }
    del fields[field_name]  # deliberately omit the field under test
    context.response = token_request(fields,
                                     context.vendor_config['auth'],
                                     context.conformance)
def token_request(post_data, auth_config, conformance):
    """ Make a token request.

    Should be modeled after `testsuite.oauth.authorization_code._token_request`.

    Args:
        post_data (dict): The parameters to send.
        auth_config (dict): The vendor auth config.
        conformance (dict): The server's conformance statement so that URIs can be determined.

    Returns:
        A requests Response object.
    """
    # Confidential clients authenticate with HTTP Basic; public clients send none.
    credentials = None
    if auth_config.get('confidential_client'):
        credentials = requests.auth.HTTPBasicAuth(auth_config['client_id'],
                                                  auth_config['client_secret'])

    token_uri = fhir.get_oauth_uris(conformance)['token']
    return requests.post(token_uri,
                         data=post_data,
                         allow_redirects=False,
                         auth=credentials,
                         timeout=5)
|
6,129 | 92c247b827d2ca4dce9b631a2c09f2800aabe216 | import main
from pytest import approx
def test_duration():
    """Convert the sample files and check each output's duration matches its input."""
    ins = main.convert()
    for name in ins.multiconvert():
        # name[0] is the input; name[1] and name[2] are the two outputs.
        for out_file in (name[1], name[2]):
            induration, outduration = ins.ffprobe(name[0], out_file)
            assert induration == approx(outduration)
    print("All files are converted successfully!")
# Allow running the check directly, outside pytest.
if __name__ == '__main__':
    test_duration()
|
6,130 | 2ffe4b0eb7af9b3a4d5724442b5409d27bfa92a1 | import math
def max_heapity(arr, start, end):
    """Sift arr[start] down within arr[start..end] (inclusive) to restore the max-heap property."""
    root = start
    child = 2 * root + 1
    while child <= end:
        # Pick the larger of the two children.
        if child + 1 <= end and arr[child] < arr[child + 1]:
            child += 1
        # Stop once the root already dominates its larger child.
        if arr[root] >= arr[child]:
            break
        arr[root], arr[child] = arr[child], arr[root]
        root = child
        child = 2 * root + 1
def build_max_heap(arr):
    """Rearrange *arr* in place into a max-heap (bottom-up heapify)."""
    last = len(arr) - 1
    for root in range(len(arr) // 2 - 1, -1, -1):
        max_heapity(arr, root, last)
def heap_sort(arr):
    """Sort *arr* ascending in place via heapsort (prints the intermediate heap)."""
    build_max_heap(arr)
    print("Max Heap:", arr)
    # Repeatedly move the max (root) to the end of the unsorted region, then
    # sift the new root down over the shrunken range until fully sorted.
    for end in range(len(arr) - 1, 0, -1):
        arr[0], arr[end] = arr[end], arr[0]
        max_heapity(arr, 0, end - 1)
if __name__ == "__main__":
    # Self-test on two sample arrays; expected states noted inline.
    data = [38, 14, 57, 59, 52, 19]
    print("Original:", data)
    heap_sort(data) # heap: [59, 52, 57, 14, 38, 19]
    print("Sorted:", data) # [14, 19, 38, 52, 57, 59]
    print()
    data = [9, 15, 12, 23, 33, 26, 7, 31, 42, 36]
    print("original:", data)
    heap_sort(data) # [42, 36, 26, 31, 33, 12, 7, 15, 23, 9]
    print("Sorted:", data) # [7, 9, 12, 15, 23, 26, 31, 33, 36, 42]
|
6,131 | 1f953b20ff0eb868c2fbff367fafa8b651617e64 | #!/usr/bin/env python3
import sys
from argparse import ArgumentParser
from arg_checks import IsFile, MinInt
from visualisation import Visualisation
parser = ArgumentParser(description="Visualises DS simulations")
# The order of arguments in descending order of file frequency is: config, failures, log.
# This should be the preferable order when using ds-viz via command-line.
# However, failure-free simulations should also be supported, so the failure argument is optional
parser.add_argument("config", action=IsFile,
                    help="configuration file used in simulation")
parser.add_argument("log", action=IsFile,
                    help="simulation log file to visualise")
parser.add_argument("-f", "--failures", metavar="RESOURCE_FAILURES", action=IsFile,
                    help="resource-failures file from simulation")
parser.add_argument("-c", "--core_height", type=int, default=8, action=MinInt, min_int=1,
                    help="set core height, minimum value of 1")
# NOTE(review): default=sys.maxsize presumably means "no scaling" -- confirm in Visualisation
parser.add_argument("-s", "--scale", type=int, default=sys.maxsize, action=MinInt,
                    help="set scaling factor of visualisation")
parser.add_argument("-w", "--width", type=int, default=1, action=MinInt, min_int=1,
                    help="set visualisation width as a multiple of window width, minimum value of 1")
args = parser.parse_args()
viz = Visualisation(args.config, args.failures, args.log, args.core_height, args.scale, args.width)
viz.run()
|
6,132 | 46b8d0ba58d4bf17021b05fc03bd480802f65adf | # -*- coding: utf-8 -*-
"""Utilities for reading BEL Script."""
import time
from typing import Iterable, Mapping, Optional, Set
from .constants import (
ANNOTATION_PATTERN_FMT, ANNOTATION_URL_FMT, NAMESPACE_PATTERN_FMT, NAMESPACE_URL_FMT, format_annotation_list,
)
__all__ = [
'make_knowledge_header',
]
def make_knowledge_header(name: str,
                          version: Optional[str] = None,
                          description: Optional[str] = None,
                          authors: Optional[str] = None,
                          contact: Optional[str] = None,
                          copyright: Optional[str] = None,
                          licenses: Optional[str] = None,
                          disclaimer: Optional[str] = None,
                          namespace_url: Optional[Mapping[str, str]] = None,
                          namespace_patterns: Optional[Mapping[str, str]] = None,
                          annotation_url: Optional[Mapping[str, str]] = None,
                          annotation_patterns: Optional[Mapping[str, str]] = None,
                          annotation_list: Optional[Mapping[str, Set[str]]] = None,
                          ) -> Iterable[str]:
    """Iterate over the lines of a full BEL document header.

    Emits the metadata section, then namespace definitions, then annotation
    definitions, followed by the '#| Statements' banner.

    :param name: The unique name for this BEL document
    :param version: The version. Defaults to current date in format ``YYYYMMDD``.
    :param description: A description of the contents of this document
    :param authors: The authors of this document
    :param contact: The email address of the maintainer
    :param copyright: Copyright information about this document
    :param licenses: The license applied to this document
    :param disclaimer: The disclaimer for this document
    :param namespace_url: an optional dictionary of {str name: str URL} of namespaces
    :param namespace_patterns: An optional dictionary of {str name: str regex} namespaces
    :param annotation_url: An optional dictionary of {str name: str URL} of annotations
    :param annotation_patterns: An optional dictionary of {str name: str regex} of regex annotations
    :param annotation_list: An optional dictionary of {str name: set of names} of list annotations
    """
    sections = (
        make_document_metadata(
            name=name,
            contact=contact,
            description=description,
            authors=authors,
            version=version,
            copyright=copyright,
            licenses=licenses,
            disclaimer=disclaimer,
        ),
        make_document_namespaces(
            namespace_url=namespace_url,
            namespace_patterns=namespace_patterns,
        ),
        make_document_annotations(
            annotation_url=annotation_url,
            annotation_patterns=annotation_patterns,
            annotation_list=annotation_list,
        ),
    )
    for section in sections:
        yield from section

    banner = '#' * 80
    yield banner
    yield '#| Statements'
    yield banner
def make_document_metadata(name: str,
                           version: Optional[str] = None,
                           contact: Optional[str] = None,
                           description: Optional[str] = None,
                           authors: Optional[str] = None,
                           copyright: Optional[str] = None,
                           licenses: Optional[str] = None,
                           disclaimer: Optional[str] = None,
                           ) -> Iterable[str]:
    """Iterate over the lines of the document-metadata section of a BEL document.

    Name and Version lines are always emitted; the remaining SET DOCUMENT
    lines appear only when their value is truthy.

    :param name: The unique name for this BEL document
    :param version: The version. Defaults to the current date in ``YYYYMMDD`` format.
    :param contact: The email address of the maintainer
    :param description: A description of the contents of this document
    :param authors: The authors of this document
    :param copyright: Copyright information about this document
    :param licenses: The license applied to this document
    :param disclaimer: The disclaimer for this document
    """
    banner = '#' * 80
    yield banner
    yield '#| Metadata'
    yield banner + '\n'

    yield 'SET DOCUMENT Name = "{}"'.format(name)
    yield 'SET DOCUMENT Version = "{}"'.format(version or time.strftime('%Y%m%d'))

    optional_entries = (
        ('Description', description and description.replace('\n', '')),
        ('Authors', authors),
        ('ContactInfo', contact),
        ('Licenses', licenses),
        ('Copyright', copyright),
        ('Disclaimer', disclaimer),
    )
    for key, value in optional_entries:
        if value:
            yield 'SET DOCUMENT {} = "{}"'.format(key, value)

    yield ''
def make_document_namespaces(namespace_url: Optional[Mapping[str, str]] = None,
                             namespace_patterns: Optional[Mapping[str, str]] = None,
                             ) -> Iterable[str]:
    """Iterate over the lines of the namespace-definition section.

    :param namespace_url: dictionary of {str name: str URL} of namespaces
    :param namespace_patterns: A dictionary of {str name: str regex}
    """
    banner = '#' * 80
    yield banner
    yield '#| Namespaces'
    yield banner

    if namespace_url:
        yield '\n# Enumerated Namespaces'
        yield '# ---------------------'
        for ns_name in sorted(namespace_url):
            yield NAMESPACE_URL_FMT.format(ns_name, namespace_url[ns_name])

    if namespace_patterns:
        yield '\n# Regular Expression Namespaces'
        yield '# -----------------------------'
        for ns_name in sorted(namespace_patterns):
            yield NAMESPACE_PATTERN_FMT.format(ns_name, namespace_patterns[ns_name])

    yield ''
def make_document_annotations(annotation_url: Optional[Mapping[str, str]] = None,
                              annotation_patterns: Optional[Mapping[str, str]] = None,
                              annotation_list: Optional[Mapping[str, Set[str]]] = None,
                              ) -> Iterable[str]:
    """Iterate over the lines of the annotation-definition section.

    The section banner is emitted only when at least one kind of annotation
    is supplied; the trailing blank line is always emitted.

    :param annotation_url: A dictionary of {str name: str URL} of annotations
    :param annotation_patterns: A dictionary of {str name: str regex}
    :param annotation_list: A dictionary of {str name: set of name str}
    """
    if annotation_url or annotation_patterns or annotation_list:
        banner = '#' * 80
        yield banner
        yield '#| Annotations'
        yield banner

    if annotation_url:
        yield '\n# Enumerated Annotations'
        yield '# ----------------------'
        for ann_name in sorted(annotation_url):
            yield ANNOTATION_URL_FMT.format(ann_name, annotation_url[ann_name])

    if annotation_patterns:
        yield '\n# Regular Expression Annotations'
        yield '# ------------------------------'
        for ann_name in sorted(annotation_patterns):
            yield ANNOTATION_PATTERN_FMT.format(ann_name, annotation_patterns[ann_name])

    if annotation_list:
        yield '\n# Locally Defined Annotations'
        yield '# ---------------------------'
        for annotation in sorted(annotation_list):
            yield format_annotation_list(annotation, annotation_list[annotation])

    yield ''
|
6,133 | beda3d13e3dc12f7527f5c5ba8a0eb05c2734fd9 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 4 15:19:49 2018
@author: haoyu
"""
import numpy as np
def train_test_split(X, y, test_ratio = 0.2, seed = None):
    '''Split X and y into random train/test subsets.

    :param X: sample array of shape (n_samples, ...)
    :param y: label array of shape (n_samples, ...)
    :param test_ratio: fraction of samples assigned to the test split
    :param seed: optional seed for reproducible shuffling
    :return: X_train, X_test, y_train, y_test
    '''
    assert X.shape[0] == y.shape[0], \
        'the size of X must be equal to the size of y'
    assert 0.0 <= test_ratio <=1.0, \
        'test_ratio must be valid'

    # BUG FIX: `if seed:` silently ignored seed=0; compare against None so
    # every explicitly supplied seed takes effect.
    if seed is not None:
        np.random.seed(seed)

    shuffle_indexes = np.random.permutation(len(X))  # shuffled sample indices
    test_size = int(len(X) * test_ratio)
    test_indexes = shuffle_indexes[:test_size]
    train_indexes = shuffle_indexes[test_size:]

    X_train = X[train_indexes]
    y_train = y[train_indexes]

    X_test = X[test_indexes]
    y_test = y[test_indexes]

    return X_train, X_test, y_train, y_test
6,134 | 5e20a517131f7a372d701548e4f370766a84ba52 | """
Definition of SegmentTreeNode:
"""
class SegmentTreeNode:
    """Node of a segment tree covering the inclusive interval [start, end]."""

    def __init__(self, start, end):
        # Interval covered by this node.
        self.start = start
        self.end = end
        # Children are attached later by the tree builder.
        self.left = None
        self.right = None
class Solution:
    """
    @param: start: start value.
    @param: end: end value.
    @return: The root of Segment Tree.
    """
    def build(self, start, end):
        """Recursively build a segment tree over the inclusive range [start, end]."""
        if start > end:
            return None
        root = SegmentTreeNode(start, end)
        if start == end:
            return root
        else:
            # BUG FIX: the recursion called Solution.build(start, a, b) so the
            # first value filled the `self` slot and the sub-ranges were wrong;
            # recurse through self with the proper halves instead.
            mid = (start + end) // 2
            root.left = self.build(start, mid)
            root.right = self.build(mid + 1, end)
            return root
# Smoke-test script: print a greeting to stdout.
print("Hello world! im in github")
|
6,136 | 16cd89a43a1985276bd14d85ad8ddb990c4d82c3 | import discord
from discord.ext import commands
import datetime
from discord.utils import get
from discord import User
class Sinner(commands.Converter):
    """Argument converter resolving a member who may be punished.

    Raises BadArgument for staff (anyone holding manage_messages).
    """

    async def convert(self, ctx, argument):
        member = await commands.MemberConverter().convert(ctx, argument)
        if member.guild_permissions.manage_messages:
            raise commands.BadArgument("You cannot punish other staff members")
        return member
class Redeemed(commands.Converter):
    """Argument converter resolving a member only if they hold the Muted role."""

    async def convert(self, ctx, argument):
        member = await commands.MemberConverter().convert(ctx, argument)
        muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
        if muted_role not in member.roles:
            raise commands.BadArgument("The user was not muted.")
        return member
async def mute(ctx, user, reason="No reason"):
    """Give *user* the "Muted" role, creating the role (with channel
    permission overrides) on first use, then announce the mute.

    :param ctx: command invocation context
    :param user: the member to mute
    :param reason: free-text reason echoed in the announcement
    """
    role = discord.utils.get(ctx.guild.roles, name="Muted")
    if not role:
        try:
            # First mute on this guild: create the role and deny it messaging
            # in every existing channel.
            muted = await ctx.guild.create_role(name="Muted", reason="To use for muting")
            for channel in ctx.guild.channels:
                await channel.set_permissions(muted, send_messages=False,
                                              read_message_history=False,
                                              read_messages=False)
        except discord.Forbidden:
            return await ctx.send("I have no permissions to make a muted role")
        await user.add_roles(muted)
        await ctx.send(f"{user.mention} has been muted for {reason}")
    else:
        await user.add_roles(role)
        await ctx.send(f"{user.mention} has been muted for {reason}")
    # NOTE(review): hard-coded channel id — presumably a dedicated "muted"
    # channel in the deployment guild; confirm it exists, and confirm this
    # message is meant to be sent in both branches above.
    channel = ctx.bot.get_channel(718865797006753892)
    await channel.send(f"{user.mention}, welcome to the bad kids club.")
class Moderation(commands.Cog):
    """Moderation Commands"""

    def __init__(self, bot):
        # Bot reference kept for lookups (fetch_user, get_channel, ...).
        self.bot = bot

    @commands.command(name="ban")
    @commands.has_permissions(ban_members=True)
    async def ban(self, ctx, member: discord.Member, *, reason="No reason"):
        """Bans someone"""
        if member is None or member == ctx.message.author:
            await ctx.send("You cannot ban yourself!")
            return
        try:
            # BUG FIX: the old code called fetch_user(int(member)) on an
            # already-converted discord.Member (int() on a Member raises)
            # and chained two awaits with `or`; banning the converted member
            # directly is sufficient.
            await member.ban(reason=reason)
        except discord.Forbidden:
            await ctx.send(f"It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.")
        else:
            embed = discord.Embed(title=f"`{ctx.author}` banned {member}", colour=member.color, timestamp=datetime.datetime.utcnow())
            embed.add_field(name="● Details:", value=f" - Reason: {reason}")
            embed.set_footer(icon_url=f"{ctx.author.avatar_url}", text=f"{ctx.author.top_role.name} ")
            await ctx.send(embed=embed)
            print(ctx.author.name, 'used the command ban')

    @commands.command()
    @commands.has_permissions(ban_members=True)
    async def unban(self, ctx, member, *, reason="No reason"):
        """Unbans a previously banned user, looked up by id."""
        print("unbanned")
        if member is None or member == ctx.message.author:
            await ctx.send("You cannot unban yourself!")
            return
        try:
            # `member` arrives as a raw string here (no converter), so the
            # int()/fetch_user lookup is correct for banned users.
            member = await self.bot.fetch_user(int(member))
            await ctx.guild.unban(member, reason=reason)
        except discord.Forbidden:
            await ctx.send(f"It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.")
        else:
            await ctx.send(f"`{member}` was unbanned by **{ctx.author.name}**.")
            print(ctx.author.name, 'used the command unban')

    @commands.command(name="kick")
    @commands.has_permissions(kick_members=True)
    async def kick(self, ctx, member: discord.Member, *, reason="No reason"):
        """Kicks someone"""
        if member is None or member == ctx.message.author:
            await ctx.send("You cannot kick yourself!")
            return
        try:
            await member.kick(reason=reason)
        except discord.Forbidden:
            await ctx.send(f"It looks like i dont have the permission `KICK_MEMBERS` to do this. Please check my permissions and try running the command again.")
        else:
            embed = discord.Embed(title=f"`{ctx.author}` kicked {member}", colour=member.color, timestamp=datetime.datetime.utcnow())
            embed.add_field(name="● Details:", value=f" - Reason: {reason}")
            embed.set_footer(icon_url=f"{ctx.author.avatar_url}", text=f"{ctx.author.top_role.name} ")
            await ctx.send(embed=embed)
            print(ctx.author.name, 'used the command kick')

    @commands.command(name="clear")
    @commands.has_permissions(manage_messages=True)
    async def clear(self, ctx, amount: int):
        """Clears messages."""
        channel = ctx.channel
        try:
            # +1 so the invoking command message is removed as well.
            await channel.purge(limit=amount+1)
        except discord.Forbidden:
            await ctx.send(f"It looks like i dont have the permission `MANAGE_MESSAGES` to do this. Please check my permissions and try running the command again.")
        else:
            await ctx.send(f"{amount} messages deleted.")

    @clear.error
    async def clear_error(self, ctx, error):
        """Report user errors for `clear`, then re-raise for global handling."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("You need to specify an amount of messages, i can't purge air...")
        if isinstance(error, commands.BadArgument):
            await ctx.send("Give me a valid number.")
        if isinstance(error, commands.CheckFailure):
            await ctx.send(f"{ctx.author.name}, you don't have permission to use this command.")
        raise error

    @kick.error
    async def kick_error(self, ctx, error):
        """Report user errors for `kick`, then re-raise for global handling."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("You need to tell me who to kick.")
        if isinstance(error, commands.BadArgument):
            await ctx.send("Is that a person?")
        if isinstance(error, commands.CheckFailure):
            await ctx.send(f"{ctx.author.name}, you don't have permission to use this command.")
        raise error

    @ban.error
    async def ban_error(self, ctx, error):
        """Report user errors for `ban`, then re-raise for global handling."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("You need to tell me who to ban.")
        if isinstance(error, commands.BadArgument):
            await ctx.send("Is that a person?.")
        if isinstance(error, commands.CheckFailure):
            await ctx.send(f"{ctx.author.name}, you don't have permission to use this command.")
        raise error

    @commands.command()
    async def mute(self, ctx, user: Sinner, reason=None):
        """Mutes a user."""
        # BUG FIX: this guard referenced the undefined name `member`
        # (NameError on every invocation); the converted parameter is `user`.
        if user is None or user == ctx.message.author:
            await ctx.send("You cannot mute yourself!")
            return
        await mute(ctx, user, reason or "treason")

    @commands.command()
    async def unmute(self, ctx, user: Redeemed):
        """Unmutes a muted user"""
        # BUG FIX: same undefined-`member` guard as in mute(); use `user`.
        if user is None or user == ctx.message.author:
            await ctx.send("You cannot unmute yourself!")
            return
        await user.remove_roles(discord.utils.get(ctx.guild.roles, name="Muted"))
        await ctx.send(f"{user.mention} has been unmuted")

    @mute.error
    async def mute_error(self, ctx, error):
        """Report user errors for `mute`."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("You need to tell me who do you want to mute.")
        if isinstance(error, commands.BadArgument):
            await ctx.send("Is that a person?")
        if isinstance(error, commands.CheckFailure):
            await ctx.send(f"{ctx.author.name}, you don't have permissions to use this command.")

    @unmute.error
    async def unmute_error(self, ctx, error):
        """Report user errors for `unmute`."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("You need to tell me who do you want to unmute.")
        if isinstance(error, commands.BadArgument):
            await ctx.send("Is that a person?")
        if isinstance(error, commands.CheckFailure):
            await ctx.send(f"{ctx.author.name}, you don't have permissions to use this command.")

    @unban.error
    async def unban_error(self, ctx, error):
        """Report user errors for `unban`."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("You need to tell me who do you want to unban.")
        if isinstance(error, commands.BadArgument):
            await ctx.send("Is that a person?")
        if isinstance(error, commands.CheckFailure):
            await ctx.send(f"{ctx.author.name}, you don't have permissions to use this command.")
def setup(bot):
    """Extension entry point: register the Moderation cog on the bot."""
    bot.add_cog(Moderation(bot))
|
# Classify IPv4 addresses read as "ip~mask" lines from stdin.
# Counters: l = [class A, class B, class C, class D, class E,
#                errors (malformed ip or invalid mask), private addresses]
n = 1
ip = []
ma = []
l = [0, 0, 0, 0, 0, 0, 0]  # a, b, c, d, e, wpm, pr
while n != 0:
    a = input().strip().split("~")
    n = len(a)
    if n == 1:
        # A line without '~' terminates the input.
        break
    ip.append(a[0])
    ma.append(a[1])
for i in ip:
    ipn = i.split(".")
    try:
        first = int(ipn[0])
        if first in (0, 127):
            # BUG FIX: was `elif int(ipn[0]) == 0 or 127:` which is always
            # truthy; 0.x.x.x and loopback 127.x.x.x are skipped, not counted.
            continue
        if 1 <= first <= 126:
            p = 0
        elif 128 <= first <= 191:
            p = 1
        elif 192 <= first <= 223:
            p = 2
        elif 224 <= first <= 239:
            p = 3
        elif 240 <= first <= 255:
            # BUG FIX: was `int(ipn(0))` — calling the list raised TypeError,
            # so class-E addresses were always miscounted as errors.
            p = 4
        else:
            # BUG FIX: a first octet outside 0-255 previously fell through
            # with `p` stale from the previous address; count it as an error.
            l[5] += 1
            continue
        if 0 <= int(ipn[1]) <= 255:
            # RFC 1918 private ranges override the class counter.
            if first == 10:
                p = 6
            elif first == 172 and 16 <= int(ipn[1]) <= 31:
                p = 6
            elif first == 192 and int(ipn[1]) == 168:
                p = 6
            if 0 <= int(ipn[2]) <= 255:
                if 0 <= int(ipn[3]) <= 255:
                    l[p] += 1
                else:
                    l[5] += 1
            else:
                l[5] += 1
        else:
            l[5] += 1
    except (ValueError, IndexError):
        # Non-numeric or missing octets.
        l[5] += 1
for m in ma:
    mn = m.split(".")
    # BUG FIX: the old check did bin(int("".join(octets))) on the *decimal*
    # digit string, which bears no relation to the mask's bit pattern. Build
    # the true 32-bit string and require the set bits to be contiguous from
    # the left (a "01" substring means a 0 precedes a 1 -> invalid mask).
    try:
        octets = [int(x) for x in mn]
        if len(octets) != 4 or not all(0 <= x <= 255 for x in octets):
            l[5] += 1
        else:
            bits = ''.join(format(x, '08b') for x in octets)
            if '01' in bits:
                l[5] += 1
    except ValueError:
        l[5] += 1
for o in l:
    print(str(o), end=" ")
6,138 | 04c1765e6c2302098be2a7f3242dfd536683f742 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-24 22:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Introduce the Location model and convert User.location to many-to-many.

    NOTE: migrations are historical records — do not edit operations that
    have already been applied.
    """

    dependencies = [
        ('users', '0026_auto_20160712_1541'),
    ]

    operations = [
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=50, null=True)),
                ('addr1', models.CharField(blank=True, max_length=50, null=True)),
                ('addr2', models.CharField(blank=True, max_length=50, null=True)),
                ('city', models.CharField(blank=True, max_length=50, null=True)),
                ('state', models.CharField(blank=True, max_length=50, null=True)),
                ('zip_code', models.CharField(blank=True, max_length=20, null=True)),
                ('phone_main', models.CharField(blank=True, max_length=20, null=True)),
                ('phone_other', models.CharField(blank=True, max_length=20, null=True)),
                ('notes', models.TextField(blank=True, null=True)),
            ],
        ),
        # Drop the old single-valued field before re-adding it as M2M.
        migrations.RemoveField(
            model_name='user',
            name='location',
        ),
        migrations.AddField(
            model_name='user',
            name='location',
            # NOTE(review): null=True has no effect on ManyToManyField (Django
            # emits a warning for it); harmless here but removable in a future
            # migration.
            field=models.ManyToManyField(blank=True, null=True, related_name='user_location', to='users.Location'),
        ),
    ]
|
6,139 | ecf09f2c503452fefc427e8dbe151e7bc7ef677e | import tensorflow as tf
class PolicyFullyConnected:
    """Single-hidden-layer policy/value network (TensorFlow 1.x graph API).

    Construction builds the graph: `observations` placeholder in,
    `probs` (softmax action distribution) and `values` (per-sample state
    value) out.
    """
    def __init__(self, observation_space, action_space, batch_size, reuse):
        # observation_space is indexed as (height, width) — a 2-D observation.
        height = observation_space[0]
        width = observation_space[1]
        self.observations = tf.placeholder(shape=(batch_size, height, width), dtype=tf.float32)
        with tf.variable_scope(name_or_scope="model", reuse=reuse):
            # Flatten (batch, H, W) -> (batch, H*W) for the dense layers.
            reshaped_observations = tf.reshape(tensor=tf.to_float(self.observations),
                                               shape=(batch_size, height * width))
            self.hidden = tf.layers.dense(inputs=reshaped_observations,
                                          units=256,
                                          activation=tf.nn.relu)
            # Unnormalised per-action scores -> softmax policy.
            logits = tf.layers.dense(inputs=self.hidden, units=action_space)
            self.probs = tf.nn.softmax(logits)
            # One value per batch element; [:, 0] drops the trailing unit axis.
            self.values = tf.layers.dense(inputs=self.hidden, units=1)[:, 0]
6,140 | d650f578ea30772489625ee26f3e4bf04131964b | from django.shortcuts import render, redirect
from .models import Game, Player, CardsInHand, Feedback
from django.db.models import Q
from .forms import GameForm, JoinForm, FeedbackForm
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.views.generic import CreateView
import json
# from django.contrib.auth.decorators import login_required
def get_joined_players(request, game_id):
    """Return the number of players who joined the game, as a plain-text body."""
    the_game = get_object_or_404(Game, pk=game_id)
    return HttpResponse(str(the_game.joined_players))
def create_new_game(request):
    """POST: create a game plus its first player; respond with their ids as JSON.
    GET: render the home page with the create/join/feedback forms.
    """
    if request.method == "POST":
        form_data = json.loads(request.body.decode('utf-8'))
        form = GameForm(form_data)
        if form.is_valid():
            number_of_players = form.cleaned_data["number_of_players"]
            new_game = Game(number_of_players=int(number_of_players))
            new_game.instantiate()  # initializes new game
            new_game.save()  # save new game to db
            # create first player
            new_player = Player(name=form.cleaned_data["creator_name"], game_id=new_game)
            new_player.save()
            # create new session to allow the user to play the game
            request.session['player_id'] = new_player.pk
            return JsonResponse({
                "code": new_game.code,
                "game_id": new_game.pk,
                "number_of_players": number_of_players,
            })
            # return render(request, "game_created.html", {
            #     "form": form,
            #     "game_code": new_game.code,
            #     "n_players": number_of_players,
            #     "game_id": new_game.pk,
            #     "your_name": new_player.name,
            # })
        else:
            return JsonResponse(form.errors.as_json(), safe=False, status=400)
    else:
        # set a dummy player id in player's session. this is needed to make
        # channels session persistence work (for matchmaking)
        if('player_id' not in request.session):
            request.session['player_id'] = 0
        create_form = GameForm(initial={'number_of_players': '2'})
        join_form = JoinForm()
        feedback_form = FeedbackForm()
        return render(
            request,
            "newhome.html",
            {
                "create_form": create_form,
                "join_form": join_form,
                "feedback_form": feedback_form,
            }
        )
def join_game(request):
    """Join an existing game by code (POST JSON: code, name).

    Increments the game's joined_players counter, creates the Player, stores
    the player's id in the session, and deals cards once the last seat fills.
    Returns the game's pk as JSON.
    """
    if request.method != "POST":
        return HttpResponseRedirect("/game")
    form_data = json.loads(request.body.decode('utf-8'))
    form = JoinForm(form_data)
    if form.is_valid():
        code = int(form.cleaned_data['code'])
        input_name = form.cleaned_data['name']
    else:
        return JsonResponse(form.errors.as_json(), safe=False, status=400)
    game = get_object_or_404(Game, code=code)
    if(game.joined_players < game.number_of_players):
        # increment the number of players who joined this game
        game.joined_players = game.joined_players + 1
        game.save()
        # create player and append it to this game
        new_player = Player(name=input_name, game_id=game, player_number=game.joined_players)
        new_player.save()
        # create new session to allow user to play
        request.session['player_id'] = new_player.pk
        if(new_player.player_number == game.number_of_players):
            # last player joined: deal cards to all players; game can now begin
            game.deal_cards_to_players()
    return JsonResponse(game.pk, safe=False)
def game(request, game_id):
    """Render the game page, rejecting users who are not players of this game."""
    err_str = ''
    this_game = get_object_or_404(Game, pk=game_id)
    print(request.session.keys())
    # if game is over, redirect to home
    if this_game.has_been_won:
        return redirect(create_new_game)
    # get players who joined this game
    players = Player.objects.filter(game_id=game_id)
    if('player_id' not in request.session):  # check if user has a session variable player_id
        err_str = "Unauthenticated user"
    # NOTE(review): when 'player_id' is absent the lookup below raises
    # KeyError before err_str is rendered — confirm whether an early return
    # was intended here.
    this_player = get_object_or_404(Player, pk=request.session['player_id'])
    if(this_player not in players):  # check if this player has joined the game
        err_str = "La partita richiesta non esiste o si è già conclusa."
    if err_str != '':
        return render(
            request,
            'error.html',
            {
                'error': err_str,
            },
            status=403
        )
    return render(request, 'gametest.html', {
        'game_id': this_game.pk,
        'number_of_players': this_game.number_of_players,
    })
def feedback_create(request):
    """Validate and persist a feedback message posted as JSON."""
    if request.method != "POST":
        return HttpResponseRedirect("/game")
    payload = json.loads(request.body.decode('utf-8'))
    form = FeedbackForm(payload)
    if not form.is_valid():
        return JsonResponse(form.errors.as_json(), safe=False, status=400)
    # Store the validated feedback.
    feedback = Feedback(
        sender_name=form.cleaned_data['sender_name'],
        email=form.cleaned_data['email'],
        message=form.cleaned_data['message'],
    )
    feedback.save()
    return JsonResponse("[]", status=200, safe=False)
def restart_game(request, game_id):
    """Reset a finished game and re-deal, if the requester is one of its players."""
    this_game = get_object_or_404(Game, pk=game_id)
    # if game isn't over, redirect to home
    if not this_game.has_been_won:
        return redirect(create_new_game)
    # get players who joined this game
    players = Player.objects.filter(game_id=game_id)
    if('player_id' not in request.session):  # check if user has a session variable player_id
        return redirect(create_new_game)
    this_player = get_object_or_404(Player, pk=request.session['player_id'])
    if(this_player not in players):  # check if this player has joined the game
        return redirect(create_new_game)
    this_game.reset()
    this_game.deal_cards_to_players()
    return JsonResponse({'status': 'ok'})
|
6,141 | 0f3e19b02dbe508bc4e0ef7879af81a9eabfd8c9 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 16:11:46 2021
@author: Suman
"""
import numpy as np
import cv2
rect = (0,0,0,0)
startPoint = False
endPoint = False
def mark_object(event, x, y, flags, params):
    """Mouse callback: two left-clicks define opposite corners of `rect`."""
    global rect, startPoint, endPoint

    if event != cv2.EVENT_LBUTTONDOWN:
        return
    # A click after a completed rectangle discards it and starts over.
    if startPoint and endPoint:
        startPoint = False
        endPoint = False
        rect = (0, 0, 0, 0)
    if not startPoint:
        # First corner.
        rect = (x, y, 0, 0)
        startPoint = True
    elif not endPoint:
        # Opposite corner completes the rectangle.
        rect = (rect[0], rect[1], x, y)
        endPoint = True
cap = cv2.VideoCapture('movingball.mp4')
#Reading the first frame
(grabbed, frame) = cap.read()

# BUG FIX: create the window and register the mouse callback once, not on
# every loop iteration.
cv2.namedWindow('frame')
cv2.setMouseCallback('frame', mark_object)

while(cap.isOpened()):
    (grabbed, frame) = cap.read()
    if not grabbed:
        # BUG FIX: stop cleanly at end of stream instead of passing a None
        # frame to cv2.rectangle/imshow.
        break

    #drawing rectangle once both corners have been clicked
    if startPoint == True and endPoint == True:
        cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (0, 0, 255), 2)

    cv2.imshow('frame',frame)

    # 'q' quits; ~100 ms per frame.
    if cv2.waitKey(100)& 0xFF==ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
6,142 | d3af5ac87474a99f1ade222995884bc8e035ce35 | from room import Room
class Office(Room):
    """Room subclass representing an office.

    NOTE(review): __init__ overrides Room.__init__ without calling
    super().__init__(), so any Room initialisation is skipped — confirm
    this is intentional.
    """
    def __init__(self):
        pass
6,143 | 753617c189a88adee8430e994aa597c9db9410fe | from genericentity import GenericEntity as GEntity
import random as ran
class GenericBreeder(object):
    """Abstract genetic-algorithm driver: keeps a population, breeds the
    fittest parents each generation, and grows the population when progress
    stalls. Subclasses supply the domain hooks (CreateLife, MakeNewborn,
    CopulateSub, IsMaximal, PrintInfo, Struggle)."""

    def __init__(self, nlifesize, nparentsize, nlowestscore):
        self.Reset(nlifesize, nparentsize, nlowestscore)

    def Reset(self, nlifesize, nparentsize, nlowestscore):
        """(Re)initialise the population and the stagnation bookkeeping."""
        self.life = [self.CreateLife() for i in range(0, nlifesize)]
        self.lifesize = nlifesize
        self.parentsize = nparentsize
        self.parents = self.life[0:self.parentsize]
        self.lastscore = nlowestscore  # best score of the previous generation
        self.hardtimebuffer = 0        # consecutive generations without improvement

    def CopulateALot(self, parents, howmuch, list=[]):
        """Produce `howmuch` offspring from randomly paired parents.

        NOTE(review): `list` shadows the builtin and is a mutable default;
        kept for interface compatibility (the default is immediately
        replaced when empty, so it is never mutated while shared).
        """
        if(len(list) == 0):
            list = ([0] * howmuch)
        for index in range(0, howmuch):
            par1 = int(ran.random() * len(parents))
            par2 = int(ran.random() * len(parents))
            # Reuse an existing entity object when the caller supplied one.
            ob = list[index] if(index < len(list)) else 0
            tmpent = self.Copulate(parents[par1], parents[par2], obj=ob)
            list[index] = tmpent
        return list

    def Copulate(self, mom, dad, obj=0):
        """Combine two parents into one child; reuse `obj` when provided."""
        # Mutation/fitness factor passed to the newborn: the parents' mean score.
        finfac = (mom.GetScore() + dad.GetScore()) / 2
        if(obj != 0):
            nextadn = self.CopulateSub(obj.adn, mom, dad)
            obj.reset(nextadn, finfac)
            return obj
        else:
            nextadn = self.CopulateSub([0] * GEntity.adnsize, mom, dad)
            return self.MakeNewborn(nextadn, finfac)

    def MakeNewborn(self, nadn, mutsmo):
        raise NotImplementedError("MakeNewborn()")

    def CopulateSub(self, nextadn, mom, dad):
        raise NotImplementedError("CopulateSub()")

    @staticmethod
    def CreateLife():
        raise NotImplementedError("CreateLife()")

    def IsMaximal(self):
        raise NotImplementedError("IsMaximal()")

    def LetTimeFlow(self):
        """Run generations until IsMaximal() reports convergence."""
        gencount = 0
        while(True):
            gencount += 1
            self.life = self.CopulateALot(self.parents, self.lifesize)
            self.life.sort(key=SortByFitness)
            # BUG FIX: the original read the undefined global `life`; the
            # population lives in self.life.
            score = self.life[0].GetScore()
            print("\r[running] score: ", score, "\t size: ", self.lifesize, "\t gen: ", gencount, end="")
            self.PrintInfo(self.life[0])
            print(" ", end="")
            self.parents = self.life[0:self.parentsize]
            # Track stagnation: no improvement over the previous generation
            # bumps the buffer; improvement drains it.
            if(self.lastscore <= score):
                self.hardtimebuffer += 1
            else:
                self.hardtimebuffer -= 1
            if(self.hardtimebuffer < 0):
                self.hardtimebuffer = 0
            elif(self.hardtimebuffer > 3):
                # Prolonged stagnation: grow the population and shake it up.
                self.lifesize = int(self.lifesize * 1.1)
                self.Struggle()
            # BUG FIX: the original assigned the undefined `perfactor` to an
            # unused local; record this generation's best score so the
            # stagnation test above compares against the previous generation.
            self.lastscore = score
            if(self.IsMaximal()):
                break
        print("\n[ended] score: ", score, "\t size: ", self.lifesize, "\t gen: ", gencount, end="")
        self.PrintInfo(self.life[0])

    def PrintInfo(self, best):
        raise NotImplementedError("PrintInfo()")

    def Struggle(self):
        raise NotImplementedError("Struggle()")
6,144 | f3b3bee494493263f8b00827e6f3ff3a1dcd8c37 | import graphics
import ply.lex as lex
import ply.yacc as yacc
import jstokens
import jsgrammar
def interpret(trees): # Hello, friend
    """Walk the parse trees of an HTML-like document and render them.

    Each tree is a tagged tuple: "word-element" (render the word),
    "tag-element" (render the tag and recurse into its children), or
    "javascript-element" (lex, parse and interpret the embedded JavaScript
    and render its output).
    """
    # BUG FIX: `jsinterp` is used below but was never imported at module
    # level; import it here so javascript-element nodes do not raise
    # NameError.
    import jsinterp
    for tree in trees: # Hello,
        # ("word-element","Hello")
        nodetype = tree[0]  # "word-element"
        if nodetype == "word-element":
            graphics.word(tree[1])
        elif nodetype == "tag-element":
            # <b>Strong text</b>
            tagname = tree[1]        # b
            tagargs = tree[2]        # []
            subtrees = tree[3]       # ...Strong Text!...
            closetagname = tree[4]   # b
            if(tagname!=closetagname):
                graphics.warning("mismatched tag")
            else:
                graphics.begintag(tagname,tagargs)
                interpret(subtrees)
                graphics.endtag()
        elif nodetype == "javascript-element":
            jstext = tree[1]; # "document.write(55);"
            jslexer = lex.lex(module=jstokens)
            jsparser = yacc.yacc(module=jsgrammar)
            jstree = jsparser.parse(jstext,lexer=jslexer)
            # jstree is a parse tree for JavaScript
            result = jsinterp.interpret(jstree)
            graphics.word(result)
6,145 | e5a71250ca9f17798011d8fbfaee6a3d55446598 | from connect.client import ClientError, ConnectClient, R
def test_import_client():
    """The legacy `cnct` package must re-export ConnectClient unchanged."""
    from cnct import ConnectClient as MovedConnectClient
    assert MovedConnectClient == ConnectClient
def test_import_error():
    """The legacy `cnct` package must re-export ClientError unchanged."""
    from cnct import ClientError as MovedClientError
    assert MovedClientError == ClientError
def test_import_r():
    """The legacy `cnct` package must re-export R unchanged."""
    from cnct import R as MovedR
    assert MovedR == R
6,146 | afa22db946f77e9b33a443657592c20fbea21eb1 | from setup import app, manager
from Users.controller import user_controller
from Test.controller import test_controller
# Register the feature blueprints on the main Flask app: every URL beginning
# with /test (e.g. http://127.0.0.1/test/...) is routed to test_controller —
# only its defined endpoints respond, anything else is a 404 — while user
# routes hang off the site root.
app.register_blueprint(test_controller, url_prefix="/test")
app.register_blueprint(user_controller, url_prefix="/")

if __name__ == "__main__":
    # Development server; debug must be disabled in production.
    app.run(debug=True)
    #manager.run()
6,147 | cd9f94d55eb13f5fc9959546e89a0af8ab2ea0db | import urllib2
import urllib
import json
import gzip
from StringIO import StringIO
# Disambiguate triple concepts against BabelNet via the Babelfy HTTP API.
# (Python 2 script: urllib2 / StringIO / print statement.)
service_url = 'https://babelfy.io/v1/disambiguate'
lang = 'EN'
key = ''  # NOTE(review): Babelfy API key is empty — must be supplied before running
filehandle = open('triples/triples2.tsv') # the triples and the sentences where the triples were extracted
filehandle_write = open('triples/disambiguated_triples_sentence.tsv', 'a')
for line in filehandle:
    # Each row: concept1 | relation | concept2 | sentence
    splitted = line.split('|')
    concept1 = splitted[0].strip()
    relation = splitted[1].strip()
    concept2 = splitted[2].strip()
    sentence = splitted[3].strip()
    if concept1 not in sentence:
        # I do this for the triples extracted where the concept might not be
        # in the sentence but that sentence refers to the concept
        text = concept1+" "+sentence
    else:
        text = sentence
    # Babelfy synset ids for the two concepts; -1 means "not matched".
    babelnetid1 = -1
    babelnetid2 = -1
    params = {
        'text' : text,
        'lang' : lang,
        'key' : key
    }
    url = service_url + '?' + urllib.urlencode(params)
    request = urllib2.Request(url)
    request.add_header('Accept-encoding', 'gzip')
    response = urllib2.urlopen(request)
    if response.info().get('Content-Encoding') == 'gzip':
        buf = StringIO( response.read())
        f = gzip.GzipFile(fileobj=buf)
        data = json.loads(f.read())
        # retrieving data
        for result in data:
            # Character span of the disambiguated fragment inside `text`.
            charFragment = result.get('charFragment')
            cfStart = charFragment.get('start')
            cfEnd = charFragment.get('end')
            word = text[cfStart:cfEnd+1]
            print(word)
            synsetId = result.get('babelSynsetID')
            to_lower = word.lower()
            # Assign the synset to whichever concept the fragment starts with.
            if to_lower.startswith(concept1.lower()):
                babelnetid1 = synsetId
            if to_lower.startswith(concept2.lower()):
                babelnetid2 = synsetId
            print synsetId
    # Append the original triple plus both resolved synset ids.
    filehandle_write.write(concept1 + " | " + relation + " | " + concept2 + " | " + sentence+" | " + concept1+" | "+str(babelnetid1)+" | "+concept2+" | "+str(babelnetid2))
    filehandle_write.write('\n')
6,148 | 9e8ed462e429d6c6c0fe232431ee1e98721863e9 | import platform
import keyboard
import threading
import atexit
from threading import Timer
triggerCount = 0   # copy-key presses seen inside the current 0.6 s window
triggerTimer = -1  # threading.Timer while armed, or the int -1 when idle
result = None      # NOTE(review): apparently unused in this module
def cleanup ():
print 'cleanup before exit'
clearTimer()
keyboard
triggerCount = 0
def clearTimer ():
    """Cancel the pending double-press timer, if one is armed."""
    global triggerTimer
    global triggerCount
    try:
        # When no timer exists, triggerTimer is the int -1 and this probe
        # raises AttributeError, skipping the cancel below.
        triggerTimer.isAlive()
        if triggerTimer.isAlive():
            triggerTimer.cancel()
            triggerTimer = -1
    except AttributeError:
        pass
def startTimer ():
    """Arm a 0.6 s timer; if it fires, the press sequence is abandoned."""
    global triggerTimer
    triggerTimer = Timer(0.6, validTimeout)
    triggerTimer.start()
def validTimeout ():
    """Timer callback: second press did not arrive in time — reset state."""
    global triggerTimer
    global triggerCount
    clearTimer()
    triggerCount = 0
def onPresskey ():
    """Hotkey callback: detect a double copy-press within the 0.6 s window."""
    global triggerTimer
    global triggerCount
    triggerCount += 1
    clearTimer()
    if triggerCount == 2:
        # Second press inside the window: emit the event and reset.
        print('HOTKEY-COPY')
        triggerCount = 0
        clearTimer()
    else:
        # First press: start the window in which a second press counts.
        startTimer()
def registerCopyHotkey ():
    """Bind the copy shortcut (cmd+c on macOS, ctrl+c elsewhere) and block."""
    if (platform.system() == 'Darwin'):
        keyboard.add_hotkey('cmd+c', onPresskey)
    else:
        keyboard.add_hotkey('ctrl+c', onPresskey)
    # Block forever so the hotkey hook stays installed.
    keyboard.wait()
def main ():
    """Entry point: install the copy-hotkey listener (blocks)."""
    registerCopyHotkey()
if __name__ == '__main__':
    # Ensure timers are cancelled on interpreter shutdown.
    atexit.register(cleanup)
    main()
|
6,149 | 8dbcd7bba09f8acff860890d8201e016b587796d |
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# from sklearn import tree
# import joblib
# Train a decision tree to predict music genre and report hold-out accuracy.
music_data = pd.read_csv(r"C:\Users\junha\PythonProjects\predict_music_preferences\music.csv")
# print(music_data)
X = music_data.drop(columns=['genre'])  # features: every column but the label
y = music_data['genre']  # label column
# 80/20 random train/test split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print(predictions)
score = accuracy_score(y_test, predictions)
print(score)
# joblib.dump(model, 'music-recommender.joblib')
# tree.export_graphviz(model, out_file='music-recommender.dot',
#                      feature_names=['age', 'gender'],
#                      class_names=sorted(y.unique()),
#                      label='all', rounded= True,
#                      filled=True)
6,150 | 5cb7af5ded532058db7f5520d48ff418ba856f04 | import numpy as np
#
#
#
basedir = '/n/regal/pfister_lab/haehn/CREMITEST/'
# Paths to the per-volume patch arrays and their label arrays (.npy files).
testA = basedir + 'testA.npz.npy'
testA_targets = basedir + 'testA_targets.npz.npy'
testB = basedir + 'testB.npz.npy'
testB_targets = basedir + 'testB_targets.npz.npy'
testC = basedir + 'testC.npz.npy'
testC_targets = basedir + 'testC_targets.npz.npy'
counter = 0  # NOTE(review): apparently unused in the live code below
# testA = np.load(testA, mmap_mode='r')
# testA_count = testA.shape[0]
# testB = np.load(testB, mmap_mode='r')
# testB_count = testB.shape[0]
# testC = np.load(testC, mmap_mode='r')
# testC_count = testC.shape[0]
# all_count = testA_count + testB_count + testC_count
# #
# # allocate large array
# #
# PATCH_BYTES = 75*75
# NO_PATCHES = all_count
# P_SIZE = (NO_PATCHES, 4, 75,75) # rather than raveled right now
# p_rgba = np.zeros(P_SIZE, dtype=np.float32)
# p_rgba[0:testA_count] = testA
# p_rgba[testA_count:testA_count+testB_count] = testB
# p_rgba[testB_count:testB_count+testC_count] = testC
# # now store this bad boy
# np.save(basedir+'test.npy', p_rgba)
# print 'STORED BIG BOY!'
p_rgba = None # free them all
#
# same for targets
#
testA_targets = np.load(testA_targets)
testA_count = testA_targets.shape[0]
testB_targets = np.load(testB_targets)
testB_count = testB_targets.shape[0]
testC_targets = np.load(testC_targets)
testC_count = testC_targets.shape[0]
all_count = testA_count + testB_count + testC_count
NO_PATCHES = all_count
p_target = np.zeros(NO_PATCHES)
p_target[0:testA_count] = testA_targets
p_target[testA_count:testA_count+testB_count] = testB_targets
p_target[testB_count:testB_count+testC_count] = testC_targets
# now store this lady boy
np.save(basedir+'test_targets.npy', p_target)
print 'ALL DONE!'
# import numpy as np
# #
# #
# #
# basedir = '/n/regal/pfister_lab/haehn/CREMITEST/'
# testA = basedir + 'testA.npz.npy'
# testA_targets = basedir + 'testA_targets.npz.npy'
# testB = basedir + 'testB.npz.npy'
# testB_targets = basedir + 'testB_targets.npz.npy'
# testC = basedir + 'testC.npz.npy'
# testC_targets = basedir + 'testC_targets.npz.npy'
# counter = 0
# testA = np.load(testA, mmap_mode='r')
# testA_count = testA.shape[0]
# testB = np.load(testB, mmap_mode='r')
# testB_count = testB.shape[0]
# testC = np.load(testC, mmap_mode='r')
# testC_count = testC.shape[0]
# all_count = testA_count + testB_count + testC_count
# #
# # allocate large array
# #
# PATCH_BYTES = 75*75
# NO_PATCHES = all_count
# P_SIZE = (NO_PATCHES, 4, 75,75) # rather than raveled right now
# p_rgba = np.zeros(P_SIZE, dtype=np.float32)
# p_rgba[0:testA_count] = testA
# p_rgba[testA_count:testA_count+testB_count] = testB
# p_rgba[testB_count:testB_count+testC_count] = testC
# # now store this bad boy
# np.save(basedir+'test.npy', p_rgba)
# print 'STORED BIG BOY!'
# p_rgba = None # free them all
# #
# # same for targets
# #
# testA_targets = np.load(testA_targets)
# testB_targets = np.load(testB_targets)
# testC_targets = np.load(testC_targets)
# p_target = np.zeros(NO_PATCHES)
# p_target[0:testA_count] = testA_targets
# p_target[testA_count:testA_count+testB_count] = testB_targets
# p_target[testB_count:testB_count+testC_count] = testC_targets
# # now store this lady boy
# np.save(basedir+'test_targets.npy', p_target)
# print 'ALL DONE!'
|
6,151 | 94d303716eac7fa72370435fe7d4d1cdac0cdc48 | smodelsOutput = {'OutputStatus': {'sigmacut': 0.01, 'minmassgap': 5.0, 'maxcond': 0.2, 'ncpus': 1, 'file status': 1, 'decomposition status': 1, 'warnings': 'Input file ok', 'input file': 'inputFiles/scanExample/slha/100968509.slha', 'database version': '1.2.0', 'smodels version': '1.2.0rc'}, 'ExptRes': [{'maxcond': 0.0, 'theory prediction (fb)': 728.7491431153657, 'upper limit (fb)': 44.22312638711652, 'expected upper limit (fb)': None, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-16-033', 'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0, 'lumi (fb-1)': 35.9, 'dataType': 'upperLimit', 'r': 16.478915053090216, 'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)': 728.7491431153657, 'upper limit (fb)': 55.74859999999999, 'expected upper limit (fb)': None, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-16-036', 'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0, 'lumi (fb-1)': 35.9, 'dataType': 'upperLimit', 'r': 13.072061775817971, 'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)': 132.83976207255284, 'upper limit (fb)': 36.140272, 'expected upper limit (fb)': None, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-13-019', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 19.5, 'dataType': 'upperLimit', 'r': 3.675671341725177, 'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)': 0.9562482176560967, 'upper limit (fb)': 0.274, 'expected upper limit (fb)': 0.154, 'TxNames': ['T2', 'T5', 'TChiZZ'], 'Mass (GeV)': None, 'AnalysisID': 'CMS-SUS-13-012', 'DataSetID': '6NJet8_1250HT1500_450MHTinf', 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 19.5, 'dataType': 'efficiencyMap', 'r': 3.489956998744878, 'r_expected': 6.209404010753875, 'chi2': 13.063642260056689, 'likelihood': 6.008581252238334e-05}, {'maxcond': 0.0, 'theory prediction (fb)': 132.83976207255284, 'upper 
limit (fb)': 58.50226240000003, 'expected upper limit (fb)': None, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-02', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType': 'upperLimit', 'r': 2.270677348583237, 'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)': 9.084517413967422, 'upper limit (fb)': 4.2419, 'expected upper limit (fb)': 5.5524, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-02', 'DataSetID': 'SR2jm', 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType': 'efficiencyMap', 'r': 2.141615175739037, 'r_expected': 1.6361424634333661, 'chi2': 11.844156696751806, 'likelihood': 3.1390377843658383e-07}, {'maxcond': 0.0, 'theory prediction (fb)': 132.83976207255284, 'upper limit (fb)': 67.69032800000002, 'expected upper limit (fb)': 67.79354400000003, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-12-028', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 11.7, 'dataType': 'upperLimit', 'r': 1.9624629691933657, 'r_expected': 1.9594751097914693}, {'maxcond': 0.0, 'theory prediction (fb)': 0.7285976790027092, 'upper limit (fb)': 0.506, 'expected upper limit (fb)': 0.464, 'TxNames': ['T5'], 'Mass (GeV)': [[881.8, 541.4, 57.4], [881.8, 541.4, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-04', 'DataSetID': 'GtGrid_SR_7ej80_0bjet', 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType': 'efficiencyMap', 'r': 1.4399163616654331, 'r_expected': 1.5702536185403213, 'chi2': 7.225026655774327, 'likelihood': 0.0005573265805884188}, {'maxcond': 0.0, 'theory prediction (fb)': 132.83976207255284, 'upper limit (fb)': 97.78847200000001, 'expected upper limit (fb)': 69.450736, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-13-012', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 19.5, 'dataType': 'upperLimit', 'r': 1.358439899465377, 
'r_expected': 1.9127192845379328}, {'maxcond': 0.0, 'theory prediction (fb)': 4.245413557698921, 'upper limit (fb)': 4.0, 'expected upper limit (fb)': 4.16, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'ATLAS-CONF-2013-047', 'DataSetID': 'C Medium', 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType': 'efficiencyMap', 'r': 1.0613533894247302, 'r_expected': 1.0205321052160867, 'chi2': 2.344696287811548, 'likelihood': 8.123400145704854e-05}, {'maxcond': 0.0, 'theory prediction (fb)': 284.6597475, 'upper limit (fb)': 1041.0116, 'expected upper limit (fb)': None, 'TxNames': ['TChiWZ'], 'Mass (GeV)': [[163.6, 57.4], [165.0, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-12', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType': 'upperLimit', 'r': 0.2734453175161545, 'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)': 169.351124, 'upper limit (fb)': 1582.346, 'expected upper limit (fb)': None, 'TxNames': ['TChiWW'], 'Mass (GeV)': [[163.6, 57.4], [163.6, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-11', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType': 'upperLimit', 'r': 0.10702534338254717, 'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)': 0.10289469462216802, 'upper limit (fb)': 1.07, 'expected upper limit (fb)': 1.17, 'TxNames': ['TChiWW'], 'Mass (GeV)': [[163.6, 57.4], [163.6, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-11', 'DataSetID': 'WWa-DF', 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType': 'efficiencyMap', 'r': 0.09616326600202618, 'r_expected': 0.08794418343775044, 'chi2': 0.23492769120756485, 'likelihood': 0.0021296922629215516}, {'maxcond': 0.0, 'theory prediction (fb)': 0.09049519199332233, 'upper limit (fb)': 0.97, 'expected upper limit (fb)': 0.762, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'ATLAS-CONF-2013-054', 'DataSetID': '8j50 flavor 0 b-jets', 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 
'dataType': 'efficiencyMap', 'r': 0.09329401236424983, 'r_expected': 0.11876009447942563, 'chi2': 0.13085006931201093, 'likelihood': 0.005704888785414326}, {'maxcond': 0.0, 'theory prediction (fb)': 602.7377329999999, 'upper limit (fb)': 17857.06, 'expected upper limit (fb)': None, 'TxNames': ['TChiWZ'], 'Mass (GeV)': [[163.6, 57.4], [165.0, 57.4]], 'AnalysisID': 'CMS-SUS-16-034', 'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0, 'lumi (fb-1)': 35.9, 'dataType': 'upperLimit', 'r': 0.033753469664099235, 'r_expected': None}], 'Total xsec considered (fb)': 5455.932556090008, 'Missed Topologies': [{'sqrts (TeV)': 13.0, 'weight (fb)': 1525.2339345595758, 'element': "[[[jet]],[[jet],[jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 164.5650363, 'element': "[[],[[W]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 131.21450642075922, 'element': "[[[jet],[Z]],[[jet],[jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 131.09407599353733, 'element': "[[[jet]],[[jet],[jet],[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 125.30880443708375, 'element': "[[[jet]],[[jet],[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 109.09980502038648, 'element': "[[[jet],[jet]],[[jet],[jet],[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 87.78855441, 'element': "[[],[[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 23.328775686902066, 'element': "[[],[[jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 18.943846, 'element': "[[],[]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 11.23256793951906, 'element': "[[[jet],[Z]],[[jet],[jet],[Z]]] ('MET', 'MET')"}], 'Long Cascades': [{'sqrts (TeV)': 13.0, 'weight (fb)': 142.32664393305637, 'mother PIDs': [[1000021, 2000001], [1000021, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 113.78856056272761, 'mother PIDs': [[1000021, 1000021]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 2.556908397604195, 'mother PIDs': [[2000001, 2000002], [2000002, 2000003]]}, 
{'sqrts (TeV)': 13.0, 'weight (fb)': 1.658904680547042, 'mother PIDs': [[1000021, 2000002]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 1.5034517332026478, 'mother PIDs': [[1000002, 1000021]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 0.73751489438902, 'mother PIDs': [[1000021, 1000022]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 0.514380675953777, 'mother PIDs': [[1000001, 2000001], [1000001, 2000003], [1000003, 2000001], [1000003, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 0.22710347967142056, 'mother PIDs': [[1000002, 2000001], [1000002, 2000003]]}], 'Asymmetric Branches': [{'sqrts (TeV)': 13.0, 'weight (fb)': 1656.3887238722155, 'mother PIDs': [[1000021, 2000001], [1000021, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 164.5650363, 'mother PIDs': [[1000022, 1000024]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 126.94317745006455, 'mother PIDs': [[2000001, 2000001], [2000001, 2000003], [2000003, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 81.7049616, 'mother PIDs': [[1000022, 1000023]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 25.33546877159406, 'mother PIDs': [[1000022, 2000001], [1000022, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 8.580393075610981, 'mother PIDs': [[1000021, 1000022]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 6.08359281, 'mother PIDs': [[1000022, 1000025]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 2.055186185956878, 'mother PIDs': [[1000025, 2000001], [1000025, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 0.5969685251910638, 'mother PIDs': [[1000023, 2000001], [1000023, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 0.42547403652557386, 'mother PIDs': [[1000021, 1000025]]}], 'Outside Grid': [{'sqrts (TeV)': 13.0, 'weight (fb)': 0.07215987170114271, 'element': "[[[jet]],[[jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)': 0.021621502520314927, 'element': "[[[l]],[[l]]] ('MET', 'MET')"}]} |
6,152 | 1ea31a126417c2feb079339aa79f97ea9e38fa40 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DarkNet model."""
import mindspore.nn as nn
from mindspore.ops import operations as P
class Concat(nn.Cell):
    """Concatenate a tuple/list of tensors along a given dimension.

    Fixes two defects in the original: the method was named ``forward``
    (MindSpore dispatches to ``construct``, so it was never called), and it
    returned the Concat operator itself instead of applying it to ``x``.
    """
    def __init__(self, dimension=1):
        super(Concat, self).__init__()
        self.d = dimension  # axis to concatenate along
        self.concat = P.Concat(self.d)

    def construct(self, x):
        # x is expected to be a tuple of tensors with matching shapes on
        # every axis except self.d.
        return self.concat(x)
class Bottleneck(nn.Cell):
    """Standard bottleneck: 1x1 reduce -> 3x3 conv, with optional residual add."""

    def __init__(self, c1, c2, shortcut=True, e=0.5):  # ch_in, ch_out, shortcut, expansion
        super(Bottleneck, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1)
        # The residual add is only valid when input/output widths match.
        self.add = shortcut and c1 == c2

    def construct(self, x):
        out = self.cv2(self.cv1(x))
        if self.add:
            out = x + out
        return out
class BottleneckCSP(nn.Cell):
    """CSP Bottleneck (https://github.com/WongKinYiu/CrossStagePartialNetworks)."""

    def __init__(self, c1, c2, n=1, shortcut=True, e=0.5):  # ch_in, ch_out, number, shortcut, expansion
        super(BottleneckCSP, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, has_bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, has_bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        # BN + activation are applied to the channel-concatenated branches.
        self.bn = nn.BatchNorm2d(2 * c_, momentum=0.9, eps=1e-5)
        self.act = nn.LeakyReLU(0.1)
        self.m = nn.SequentialCell([Bottleneck(c_, c_, shortcut, e=1.0) for _ in range(n)])
        self.concat = P.Concat(1)

    def construct(self, x):
        # Main branch: 1x1 conv -> n bottlenecks -> 1x1 conv.
        main = self.cv3(self.m(self.cv1(x)))
        # Shortcut branch: plain 1x1 conv on the input.
        short = self.cv2(x)
        fused = self.concat((main, short))
        return self.cv4(self.act(self.bn(fused)))
class C3(nn.Cell):
    """CSP bottleneck with 3 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, e=0.5):  # ch_in, ch_out, number, shortcut, expansion
        super(C3, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)
        self.m = nn.SequentialCell([Bottleneck(c_, c_, shortcut, e=1.0) for _ in range(n)])
        self.concat = P.Concat(1)

    def construct(self, x):
        branch_a = self.m(self.cv1(x))  # bottleneck-chain branch
        branch_b = self.cv2(x)          # bypass branch
        return self.cv3(self.concat((branch_a, branch_b)))
class SPP(nn.Cell):
    # Spatial pyramid pooling layer used in YOLOv3-SPP.
    # NOTE(review): the three pooling kernels are hard-coded to 5/9/13 below,
    # so ``k`` only sizes cv2's input channels — confirm callers always pass
    # the default kernel set.
    def __init__(self, c1, c2, k=(5, 9, 13)):
        super(SPP, self).__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        # cv2 fuses the input plus one pooled map per kernel size.
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.maxpool1 = nn.MaxPool2d(kernel_size=5, stride=1, pad_mode='same')
        self.maxpool2 = nn.MaxPool2d(kernel_size=9, stride=1, pad_mode='same')
        self.maxpool3 = nn.MaxPool2d(kernel_size=13, stride=1, pad_mode='same')
        self.concat = P.Concat(1)

    def construct(self, x):
        x = self.cv1(x)
        m1 = self.maxpool1(x)
        m2 = self.maxpool2(x)
        m3 = self.maxpool3(x)
        # stride 1 + 'same' padding keeps spatial dims, so channel concat is valid
        concatm = self.concat((x, m1, m2, m3))
        return self.cv2(concatm)
class Focus(nn.Cell):
    """Focus width/height information into channel space.

    Slices the input into four pixel-parity sub-images, concatenates them on
    the channel axis (4*c1 channels) and fuses them with a convolution.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, act=True):
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, act)
        self.concat = P.Concat(1)

    def construct(self, x):
        width = P.Shape()(x)[2]
        height = P.Shape()(x)[3]
        even_even = x[..., 0:width:2, 0:height:2]
        odd_even = x[..., 1:width:2, 0:height:2]
        even_odd = x[..., 0:width:2, 1:height:2]
        odd_odd = x[..., 1:width:2, 1:height:2]
        stacked = self.concat((even_even, odd_even, even_odd, odd_odd))
        return self.conv(stacked)
class Focusv2(nn.Cell):
    """Focus variant that only applies the fusing convolution.

    Unlike ``Focus`` it performs no slicing itself; the conv still expects
    4*c1 input channels — presumably the space-to-depth rearrangement happens
    upstream in the data pipeline (TODO confirm).
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, act=True):
        super(Focusv2, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, act)

    def construct(self, x):
        return self.conv(x)
class SiLU(nn.Cell):
    """SiLU/Swish activation: x * sigmoid(x)."""

    def __init__(self):
        super(SiLU, self).__init__()
        self.sigmoid = P.Sigmoid()

    def construct(self, x):
        gate = self.sigmoid(x)
        return gate * x
def autopad(k, p=None):  # kernel, padding
    """Return 'same'-style padding for kernel size ``k`` when ``p`` is None.

    ``k`` may be an int or a sequence of ints; an explicit ``p`` wins.
    """
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    return [x // 2 for x in k]
class Conv(nn.Cell):
    # Standard convolution block: Conv2d -> BatchNorm -> activation (SiLU by default).
    def __init__(self, c1, c2, k=1, s=1, p=None,
                 dilation=1,
                 alpha=0.1,
                 momentum=0.97,
                 eps=1e-3,
                 pad_mode="same",
                 act=True):  # ch_in, ch_out, kernel, stride, padding
        """Conv + BN + activation.

        ``act`` may be True (SiLU), an nn.Cell instance, or anything else
        (identity). NOTE(review): only paddings 0 and 1 are mapped to a
        pad_mode ('same'/'pad'); autopad of k >= 5 yields padding >= 2 and
        leaves pad_mode as None — confirm k is always 1 or 3 at call sites.
        ``dilation``, ``alpha`` and the ``pad_mode`` parameter are unused here.
        """
        super(Conv, self).__init__()
        self.padding = autopad(k, p)
        self.pad_mode = None
        if self.padding == 0:
            self.pad_mode = 'same'
        elif self.padding == 1:
            self.pad_mode = 'pad'
        self.conv = nn.Conv2d(c1, c2, k, s, padding=self.padding, pad_mode=self.pad_mode, has_bias=False)
        self.bn = nn.BatchNorm2d(c2, momentum=momentum, eps=eps)
        self.act = SiLU() if act is True else (act if isinstance(act, nn.Cell) else P.Identity())

    def construct(self, x):
        return self.act(self.bn(self.conv(x)))
class YOLOv5Backbone(nn.Cell):
    """CSP backbone producing three increasingly-downsampled feature maps."""

    def __init__(self):
        super(YOLOv5Backbone, self).__init__()
        self.focusv2 = Focusv2(3, 32, k=3, s=1)
        self.conv1 = Conv(32, 64, k=3, s=2)
        self.C31 = C3(64, 64, n=1)
        self.conv2 = Conv(64, 128, k=3, s=2)
        self.C32 = C3(128, 128, n=3)
        self.conv3 = Conv(128, 256, k=3, s=2)
        self.C33 = C3(256, 256, n=3)
        self.conv4 = Conv(256, 512, k=3, s=2)
        self.spp = SPP(512, 512, k=[5, 9, 13])
        self.C34 = C3(512, 512, n=1, shortcut=False)

    def construct(self, x):
        """Return the C32, C33 and C34 outputs (128/256/512 channels)."""
        stem = self.conv1(self.focusv2(x))
        feat_a = self.C32(self.conv2(self.C31(stem)))
        feat_b = self.C33(self.conv3(feat_a))
        feat_c = self.C34(self.spp(self.conv4(feat_b)))
        return feat_a, feat_b, feat_c
|
6,153 | 81b9fc78d92fdc4392cb71a77fdfd354ff950ae3 | n, x0, y0 = list(map(int, input().split()))
cards = [y0] + list(map(int, input().split()))
# yの手持ちはゲームに関与するため、リストに加えてしまう
xs = [[-1] * (n+1) for i in range(n+1)]
ys = [[-1] * (n+1) for i in range(n+1)]
#xs[i][j] = xの手番で、xがcards[i]を持ちyがcards[j]を持っているとき(i<j)の最善スコア
#ys[i][j] = yの手番で、xがcards[j]を持ちyがcards[i]を持っているとき(i<j)の最善スコア
for i in range(n+1):
xs[i][-1] = abs(cards[-1] - cards[i])
ys[i][-1] = abs(cards[-1] - cards[i])
for j in range(n-1, -1, -1):
# x[i][j] = max (y[j][j+1] , y[j][j+2] , ……, y[j][n] )
xs_temp = max(ys[j][j+1:n+1])
ys_temp = min(xs[j][j+1:n+1])
for i in range(0, j):
xs[i][j] = xs_temp
ys[i][j] = ys_temp
# print(xs)
# print(ys)
print(max(ys[0][1:]))
|
6,154 | b066ab81eccee538eb3f85b49a3e46c00a947428 | # 데이터베이스 연동(SQLite)
# Table creation and row insertion.
# The DB helpers live in the pkg folder, so put the parent directory on
# sys.path before importing them.
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
# Import the DB helpers under shorter local names.
from pkg._DB_INFO import DbConn as dbConn
from pkg._DB_INFO import sysDate as nowDate

# Keep ONE connection object. The original code called dbConn() separately
# for the cursor and for commit()/rollback()/close(); if DbConn() builds a
# fresh connection per call, those calls acted on a *different* connection
# than the one the cursor writes through.
conn = dbConn()
c = conn.cursor()
nowDateTime = nowDate()
print('Cursor Type : ', type(c))

# Create the table (column types: Text, Numeric, Integer, Real, Blob).
# CREATE TABLE IF NOT EXISTS --> reuse the table when it already exists.
# PRIMARY KEY -> unique row id, duplicates rejected.
c.execute('CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username text, \
    email text, phone text, website text, regdate text)')
# id is a PRIMARY KEY, so re-running these INSERTs raises on duplicates.
# Single-row insert; the trailing comma keeps (nowDateTime,) a tuple
# instead of a bare parenthesized value.
c.execute("INSERT INTO users \
    VALUES(1, 'kang', 'abcdefg@aaa.com', '010-0000-0000', 'kang.com', ?)",
          (nowDateTime, ))
# Alternative insert style with an explicit column list.
c.execute('INSERT INTO users(id, username, email, phone, website, regdate) \
    VALUES(?,?, ?, ?, ?, ?)',
          (2, 'Park', 'Park@aaa.aaa', '010-0000-0001', 'Park.com', nowDateTime))
# Bulk-insert source: a tuple (or list) of row tuples.
userList = (
    (3, 'Lee', 'Lee@Lee.com', '010-1111-1111', 'Lee.com', nowDateTime),
    (4, 'Lee', 'Cho@Cho.com', '010-2222-2222', 'Cho.com', nowDateTime),
    (5, 'Yoo', 'Yoo@Yoo.com', '010-4444-4444', 'Yoo.com', nowDateTime)
)
# executemany() inserts every row in one call — handy for scraped data.
c.executemany("INSERT INTO users(id, username, email, phone, website, regdate)\
    VALUES (?,?,?,?,?,?)", userList)

# Row-deletion examples:
# c.execute('DELETE FROM users')
# print('users db delete : ', c.execute("DELETE FROM users").rowcount)

# Commit: with isolation_level=None the connection auto-commits; otherwise
# commit explicitly on the SAME connection the cursor belongs to:
# conn.commit()
# Rollback: undoes everything since the last commit:
# conn.rollback()
# Close the connection we actually used.
conn.close()
# c.execute('DROP TABLE users')  # drop the table
|
6,155 | b0aeede44a4b54006cf0b7d541d5b476a7178a93 | # Part 1 - Build the CNN
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
## Initialize the CNN
classifier = Sequential()
## Step 1 - Convolution Layer
## NOTE(review): Convolution2D/border_mode/output_dim/nb_epoch below are
## Keras 1.x argument names — confirm the installed Keras version accepts them.
classifier.add(Convolution2D(32, 3, 3,
                             border_mode = 'same',
                             input_shape = (64, 64, 3),
                             activation = 'relu' ))
## Step 2 - Max Pooling Layer
## Specify pool size of 2 x 2 for max summation
classifier.add(MaxPooling2D( pool_size = (2, 2) ))
## Can improve performance by adding another convolutional layer
## Since input is from pooled samples, don't need to specify input shape
## as Keras will have the shape
classifier.add(Convolution2D(32, 3, 3,
                             border_mode = 'same',
                             activation = 'relu' ))
classifier.add(MaxPooling2D( pool_size = (2, 2) ))
## Step 3 - Flattening
classifier.add(Flatten())
## Step 4 - Full Connection
### Add hidden layer
### Number of hidden nodes (128) was arbitrarily selected
### Use rectifier as activation again
classifier.add(Dense(output_dim = 128,
                     activation = 'relu'))
## Can also improve performance by adding another hidden layer
### Add output layer
### Use sigmoid function as activation
classifier.add(Dense(output_dim = 1,
                     activation = 'sigmoid'))
## Compile the CNN
## Use the adam stochastic descent algorithm
## Use the binary cross entropy function for the loss function because this is
## a logistic regression classifying a binary output
## Use accuracy for metrics function
classifier.compile(optimizer = 'adam',
                   loss = 'binary_crossentropy',
                   metrics = ['accuracy'])
# Part 2 - Fit the CNN to the images
## Need this for MacOS error about libiomp5.dylib
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
## Import ImageDataGenerator that will perform
## image augmentation (random transformations to increase
## data sample size from current set of images)
from keras.preprocessing.image import ImageDataGenerator
## Creating data augmenter for training images
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
## Create data augmenter for test images (rescale only, no distortions)
test_datagen = ImageDataGenerator(rescale = 1./255)
## Point training augmenter to training set
## class mode is 'binary' because it's a binary classification
training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'binary')
## Point test augmenter to test set
## class mode is 'binary' because it's a binary classification
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'binary')
## Fit the classifier to the augmented images
## NOTE(review): steps_per_epoch=8000 with batch_size=32 means 256k samples
## per epoch — confirm 8000 batches (not 8000 images) was intended.
classifier.fit_generator(training_set,
                         steps_per_epoch = 8000,
                         nb_epoch = 25,
                         validation_data = test_set,
nb_val_samples = 2000) |
6,156 | 3d2b8730953e9c2801eebc23b6fb56a1b5a55e3c | from sqlalchemy import create_engine, Column, Integer, Float, \
String, Text, DateTime, Boolean, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from flask_sqlalchemy import SQLAlchemy
# check_same_thread=False lets the SQLite connection be used from threads
# other than the one that created it (e.g. a threaded web server);
# SQLite itself still serializes writes.
engine = create_engine('sqlite:///app/databases/fays-web-dev.db', connect_args={'check_same_thread':False})
Session = sessionmaker(bind=engine)
session = Session()  # module-level session shared by importers of this module
Base = declarative_base()  # declarative base class for the ORM models
|
6,157 | f9261c1844cc629c91043d1221d0b76f6e22fef6 | import os.path as path
from googleapiclient.discovery import build
from google.oauth2 import service_account
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# The ID and range of a sample spreadsheet.
SAMPLE_SPREADSHEET_ID = '1FSMATLJUNCbV8-XYM8h7yHoWRSGA8JFsaECOZy_i2T8'
def main():
    """Authenticate with a service account and dump the sheet's chart data.

    Expects ``service_account.json`` next to this script; prints the pie
    chart, line chart and boolean data read by the helpers below.
    """
    # Resolve the credentials file relative to this script's directory.
    service_account_json = path.join(path.dirname(
        path.abspath(__file__)), 'service_account.json')
    credentials = service_account.Credentials.from_service_account_file(
        service_account_json, scopes=SCOPES)
    service = build('sheets', 'v4', credentials=credentials)
    sheet_service = service.spreadsheets()
    print('Getting pie chart information')
    get_pie_chart_info(sheet_service)
    print('Getting line chart information')
    get_line_chart_info(sheet_service)
    print('Getting boolean information')
    get_bool_info(sheet_service)
def get_pie_chart_info(sheet_service):
    """Print the race/breakdown pairs from columns F:G of the data sheet."""
    cell_range = 'data!F:G'
    response = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
                                          range=cell_range).execute()
    rows = response.get('values', [])
    if not rows:
        print('No data found.')
        return
    print('Race, Breakdown:')
    for entry in rows:
        print('%s, %s' % (entry[0], entry[1]))
def get_line_chart_info(sheet_service):
    """Print every value from column D of the data sheet, one per line."""
    cell_range = 'data!D:D'
    response = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
                                          range=cell_range).execute()
    rows = response.get('values', [])
    if not rows:
        print('No data found.')
        return
    print('Time series information:')
    for entry in rows:
        print('%s' % entry[0])
def get_bool_info(sheet_service):
    """Print whether cell B1 of the data sheet holds the string 'TRUE'.

    Fixes a copy-paste defect: the header previously read
    'Time series information:' although this function reports boolean data.
    """
    sample_range_name = 'data!B1'
    result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
                                        range=sample_range_name).execute()
    values = result.get('values', [])
    if not values:
        print('No data found.')
    else:
        print('Boolean information:')
        for row in values:
            # The Sheets API returns cell contents as strings.
            print(row[0] == 'TRUE')
if __name__ == '__main__':
main()
|
6,158 | ac664cd7d62f89399e37f74e0234b3ad244fe460 | # Generated by Django 3.1.4 on 2021-01-11 16:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: change Tutorial.upload to an ImageField stored under 'images'."""

    dependencies = [
        ('tutorials', '0003_auto_20210111_1705'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tutorial',
            name='upload',
            # Uploaded files land in MEDIA_ROOT/images.
            field=models.ImageField(upload_to='images'),
        ),
    ]
|
6,159 | b00c9f099fcb31262df947f47d7190912ee66965 | #-*- coding: utf-8 -*-
from django.db import models
from authentication.models import Account
class QuestionFaq(models.Model):
    """A user-submitted FAQ question."""
    title = models.CharField(max_length=50, verbose_name=u'Тема вопроса')  # question topic
    question = models.TextField(verbose_name=u'Задайте вопрос')  # question body
    date = models.DateField(auto_now_add=True)  # set once at creation time
    checked = models.BooleanField(default=False)  # moderation/answered flag

    class Meta:
        verbose_name = u'Вопрос в FAQ'
        verbose_name_plural = u'Вопросы в FAQ'

    def __unicode__(self):
        # Python-2-era Django string representation (shown in the admin).
        return self.title
class AnswerFaq(models.Model):
    """An answer by an Account to a QuestionFaq.

    NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax —
    consistent with __unicode__ below; confirm the project's Django version.
    """
    account = models.ForeignKey(Account)  # the answering user
    answer = models.TextField(verbose_name=u'Ответ на вопрос в FAQ')
    question = models.ForeignKey(QuestionFaq)  # the question being answered
    date = models.DateField(auto_now_add=True)  # set once at creation time

    class Meta:
        verbose_name = u'Ответ на вопрос в FAQ'
        verbose_name_plural = u'Ответы на вопросы в FAQ'

    def __unicode__(self):
        # "<full name> - вопрос: '<title>'" — shown in the admin list.
        return u'%s - вопрос: "%s"' % (
            self.account.get_full_name(),
            self.question.title)
|
6,160 | ce11a5c2fbd6e0ea0f8ab293dc53afd07a18c25c | from Modules.Pitch.Factory import MainFactory
from Modules.ToJson import Oto
from audiolazy.lazy_midi import midi2str
import utaupy
import string
import random
import math
import os, subprocess, shutil
def RandomString(Length):
    """Return a random string of ``Length`` lowercase ASCII letters."""
    pool = string.ascii_lowercase
    return ''.join(random.choice(pool) for _ in range(Length))
# Input song, voicebank and tool locations (Windows-style paths).
UST_FILE = "filet.ust"
OTO_FILE = "Voice\\NanaMio\\oto.ini"
VB_PATH = "Voice\\NanaMio"
RESAMPLER_PATH = "Resampler\\macres.exe"
WAVTOOL_PATH = "Resampler\\wavtool-yawu.exe"
CACHE_PATH = "Cache\\"
OUTPUT_FILE = "temp.wav"
UstObject = utaupy.ust.load(UST_FILE)
OtoObject = Oto(OTO_FILE)
# Only notes 4..27 of the UST are rendered.
UstParts = UstObject.notes[4:28]
# Start from an empty cache directory.
shutil.rmtree(os.path.join(os.getcwd(), CACHE_PATH))
os.mkdir(os.path.join(os.getcwd(), CACHE_PATH))
PreviousNote = -1   # note number of the previous rendered note (-1 = none/rest)
PreviousLength = 0  # length of the previous note
Tempo = round(float(UstObject.tempo))
MSPassed = 0        # running total of note lengths (pitch-bend timeline)
# Truncate/create the output wav. NOTE(review): the handle is never closed.
open(OUTPUT_FILE, "w+")
# Render each note: run the resampler to pitch/stretch the sample, then the
# wavtool to splice it (with envelope and overlap) onto the output wav.
for NIndex, Note in enumerate(UstParts):
    print("prevnote", PreviousNote)
    Rest = False
    # A lyric with no oto.ini entry is treated as a rest.
    if Note.lyric in OtoObject.keys():
        LocalOto = OtoObject[Note.lyric]
    else:
        LocalOto = None
        Rest = True
    Lyric = Note.lyric
    Length = Note.length
    NoteNum = Note.notenum
    PreUtterance = float(LocalOto["PreUtterance"]) if not Rest else 0
    Velocity = Note.velocity
    # try:
    #     PreUtterance = Note.get_by_key("PreUtterance")
    # except KeyError:
    #     PreUtterance = 0
    # Optional per-note properties: fall back to defaults when absent.
    try:
        StartPoint = Note.get_by_key("StartPoint")
    except KeyError:
        StartPoint = 0
    try:
        PBS = Note.pbs
    except KeyError:
        PBS = None
    try:
        PBW = Note["PBW"].split(",")
    except KeyError:
        PBW = None
    try:
        PBY = Note["PBY"].split(",")
        # Empty pitch-bend Y entries default to "0".
        for Index, Var in enumerate(PBY):
            if Var == "":
                PBY[Index] = "0"
    except KeyError:
        PBY = []
    try:
        PBM = Note.pbm
    except KeyError:
        PBM = []
    try:
        VBR = Note.get_by_key("VBR").split(",")
    except KeyError:
        VBR = None
    try:
        Flags = Note.get_by_key("Flags")
    except KeyError:
        Flags = "?"
    try:
        Modulation = Note.get_by_key("Modulation")
    except KeyError:
        Modulation = 100
    try:
        Intensity = Note.get_by_key("Intensity")
    except KeyError:
        Intensity = 100
    # NOTE(review): duplicate of the StartPoint lookup above — redundant.
    try:
        StartPoint = Note.get_by_key("StartPoint")
    except KeyError:
        StartPoint = 0
    try:
        Envelope = Note.get_by_key("Envelope")
        # '%' inside the envelope stands for the oto overlap value.
        Envelope = Envelope.replace("%", LocalOto["Overlap"]).split(",")
    except (KeyError, TypeError):
        Envelope = ["0","5","35","0","100","100","0"]
    # Zero-padded index keeps cache files in render order.
    FileOrder = f"{NIndex:05}"
    if Rest:
        # Parameters = [os.path.join(os.getcwd(), RESAMPLER_PATH),os.path.join(os.getcwd(), CACHE_PATH, SILENCE_FILE), os.path.join(os.getcwd(),f"{FileOrder}_Blank_{RandomString(6)}.wav"),utaupy.ust.notenum_as_abc(NoteNum),"100","?","0",str(int(Length//50 *50 if Length/50 - Length//50 < 0.5 else math.ceil(Length/50) * 50)),"0","0","100","0"]
        # Segment = AudioSegment.silent(duration=Length)
        # NOTE(review): OutputFile below is left over from the previous
        # non-rest iteration; a rest as the FIRST note raises NameError.
        WavtoolParam = [
            os.path.join(os.getcwd(), WAVTOOL_PATH),
            os.path.join(os.getcwd(), OUTPUT_FILE),
            OutputFile,
            str(MSPassed),
            str(Length)
        ] + (["0"] * 11)
        PreviousNote = -1
        MSPassed += float(Length)
        subprocess.call(WavtoolParam)
    else:
        # After a rest there is no meaningful previous pitch.
        if PreviousNote == -1:
            PrevNote = NoteNum
        else:
            PrevNote = int(PreviousNote)
        # Build the pitch-bend curve, sampled every 5 time units.
        if PBS is not None and PBW is not None:
            PB = MainFactory()
            PB.AddPitchBends(MSPassed, MSPassed + float(Length), PBS, PBW, PrevNoteNum=PrevNote, CurrentNoteNum=NoteNum, PBY=PBY, PBM=PBM, VBR=VBR)
            PitchBendData = PB.RenderPitchBends(int(math.ceil((MSPassed + PBS[0]) / 5)), int(math.floor((MSPassed + float(Length)) / 5)), NoteNum)
        else:
            PitchBendData = None
        # Bite Correction (The previous note should last for half the length before overlap)
        if PreUtterance - float(LocalOto["Overlap"]) > (PreviousLength // 2):
            CorrectionRate = (PreviousLength // 2) / (PreUtterance - float(LocalOto["Overlap"]))
            BitedPreUtterance = PreUtterance * CorrectionRate
            BitedOverlap = float(LocalOto["Overlap"]) * CorrectionRate
        else:
            BitedPreUtterance = PreUtterance
            BitedOverlap = float(LocalOto["Overlap"])
        BitedSTP = PreUtterance - BitedPreUtterance
        # Required sample length, rounded to the nearest 50; never shorter
        # than the consonant (fixed) part of the sample.
        LengthRequire = Length + float(StartPoint) - BitedSTP + BitedOverlap + 50
        if LengthRequire < float(LocalOto["Consonant"]):
            LengthRequire = float(LocalOto["Consonant"])
        LengthRequire = LengthRequire//50 *50 if LengthRequire/50 - LengthRequire//50 < 0.5 else math.ceil(LengthRequire/50) * 50
        InputFile = os.path.join(os.getcwd(), VB_PATH, LocalOto["File"])
        OutputFile = os.path.join(os.getcwd(), CACHE_PATH, f"{FileOrder}_{Lyric}_{RandomString(6)}.wav")
        # Resampler command line: input, output, pitch, velocity, flags,
        # oto offset/length/consonant/cutoff, intensity, modulation, tempo
        # and the serialized pitch-bend string (last two only when bending).
        Parameters = [
            os.path.join(os.getcwd(), RESAMPLER_PATH),
            InputFile,
            OutputFile,
            midi2str(NoteNum),
            str(Velocity),
            Flags,
            LocalOto["Offset"],
            str(int(LengthRequire)),
            LocalOto["Consonant"],
            LocalOto["Cutoff"],
            Intensity,
            Modulation,
            f"!{Tempo}" if PitchBendData is not None else "",
            f"{PitchBendData}" if PitchBendData is not None else ""
        ]
        print(Parameters)
        PreviousNote = NoteNum
        PreviousLength = float(Length)
        MSPassed += float(Length)
        subprocess.call(Parameters)
        # Splice-point correction uses the NEXT note's preutterance/overlap.
        if NIndex + 1 < len(UstParts) and UstParts[NIndex+1].lyric in OtoObject.keys():
            NextOto = OtoObject[UstParts[NIndex+1].lyric]
            NextPreUtterance = float(NextOto["PreUtterance"])
            NextOverlap = float(NextOto["Overlap"])
            WavtoolCorrection = PreUtterance - NextPreUtterance + NextOverlap
        else:
            WavtoolCorrection = PreUtterance
        # The wavtool length argument needs an explicit '+' for non-negative
        # corrections ("<length>@<tempo>±<correction>").
        sign = "+" if WavtoolCorrection >= 0 else ""
        WavtoolParam = [
            os.path.join(os.getcwd(), WAVTOOL_PATH),
            os.path.join(os.getcwd(), OUTPUT_FILE),
            OutputFile,
            str(float(StartPoint)),
            f"{Length}@{float(Tempo)}{sign}{WavtoolCorrection}"
        ] + [str(i) for i in Envelope]
        subprocess.call(WavtoolParam)
|
6,161 | 03854f48751460fdc27d42ee5c766934ee356cfd | import sys
sys.stdin = open('줄긋기.txt')
T = int(input())
for tc in range(1, T+1):
N = int(input())
dot = [list(map(int, input().split())) for _ in range(N)]
ran = []
for a in range(N-1):
for b in range(a+1, N):
if dot[a][1]-dot[b][1] == 0:
if 'inf' not in ran:
ran.append('inf')
else:
K = (dot[a][0]-dot[b][0]) / (dot[a][1]-dot[b][1])
if K not in ran:
ran.append(K)
print('#{} {}'.format(tc, len(ran))) |
6,162 | 3c22b187f8538e16c0105706e6aac2875ea3a25c | from django.db import models
class Subscribe(models.Model):
    """A newsletter subscription identified by an e-mail address."""
    mail_subscribe = models.EmailField('Пошта', max_length=40)  # subscriber e-mail

    def __str__(self):
        return self.mail_subscribe

    class Meta:
        verbose_name = 'підписку'
        verbose_name_plural = 'Підписки'
|
6,163 | e1829904cea51909b3a1729b9a18d40872e7c13c | from django.shortcuts import render, redirect
from .game import run
from .models import Match
from team.models import Team, Player
from django.urls import reverse
# Create your views here.
def startgame(request):
    """Render the pre-match screen for the two hard-coded teams (pks 1 and 2).

    Replaces ten duplicated ``Player.objects.get`` statements with
    comprehensions; the pk order (5N-4 .. 5N) is preserved exactly.
    """
    match = Match(team1_pk = 1, team2_pk = 2)
    team1 = Team.objects.get(pk = match.team1_pk)
    team2 = Team.objects.get(pk = match.team2_pk)
    # Players of team N are assumed to occupy pks 5N-4 .. 5N — TODO confirm.
    team1list = [Player.objects.get(pk = match.team1_pk * 5 - i) for i in range(4, -1, -1)]
    team2list = [Player.objects.get(pk = match.team2_pk * 5 - i) for i in range(4, -1, -1)]
    return render(request, 'match/startgame.html', {'team1': team1, 'team2': team2, 'team1list': team1list, 'team2list': team2list})
def results(request):
    """Run the 1-vs-2 match simulation and render the results page.

    Player rows are fetched only *after* run() executes, preserving the
    original call order — presumably run() updates each player's
    damage_dealt; confirm in game.run.
    """
    winner = run(1, 2)
    team1 = Team.objects.get(pk=1)
    team2 = Team.objects.get(pk=2)
    # Player pks are assumed seeded 1-5 (team 1) and 6-10 (team 2),
    # matching the original hand-unrolled lookups.
    team1list = [Player.objects.get(pk=pk) for pk in range(1, 6)]
    team2list = [Player.objects.get(pk=pk) for pk in range(6, 11)]
    team1damage = round(sum(p.damage_dealt for p in team1list), 2)
    team2damage = round(sum(p.damage_dealt for p in team2list), 2)
    # Each team starts at 500 HP; clamp so negative health is never shown.
    team1hp = max(round(500.0 - team2damage, 2), 0.0)
    team2hp = max(round(500.0 - team1damage, 2), 0.0)
    return render(request, 'match/results.html',
                  {'team1': team1, 'team2': team2,
                   'team1list': team1list, 'team2list': team2list,
                   'winner': winner,
                   'team1damage': team1damage, 'team2damage': team2damage,
                   'team1hp': team1hp, 'team2hp': team2hp})
6,164 | 4dde161d25ed41154e13b94cc9640c6aac055f87 | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
"""Constants."""
UNK_TOKEN = '<unk>'
BOS_TOKEN = '<bos>'
EOS_TOKEN = '<eos>'
PAD_TOKEN = '<pad>'
UNK_IDX = 0 # This should not be changed as long as serialized token
# embeddings redistributed on S3 contain an unknown token.
# Blame this code change and see commit for more context.
LARGE_POSITIVE_FLOAT = 1e18
LARGE_NEGATIVE_FLOAT = -LARGE_POSITIVE_FLOAT
# Maps a pretrained GloVe embedding name to its (.npz archive file name,
# SHA-1 checksum) pair, used to verify downloaded archives.
GLOVE_NPZ_SHA1 = \
    {'glove.42B.300d': ('glove.42B.300d.npz',
                        '7deee8f4860744db53ed9e50892effe9883e6d89'),
     'glove.6B.100d': ('glove.6B.100d.npz',
                       '01f80f202fcabcc3e0804898349087bfc191dd1c'),
     'glove.6B.200d': ('glove.6B.200d.npz',
                       '5e6e2bdab346c257f88d80d215d518e680d86e32'),
     'glove.6B.300d': ('glove.6B.300d.npz',
                       '1db264aa936be62f055dfb72854204450bdf4399'),
     'glove.6B.50d': ('glove.6B.50d.npz',
                      'aa16be8d184399d2199f83fd62586f2c30497bfa'),
     'glove.840B.300d': ('glove.840B.300d.npz',
                         'b4ba390c1154736e07c0e67d9180935f5930e83c'),
     'glove.twitter.27B.100d': ('glove.twitter.27B.100d.npz',
                                '0f7b82c223451d0002f79ba23596983cdbe0e2b1'),
     'glove.twitter.27B.200d': ('glove.twitter.27B.200d.npz',
                                '41cc2d26f58a54622ce96bf6c8434360ab524f20'),
     'glove.twitter.27B.25d': ('glove.twitter.27B.25d.npz',
                               '9f563d2f296995598cc46812b2fda05ad4c3c879'),
     'glove.twitter.27B.50d': ('glove.twitter.27B.50d.npz',
                               'ce9959c056f2a0a780c468feeb4f823af51630e9')}
FAST_TEXT_NPZ_SHA1 = \
{'crawl-300d-2M': ('crawl-300d-2M.npz',
'9dd611a1fe280c63050cd546d3595400fc0eede4'),
'wiki.aa': ('wiki.aa.npz',
'48f163b80eb37f1806142169d3d4c05cf75b7339'),
'wiki.ab': ('wiki.ab.npz',
'860ceff119dd27e5b701b605879037c1310cbc3e'),
'wiki.ace': ('wiki.ace.npz',
'62938287464040491719f56a6f521f8f808beee8'),
'wiki.ady': ('wiki.ady.npz',
'646843afa260d018ed711df3f1ca9c3e000447b6'),
'wiki.af': ('wiki.af.npz',
'7b14cd27690b67fea318d0bac2283c16430680e2'),
'wiki.ak': ('wiki.ak.npz',
'20f309adad1c45958c97b6055d5838e05bbaea72'),
'wiki.als': ('wiki.als.npz',
'a8b03aa133c4f7da12fc27c2b167b7918b1e9805'),
'wiki.am': ('wiki.am.npz',
'ed3dd10cea64737f7a1623612ee099df9dc19f66'),
'wiki.ang': ('wiki.ang.npz',
'8efe64706d9d6b8eae38b2c7ff0b277e20592bc7'),
'wiki.an': ('wiki.an.npz',
'168046283c719ab96a29b1abae2e25a6575c7be8'),
'wiki.arc': ('wiki.arc.npz',
'049021b7decea4bc009b12936e56b4dbf5b760e7'),
'wiki.ar': ('wiki.ar.npz',
'7e325e1e98dfcdc9368d2ebe40ee834a2ed44912'),
'wiki.arz': ('wiki.arz.npz',
'7d851c2c7be3ee6f7fd896de7b76ea08e3fb08b0'),
'wiki.as': ('wiki.as.npz',
'01d38c29cd4bd99c1a8534abc058822da14a5b9c'),
'wiki.ast': ('wiki.ast.npz',
'9c9846ba5084505a0adea89c95c66e04efbf5ce9'),
'wiki.av': ('wiki.av.npz',
'7ef6a920c364638504e673cfde5f7675503fa81e'),
'wiki.ay': ('wiki.ay.npz',
'c1202e110930e3902397f5cb64a8359e013b469f'),
'wiki.azb': ('wiki.azb.npz',
'10351b7ef14ec2cb610d290cb6a3f6987ef5d8b3'),
'wiki.az': ('wiki.az.npz',
'74257c3bcd533a606afae509ea835dc036d61546'),
'wiki.ba': ('wiki.ba.npz',
'4a2857ed694d66864df562b376c2fa12fcb03646'),
'wiki.bar': ('wiki.bar.npz',
'e65c6b7e9ff83798d1eea05d166148837d53e615'),
'wiki.bat_smg': ('wiki.bat_smg.npz',
'6420584ae28ba6c9dd145fea8f096243d457c2d8'),
'wiki.bcl': ('wiki.bcl.npz',
'33606c970ab336b678393e2bdb8af2116d11cf7b'),
'wiki.be': ('wiki.be.npz',
'84487d341e333344cf71bc12c7a205d923762498'),
'wiki.bg': ('wiki.bg.npz',
'56f2a175b1a1d1a9cf9f1cea277cd0b46ffd7f66'),
'wiki.bh': ('wiki.bh.npz',
'07473989853a344a41aaa18f41030dc56d0d01c7'),
'wiki.bi': ('wiki.bi.npz',
'08adfa3c9ef3016d30ef69ea539d217ff67eda09'),
'wiki.bjn': ('wiki.bjn.npz',
'998a551283222931d3a26922308449950bfa3ec7'),
'wiki.bm': ('wiki.bm.npz',
'454ff9fbd4790e4a076d9a2087a51da28aa1332f'),
'wiki.bn': ('wiki.bn.npz',
'1f36f6f39c9a9b33bb8035c9a4dc7e04933604fd'),
'wiki.bo': ('wiki.bo.npz',
'b9fe87318428de0a7790de175b5fec80c5af482d'),
'wiki.bpy': ('wiki.bpy.npz',
'5c7853173d27e2c018c24eca69de8d5f34511b0d'),
'wiki.br': ('wiki.br.npz',
'7aa66a2034fbfaa1d39e637385d48610238797c9'),
'wiki.bs': ('wiki.bs.npz',
'a019a4677677c2e9e4d899326b2b6c15ad6c011a'),
'wiki.bug': ('wiki.bug.npz',
'09ae3477941d7a99d1df494368d7efb0b2c18913'),
'wiki.bxr': ('wiki.bxr.npz',
'b832c691b8ddd95896c052d3d15e1f98d72068d5'),
'wiki.ca': ('wiki.ca.npz',
'391e0d4daad08649251274fa1cc2a5f49c7728b1'),
'wiki.cbk_zam': ('wiki.cbk_zam.npz',
'02e57a763bc9f9eadaba57953383dd12a0a78a37'),
'wiki.cdo': ('wiki.cdo.npz',
'd6e8f422327e8b2273f1f2662d793707ece6695d'),
'wiki.ceb': ('wiki.ceb.npz',
'23bc0bb9aeaa57dff35092766941a866de142aae'),
'wiki.ce': ('wiki.ce.npz',
'182b2a889256119a6d379d501c55c7621e5855db'),
'wiki.ch': ('wiki.ch.npz',
'82dd77512fcb463481f43c9cef3507e2baa90d7b'),
'wiki.cho': ('wiki.cho.npz',
'b0b620fc2442d1a6e2440e71a424861c80175f0c'),
'wiki.chr': ('wiki.chr.npz',
'3d62c6b95c5af46abd6234426ae760cca65d5bd0'),
'wiki.chy': ('wiki.chy.npz',
'34a28a22da79aebc100e3714b825c95c8d5f54a3'),
'wiki.ckb': ('wiki.ckb.npz',
'ad19461e4be583d08b7693ff5b1e9d590ed41add'),
'wiki.co': ('wiki.co.npz',
'fa60d9f0e79f1c7e15f381aef983a0f4f31c05a8'),
'wiki.crh': ('wiki.crh.npz',
'540270ba6edd9d7b2f7efca52b3b407524ac67d1'),
'wiki.cr': ('wiki.cr.npz',
'f06b77465a38ec960d7d5a7554b848c37e945c76'),
'wiki.csb': ('wiki.csb.npz',
'b8b28559cf2541341af98e2aa755856765bdeabf'),
'wiki.cs': ('wiki.cs.npz',
'19881e931fe06abf341450f00c342d364313e232'),
'wiki.cu': ('wiki.cu.npz',
'731e0d00abd53bc2a8eb6cf37f6ab883cff34e15'),
'wiki.cv': ('wiki.cv.npz',
'e60034fcffb7dfef7b236ddba1194c3aa20b7967'),
'wiki.cy': ('wiki.cy.npz',
'5a0fb967b5556f007c0d5065f951a3d3b1c1005a'),
'wiki.da': ('wiki.da.npz',
'd06258014ba2c7450bc2d55edfdf1731433e42e5'),
'wiki.de': ('wiki.de.npz',
'a21694dfd2af63bd7bb00f0b60b28e88bd1153f1'),
'wiki.diq': ('wiki.diq.npz',
'4f6c77a86b39834a7130419967759afd8cc26b84'),
'wiki.dsb': ('wiki.dsb.npz',
'e74f1d346a8db96987bff0c33ee5f886907c380a'),
'wiki.dv': ('wiki.dv.npz',
'5d6fe6f0eec2e7704121d5aba03b4edbb28af873'),
'wiki.dz': ('wiki.dz.npz',
'77c639d36d0355b2de5adead7996eae342b852a6'),
'wiki.ee': ('wiki.ee.npz',
'4b5a76127d57515d3e8a76787cdefde5856b754a'),
'wiki.el': ('wiki.el.npz',
'a00bcb97e7898931196a1c69f7a492e5b6202661'),
'wiki.eml': ('wiki.eml.npz',
'b475d626b3d97e7a68c02827fdc7900599e838c6'),
'wiki.en': ('wiki.en.npz',
'ad5ec6d49db6c6fe76b8e85ff05d34e5d0e1eb6a'),
'wiki.eo': ('wiki.eo.npz',
'18049b0010520d13e676f5a82e8bb90153d99003'),
'wiki.es': ('wiki.es.npz',
'a6d192ba7d82d762f8367e75ca951aad4d11e410'),
'wiki.et': ('wiki.et.npz',
'4beb7025cf88f1aa62d025b187f0cb09aee61858'),
'wiki.eu': ('wiki.eu.npz',
'5e1a8197e35f20a2476798bbb935b4c131289c4f'),
'wiki.ext': ('wiki.ext.npz',
'049b2d1b0a8b102b45907cf487cac30aa294e0a0'),
'wiki.fa': ('wiki.fa.npz',
'81ed274997c87ef87d73d25e166ca06272ce426f'),
'wiki.ff': ('wiki.ff.npz',
'4867dc74cd53ca0b0f769af4fa1ea420406b59bf'),
'wiki.fi': ('wiki.fi.npz',
'6d1291b854045179f8171ac7d62ede7d8ac159a2'),
'wiki.fiu_vro': ('wiki.fiu_vro.npz',
'dd87806d9dc8833fa0e21e35a50815ebdbaa6c8b'),
'wiki.fj': ('wiki.fj.npz',
'cf5c31b0a69276f5dd18ab738ed92444abaeb755'),
'wiki.fo': ('wiki.fo.npz',
'ffc19807d528af000861a94cfb8097bd686e14fc'),
'wiki.fr': ('wiki.fr.npz',
'8f06d5dbe3cf7214354fe9b2f6eca0ef7419f063'),
'wiki.frp': ('wiki.frp.npz',
'c8b200ae592478d3cd0bfaafcd7aa19de8a3bfe5'),
'wiki.frr': ('wiki.frr.npz',
'fa5e5c39ea2a45793c679eacea290a35e37405ea'),
'wiki.fur': ('wiki.fur.npz',
'a61a8940d059f25000e3fe23933e5ed0d37e65d3'),
'wiki.fy': ('wiki.fy.npz',
'46f9f41bdf6f4fb8e27a753290413d745465963b'),
'wiki.gag': ('wiki.gag.npz',
'49fb01230e6803544122d47ab7d3fe694d1444f2'),
'wiki.gan': ('wiki.gan.npz',
'716b7b26acc15975f30caf3c6effa111516fcca5'),
'wiki.ga': ('wiki.ga.npz',
'ea934bc1fdc1acf6caf9ac746c6c499251f1fdee'),
'wiki.gd': ('wiki.gd.npz',
'597017b5a32d933f194595d3656f858e37e70a62'),
'wiki.glk': ('wiki.glk.npz',
'91a5834658bc2d48714e8807ef24efb79567b4b5'),
'wiki.gl': ('wiki.gl.npz',
'2fa8e48d6ae1e9c9d542eb3f2156cf9e359e66c2'),
'wiki.gn': ('wiki.gn.npz',
'e359eef3928e1f1b5d8fcf0ea532e8794c66289a'),
'wiki.gom': ('wiki.gom.npz',
'8cd361481c23f7545cc2bd8f1bf22aa7400edd4d'),
'wiki.got': ('wiki.got.npz',
'd05daf105611150695e61775fdff2c500b36be3f'),
'wiki.gu': ('wiki.gu.npz',
'0ce175c5fc39bab4032892f70c9d2bb850af0f4a'),
'wiki.gv': ('wiki.gv.npz',
'2c573f873d607831ff01b64603c17b8db79bd7e1'),
'wiki.hak': ('wiki.hak.npz',
'e6048727799cdf149f5c50037e0fc59300d33a94'),
'wiki.ha': ('wiki.ha.npz',
'f18ea7286bbd390c5470896b2c99cb1adc740064'),
'wiki.haw': ('wiki.haw.npz',
'18bcd85d2e06b1b889f0835fc5b62697fdf32d72'),
'wiki.he': ('wiki.he.npz',
'76915ff167b6ecb7b7e22ff0ca46914a55d344af'),
'wiki.hif': ('wiki.hif.npz',
'12153aaf98d76d5502ab77a27cd0b9a539f61513'),
'wiki.hi': ('wiki.hi.npz',
'249666a598991f6ec147954c6af9e531fd1cd94e'),
'wiki.ho': ('wiki.ho.npz',
'3f804fd69780c0789708b56ea9d48715f8e38f26'),
'wiki.hr': ('wiki.hr.npz',
'9a3de28e69f97048bfb480b4f83eaab6149f66ad'),
'wiki.hsb': ('wiki.hsb.npz',
'7070bf64e13299dd66ac0e9f8e24011a56b6bfe8'),
'wiki.ht': ('wiki.ht.npz',
'a607093d511afeb584d02dc676bc5a27eff66287'),
'wiki.hu': ('wiki.hu.npz',
'9b2c4750daf1bcf39768572e874b5afda0e2f0bc'),
'wiki.hy': ('wiki.hy.npz',
'ec0461a102a6fb00bd324f66cefd3c8d55a7093a'),
'wiki.hz': ('wiki.hz.npz',
'5dfb8afbdae6b4148c3e55ab459c56a74b46b463'),
'wiki.ia': ('wiki.ia.npz',
'4cfaaf053b9513bbf5b2423258c0f01d20256de6'),
'wiki.id': ('wiki.id.npz',
'bace396bb9941cc9e5b2e5f5a19be6db833c5fd4'),
'wiki.ie': ('wiki.ie.npz',
'1bae7256c2e763ce6d692d1c0a603d99a8b22826'),
'wiki.ig': ('wiki.ig.npz',
'23128e54a5e143891d392d621723bad9cfc8cf7b'),
'wiki.ii': ('wiki.ii.npz',
'54bc16d05da512481865a89ecf30260b0acc04dc'),
'wiki.ik': ('wiki.ik.npz',
'f8015227e893d2375699b7d132b306ba381f02ac'),
'wiki.ilo': ('wiki.ilo.npz',
'185a11f81bd5d24a34558dda81ee4735f5ba150b'),
'wiki.io': ('wiki.io.npz',
'ddf8180a90aa6ee5be93a2582cc99c535f21363e'),
'wiki.is': ('wiki.is.npz',
'968f8dd2a093b279a6f7aaa734008454bf51d724'),
'wiki.it': ('wiki.it.npz',
'fdfb857a309b2c3d29482bb5cc55f21b858d2e6f'),
'wiki.iu': ('wiki.iu.npz',
'fa8896730bd6c24c3473daa22116d1016294e7f7'),
'wiki.jam': ('wiki.jam.npz',
'a8f0d0b99c89ace0a6401b8fcda261d06065faaf'),
'wiki.ja': ('wiki.ja.npz',
'8d42e5a40e4d1d8645b2d80b873a65cadcf68b5c'),
'wiki.jbo': ('wiki.jbo.npz',
'145fc999ab004b348cf9bf445f0a93a7a145308b'),
'wiki.jv': ('wiki.jv.npz',
'66978770bf06e42414395cf5fd8c596044d72bec'),
'wiki.kaa': ('wiki.kaa.npz',
'624a640ecb9901b2aba2e9f44ab615146ecb2862'),
'wiki.kab': ('wiki.kab.npz',
'e97f93b6ba65e95c85b7541932cf53c5ad9eb896'),
'wiki.ka': ('wiki.ka.npz',
'1ca8376e1e0cbd58001c1b51a2d488a2874a6743'),
'wiki.kbd': ('wiki.kbd.npz',
'f2d2a05b06723ac549784ad5470d84f5742a1352'),
'wiki.kg': ('wiki.kg.npz',
'fa7f6d5f660a173a3e75342d449980eedcdc789e'),
'wiki.ki': ('wiki.ki.npz',
'21a8c7c616c0050c51c288861f3423f313e4f634'),
'wiki.kj': ('wiki.kj.npz',
'f3c347509a0d81f4f7fdbb8b22889b8d76e5014e'),
'wiki.kk': ('wiki.kk.npz',
'bc24a3289e1c1e18e16b6789c2f9f92af1e73071'),
'wiki.kl': ('wiki.kl.npz',
'b8b7e7359f067836e2be2ecfe9f35a820b00fe1d'),
'wiki.km': ('wiki.km.npz',
'e053799fd01463808432dc035bef3e36620e2f36'),
'wiki.kn': ('wiki.kn.npz',
'2849a0a8b3453e9bf6af05d4c7bd3db881dd1068'),
'wiki.koi': ('wiki.koi.npz',
'a9b02e9bd41833bcd54769f94626019c03f29997'),
'wiki.ko': ('wiki.ko.npz',
'764d9896e74b5a26c6884d48bce3bed8ed3a7822'),
'wiki.krc': ('wiki.krc.npz',
'bfe39598c718f1cc95909db7544b3214b308a97c'),
'wiki.kr': ('wiki.kr.npz',
'1e6af853d4a8ea7830e116eb9b61ac5d7d9a315c'),
'wiki.ksh': ('wiki.ksh.npz',
'66cd0e3e0a0b0282a13960571ebe7cddd7706bf2'),
'wiki.ks': ('wiki.ks.npz',
'85f1adaa05b854df4dede745a1aaab3836e60770'),
'wiki.ku': ('wiki.ku.npz',
'faf90584e5a45e6d0f9eeb88399b82abe037d584'),
'wiki.kv': ('wiki.kv.npz',
'9f2b41822013a412da9c99fac06eed8be03ca192'),
'wiki.kw': ('wiki.kw.npz',
'3eed8a8fc97a2fc79241b8474a458c98d00fc897'),
'wiki.ky': ('wiki.ky.npz',
'0116ff90f10a6c0728e1ea86d8a44896ea83270a'),
'wiki.lad': ('wiki.lad.npz',
'5af2015b3d1c5e8563f0e92721580988ebe2ce50'),
'wiki.la': ('wiki.la.npz',
'7143303a3ea13c7668eb90ea6e3d2ca69857a3be'),
'wiki.lbe': ('wiki.lbe.npz',
'f206a3c35a184ba5d2b32ee68640eadf66c847da'),
'wiki.lb': ('wiki.lb.npz',
'143dc6337f3690379282034c460c613d7f144923'),
'wiki.lez': ('wiki.lez.npz',
'b29a680decc6b29f24e8eb9e4f8e11e3419d45f1'),
'wiki.lg': ('wiki.lg.npz',
'866640ce62cedbc1d453b7ea3c289c291ad76e13'),
'wiki.lij': ('wiki.lij.npz',
'0dcd3d7009ae89b1016ca6cdb99a9f0d70bc4baf'),
'wiki.li': ('wiki.li.npz',
'4666b3c238256d7b7623a136db19b8b9f4754734'),
'wiki.lmo': ('wiki.lmo.npz',
'ac89fa7cfe0675950bcb31c66bf3f88a3cfc98f0'),
'wiki.ln': ('wiki.ln.npz',
'fba158719944aabe58e0002a90be0ed77e11702d'),
'wiki.lo': ('wiki.lo.npz',
'1e113e340a8a93d385e14502c9c4e3bcdf6c3101'),
'wiki.lrc': ('wiki.lrc.npz',
'42cb755f398fba6f0da7949c91e92b55654bd482'),
'wiki.ltg': ('wiki.ltg.npz',
'182f75859e228d1162215f28fe7f2dca127624a4'),
'wiki.lt': ('wiki.lt.npz',
'66aa944bd2e777cb82d6d59b1f2f837b6c48cb37'),
'wiki.lv': ('wiki.lv.npz',
'2be8f926da85694fa998bf79d80b61ebb8d67576'),
'wiki.mai': ('wiki.mai.npz',
'b8a9c36e2a0f1bb84a44dc762250d2a9007ef637'),
'wiki.map_bms': ('wiki.map_bms.npz',
'6f0394d6b3d08a946e3df4b9355efe94148f018a'),
'wiki.mdf': ('wiki.mdf.npz',
'774ee35334641db57f9ac9069961c5372a5d92e8'),
'wiki.mg': ('wiki.mg.npz',
'496c48ef668f08ce95ebb11ce1ce5026b52d935c'),
'wiki.mh': ('wiki.mh.npz',
'352edd84f99c5aa277a7306f6cacea1fab065ed3'),
'wiki.mhr': ('wiki.mhr.npz',
'dd78b27a674ac10411cdf74ac32f9391506b17e0'),
'wiki.min': ('wiki.min.npz',
'628b406441ab03bc8aa68195ada50bfdc8226f34'),
'wiki.mi': ('wiki.mi.npz',
'754127b473861cd4f9ae034c9f527a34827b1f00'),
'wiki.mk': ('wiki.mk.npz',
'b09fed4f56c296f13c4020ef1fec498382a38b73'),
'wiki.ml': ('wiki.ml.npz',
'02fb55d97ca2f0408f0e7e8dd6a661bbc3319a2a'),
'wiki.mn': ('wiki.mn.npz',
'08b2c45689aa5d9ec49df96dc7c777ce9b9a0b4b'),
'wiki.mo': ('wiki.mo.npz',
'638c2e8bd2352fd52921b9ae62f578b8357bab49'),
'wiki.mrj': ('wiki.mrj.npz',
'ec5cf1f4fb8dfdca64d8172974e620eb8fa41626'),
'wiki.mr': ('wiki.mr.npz',
'074dd68c947c2f137a3e84b55012925f00213139'),
'wiki.ms': ('wiki.ms.npz',
'3dbe9e9d70251de8a374776ff1250a9c3103ee59'),
'wiki.mt': ('wiki.mt.npz',
'f5103998a68d1b178387417436a83123d44aba01'),
'wiki.multi.ar': ('wiki.multi.ar.npz',
'a010d1d81a465c56ebaf596b3e8e8795e7f0f8e3'),
'wiki.multi.bg': ('wiki.multi.bg.npz',
'c04018f3a600cee170f12a36cdd35b4727a2aade'),
'wiki.multi.ca': ('wiki.multi.ca.npz',
'eef52a0cf20c133ca9065de25f0702861a8cfa29'),
'wiki.multi.cs': ('wiki.multi.cs.npz',
'c5f547aa78c0e3d7dae67a0334d500bf2a86aa30'),
'wiki.multi.da': ('wiki.multi.da.npz',
'24374f2ee169b33327feeee46da31b0de1622fe4'),
'wiki.multi.de': ('wiki.multi.de.npz',
'2e6c119b345bebd34b56eaaf855d6703889b11f7'),
'wiki.multi.el': ('wiki.multi.el.npz',
'9d122beedb80a2e5334946641e5bafd32c01e76b'),
'wiki.multi.en': ('wiki.multi.en.npz',
'8c3c480b4cb2690304173713a646280613b244a8'),
'wiki.multi.es': ('wiki.multi.es.npz',
'483a22656e4fb2a01e9f4ef8156b261e780850ab'),
'wiki.multi.et': ('wiki.multi.et.npz',
'22498c7b91645a3874fa738b5cfb16bf98b6f97c'),
'wiki.multi.fi': ('wiki.multi.fi.npz',
'765a6f0b63777bff4ae6ca2b461c5889c03d6a70'),
'wiki.multi.fr': ('wiki.multi.fr.npz',
'decd9aacf600114b8a36072535c0309874a37c83'),
'wiki.multi.he': ('wiki.multi.he.npz',
'7eee940c1b85936f59122f4b1a166223dd946674'),
'wiki.multi.hr': ('wiki.multi.hr.npz',
'1673963416af088f8bf15576afb33d58115db35c'),
'wiki.multi.hu': ('wiki.multi.hu.npz',
'a1fbe6ededf3cbaa3eaa22dd8b20cce4b36cfc6d'),
'wiki.multi.id': ('wiki.multi.id.npz',
'6c3e721febb511ede7db7bf978d65769e4270f5c'),
'wiki.multi.it': ('wiki.multi.it.npz',
'fc5bfc11e0165e8d95c1708573dad5e456826c73'),
'wiki.multi.mk': ('wiki.multi.mk.npz',
'6cd50198355674f156fc863108d9bebf11cfabd9'),
'wiki.multi.nl': ('wiki.multi.nl.npz',
'4fa06b9230c95dfa5a9e9a5d80f1f5ba614d3cbf'),
'wiki.multi.no': ('wiki.multi.no.npz',
'63756168c1101e73fba8d1a5015f32b8892819e6'),
'wiki.multi.pl': ('wiki.multi.pl.npz',
'958b8e8bead965ba1bb1433e1c960fc3e12a10fb'),
'wiki.multi.pt': ('wiki.multi.pt.npz',
'22f07df1609d79b95344ee575ea43141424a1528'),
'wiki.multi.ro': ('wiki.multi.ro.npz',
'73180b3e382519004bf38ea7b86237aacbbe813a'),
'wiki.multi.ru': ('wiki.multi.ru.npz',
'3b2eb9163f35e90bf2ce1cd3c997b354d0c34f59'),
'wiki.multi.sk': ('wiki.multi.sk.npz',
'606a0c3ba9849070c6b6b8c22d920fdeed9a1385'),
'wiki.multi.sl': ('wiki.multi.sl.npz',
'3cfdab5043b8cfe1535cb6dbd4c9e68847ad5904'),
'wiki.multi.sv': ('wiki.multi.sv.npz',
'4f1494885b9a831e87cfa3c15f2204c4a73c0779'),
'wiki.multi.tr': ('wiki.multi.tr.npz',
'54f90d5ddb9a65538a41e37c5a67ed933a5e4885'),
'wiki.multi.uk': ('wiki.multi.uk.npz',
'500fd26b1d7a25b42458012e99f9f76642e0c787'),
'wiki.multi.vi': ('wiki.multi.vi.npz',
'3955809cceb300965c15f9372221417719bb0db8'),
'wiki.mus': ('wiki.mus.npz',
'a5f48934a3fa6eaf4929098046c93fc94dd6bcb6'),
'wiki.mwl': ('wiki.mwl.npz',
'8a5e2c272166f8a72c5694ca6c3104d5f49179ec'),
'wiki.my': ('wiki.my.npz',
'5e035aca16700d7d6695af8a6d3a88ac847aaeb7'),
'wiki.myv': ('wiki.myv.npz',
'd4cfaab70c640033e02c0fc0c5a3615ae836c569'),
'wiki.mzn': ('wiki.mzn.npz',
'ad09ac584ae455b5862b95125ef409360ae18445'),
'wiki.nah': ('wiki.nah.npz',
'2dc454ef37d059f2053af46cfa1f4f0ca939cba0'),
'wiki.na': ('wiki.na.npz',
'401f0f880eb7aa78d21348bc1e0a3953b3e81bf0'),
'wiki.nap': ('wiki.nap.npz',
'996da46aeeab5644ba766d00c5e343b1553361d7'),
'wiki.nds_nl': ('wiki.nds_nl.npz',
'5a9307e16b13a5a82ec19a52b33254537e7198e7'),
'wiki.nds': ('wiki.nds.npz',
'b249a87c78c52becf51e7b50aaf9f9b6a36585f1'),
'wiki.ne': ('wiki.ne.npz',
'a601db2647a74ffd2b4b43dcb8584735f555459c'),
'wiki.new': ('wiki.new.npz',
'c398a3775aba9c68ce765cfdfb6b188f7c47e4c6'),
'wiki-news-300d-1M': ('wiki-news-300d-1M.npz',
'0a03bbd508e5381e140476140fb121afeb0050ed'),
'wiki-news-300d-1M-subword': ('wiki-news-300d-1M-subword.npz',
'69edae21375407781c727dcb9e534e79d712d137'),
'wiki.ng': ('wiki.ng.npz',
'befd774d15f69d43547e13e5ea3a97c4cb1ab405'),
'wiki.nl': ('wiki.nl.npz',
'5a7cb6f1dd0a7621202abba9461ac2c5bf905219'),
'wiki.nn': ('wiki.nn.npz',
'8e5059ddeb24050fadaa5cc4622b13feb3e4a226'),
'wiki.no': ('wiki.no.npz',
'5ce6e0f793e66f081652f64013968099de03d9f9'),
'wiki.nov': ('wiki.nov.npz',
'95ed23b4cfd7a65afa1c12c7dbdce6af53923d77'),
'wiki.vec': ('wiki.vec.npz',
'08ebb912efeb9df1c7d05e1af90484d210dff47e'),
'wiki.nrm': ('wiki.nrm.npz',
'e58614b4508ff9810f0b58fd818f973775bc918d'),
'wiki.nso': ('wiki.nso.npz',
'56a2ebe260241402d117cd89c5c872b9c96ff05b'),
'wiki.nv': ('wiki.nv.npz',
'c713051fe03ec1f60314bb42161b2a47fb5e169a'),
'wiki.ny': ('wiki.ny.npz',
'ba5a1725955cbc13e7fd93ab499f8085840c992c'),
'wiki.oc': ('wiki.oc.npz',
'259e7d994c38a4cfc140fb07016b82d6781e5027'),
'wiki.olo': ('wiki.olo.npz',
'0fea70f887def4779ee70a79366b88f1ada65004'),
'wiki.om': ('wiki.om.npz',
'47e2d756b5f8913085d901375c1b4e0b118a4221'),
'wiki.or': ('wiki.or.npz',
'7e274ab060219b019aa02bb97941cc6e162fd01f'),
'wiki.os': ('wiki.os.npz',
'19e8199cc2aaffdb07b6c558dbc5465ac6e03155'),
'wiki.pag': ('wiki.pag.npz',
'eddf4931547649026c02f893297ef673ec6158bb'),
'wiki.pam': ('wiki.pam.npz',
'40109aa174bd9f0fa657839bb548e2b0646c58d3'),
'wiki.pa': ('wiki.pa.npz',
'8a5870717e9e641b1f757f13259171698118de2e'),
'wiki.pap': ('wiki.pap.npz',
'999c8e5b005ca20d9998fbbe4fa79177f69e24c0'),
'wiki.pcd': ('wiki.pcd.npz',
'e975066b323a65cdc5e4c27138ef674d2cf7250b'),
'wiki.pdc': ('wiki.pdc.npz',
'5c770b9d56f276b0aa535845f175c05ee1cea615'),
'wiki.pfl': ('wiki.pfl.npz',
'0063d0b633ee529a75482b36ed4f4da7d64994ec'),
'wiki.pih': ('wiki.pih.npz',
'ce1d76c94d248545eea0d7436c54849dbb380bfc'),
'wiki.pi': ('wiki.pi.npz',
'c7d56c334bf529f8b3655693d207a80feaec4aed'),
'wiki.pl': ('wiki.pl.npz',
'0d612fdf871a1a4084c867f394940475be899443'),
'wiki.pms': ('wiki.pms.npz',
'ca149a2fb138011315bb6d5d61c7a5647e515e51'),
'wiki.pnb': ('wiki.pnb.npz',
'9ec82d02ad8894056c67991cf8ce927bcca74ee2'),
'wiki.pnt': ('wiki.pnt.npz',
'3f90123407bb8fc838a0a0d3700a14e15f5b26aa'),
'wiki.ps': ('wiki.ps.npz',
'7edebc02ac16f5fab83eb10b7d0fab821a9a4d43'),
'wiki.pt': ('wiki.pt.npz',
'f172fd801edd1ad9d319ba44146d40b5d682a473'),
'wiki.qu': ('wiki.qu.npz',
'68bec60ccfe1826c3b3a8968574488dbc74cdf7b'),
'wiki.rm': ('wiki.rm.npz',
'00fb191fc736ba60cb23e76169dfccde9a9daad0'),
'wiki.rmy': ('wiki.rmy.npz',
'c5e93cc37ff7293b9a1d9fe55c42d6fbde372b97'),
'wiki.rn': ('wiki.rn.npz',
'57b8e0d6999269be227af6ef2797a9cf8386ff1b'),
'wiki.roa_rup': ('wiki.roa_rup.npz',
'e06d6b5672a59bb9e83143bc8b28300d23c09546'),
'wiki.roa_tara': ('wiki.roa_tara.npz',
'c083105f40236dc3711f06c1b40e8ee7a714b99d'),
'wiki.ro': ('wiki.ro.npz',
'766bc0cb58a65b0b1763b9a0d90e91ab982eb20d'),
'wiki.rue': ('wiki.rue.npz',
'9a91fa093cd48d7d658d526b0ccda48dc59cd7f4'),
'wiki.ru': ('wiki.ru.npz',
'd59d099481c22d5592ab9635c9ee48060aa0bf45'),
'wiki.rw': ('wiki.rw.npz',
'e99ee87d249f6c157c5c97397d1025d798b85c69'),
'wiki.sah': ('wiki.sah.npz',
'85dae39097b29bc8e2b64f343a77794e4a62f91a'),
'wiki.sa': ('wiki.sa.npz',
'7d1928d7c67400045ac1b35a37a0e3089690d875'),
'wiki.scn': ('wiki.scn.npz',
'27d7b8050bbeed8ce196061c610216760b053c39'),
'wiki.sc': ('wiki.sc.npz',
'69c7b8be0f03a1bbd615695f93bdd78f96a58e16'),
'wiki.sco': ('wiki.sco.npz',
'4880282f59d3338b67fbff75359e2d24896e95bb'),
'wiki.sd': ('wiki.sd.npz',
'0ed8da4d27223db717a612cf0c88582351db6e19'),
'wiki.se': ('wiki.se.npz',
'0f4b2e060d5e29f96ca73aab29c967e79db69c17'),
'wiki.sg': ('wiki.sg.npz',
'a5e4edf34fe1a88b322da4c3922ec5a470e200c6'),
'wiki.sh': ('wiki.sh.npz',
'c13f1e94676bc939560193f7aa7ffd7d604707b3'),
'wiki.simple': ('wiki.simple.npz',
'352d0575e7d60b08e1dfce2c5de713906f0ed78f'),
'wiki.si': ('wiki.si.npz',
'204f9ffbe7770a9f56d3b2fb26999165015f5c33'),
'wiki.sk': ('wiki.sk.npz',
'7a9820b5a343b242660bf2595d1ecbf6e00a76d6'),
'wiki.sl': ('wiki.sl.npz',
'85f3186f26d6725317a64e290363a7251b928b81'),
'wiki.sm': ('wiki.sm.npz',
'9e13452cc4bff677f4f15db04f9d2f95f6ec054c'),
'wiki.sn': ('wiki.sn.npz',
'e8d5f7dcf51280c5f99bc3df849b4889a61e9fcd'),
'wiki.so': ('wiki.so.npz',
'0f5d71b95768b33fd939a870c15344c4478364a9'),
'wiki.sq': ('wiki.sq.npz',
'8b05826df8575e65c87a2fc0b7630cf644d4216d'),
'wiki.srn': ('wiki.srn.npz',
'2711396ef297ac5dde8904508bc002bdecbcc6f4'),
'wiki.sr': ('wiki.sr.npz',
'546edc8e29a5d2e99ed10eb4a552cbef2bb8f417'),
'wiki.ss': ('wiki.ss.npz',
'2e5911bad79bb5270a64f587e326d31c95ec58f3'),
'wiki.st': ('wiki.st.npz',
'23bc954719a2962e891f02efaea754c9ea025894'),
'wiki.stq': ('wiki.stq.npz',
'dd3ece0c0aa30e53ae0f4b558309bb60ab628652'),
'wiki.su': ('wiki.su.npz',
'7e48732e8a1fcf212e692924a4416a6ac3b3b055'),
'wiki.sv': ('wiki.sv.npz',
'b9ec52e9423688f195f3145c243226c0e0b51e83'),
'wiki.sw': ('wiki.sw.npz',
'5262f0c645322b10eca73f792a970f10b2719e55'),
'wiki.szl': ('wiki.szl.npz',
'fdd6d6b291cdbbcec5ff93451a588fdd103bb2d0'),
'wiki.ta': ('wiki.ta.npz',
'da7c5bc6e1142306ff2669bf1739832beb6c1763'),
'wiki.tcy': ('wiki.tcy.npz',
'baa49e1afa2bb0dcaaef0fac1ee75bbe711d1134'),
'wiki.te': ('wiki.te.npz',
'baf48767ce85e4d41d65d25f2bbf1c5f559ec18f'),
'wiki.tet': ('wiki.tet.npz',
'11e46a893af55344dbe102d530fdfea5d949d3bc'),
'wiki.tg': ('wiki.tg.npz',
'da66abb72ec9ccc602713161e544963d59cc51d7'),
'wiki.th': ('wiki.th.npz',
'25e54bf2d305779ec9baa5f344410bd75c7702fc'),
'wiki.ti': ('wiki.ti.npz',
'1faf98f3a0eafa7559a4b2a111f43dd1f7b9a05b'),
'wiki.tk': ('wiki.tk.npz',
'34c714fa8275fd6abfe86b2d144a043774552a6c'),
'wiki.tl': ('wiki.tl.npz',
'7d7f8a0485155bce7a74a1d778824375b0029f53'),
'wiki.tn': ('wiki.tn.npz',
'd0bc3a9b948753ac2283e5e10480c9fa0f6acb53'),
'wiki.to': ('wiki.to.npz',
'e982fc31bcfcf7339988d7aad21ce29ac9e84b0b'),
'wiki.tpi': ('wiki.tpi.npz',
'448cef043fa4b7f97825dbf8ee205ef05543bcac'),
'wiki.tr': ('wiki.tr.npz',
'c9830607a4c5134c6191006f1d80bae0ec798fe6'),
'wiki.ts': ('wiki.ts.npz',
'84a0598803712c8a713943447ddb73fc0f39af43'),
'wiki.tt': ('wiki.tt.npz',
'82c29df18f33e6284af3e977a6dda7e132a7a225'),
'wiki.tum': ('wiki.tum.npz',
'358990b894a3fb09d70674465952d828c9b0eda7'),
'wiki.tw': ('wiki.tw.npz',
'1e6d2838a4f271c1808795fb929cfcbf95094d93'),
'wiki.ty': ('wiki.ty.npz',
'e41ca5192d8cb515b3561c8d6935b150deb027b7'),
'wiki.tyv': ('wiki.tyv.npz',
'ce062ed32e854604714b65698ae290c99ba28060'),
'wiki.udm': ('wiki.udm.npz',
'9e1c5891ee0c5ac8f65fc457e1b42c7b2bfc8d37'),
'wiki.ug': ('wiki.ug.npz',
'656503e54063e200980e39f00fc011395bcd8551'),
'wiki.uk': ('wiki.uk.npz',
'352b7ee24d9fc6513fff4fe13bc04086c680834a'),
'wiki.ur': ('wiki.ur.npz',
'a81e55c7adfc2cef779ce9a01fe21319a7e4943b'),
'wiki.uz': ('wiki.uz.npz',
'd60d1e67bb8574dd71c18c88114aba674fc1eecb'),
'wiki.ve': ('wiki.ve.npz',
'5bfc3dbb3e47d23597df47ef12bd1c64ab8d3ea9'),
'wiki.vep': ('wiki.vep.npz',
'7a94355754fbe56802242c0bf9d7a27335095552'),
'wiki.vi': ('wiki.vi.npz',
'f118039eb16a4ca3347b6b171eac41113350a041'),
'wiki.vls': ('wiki.vls.npz',
'9a46a2fdc6448aa54f212081643745499ea7d05c'),
'wiki.vo': ('wiki.vo.npz',
'8e2f93c85ac608bcc4ae14093b9ff016061378fb'),
'wiki.wa': ('wiki.wa.npz',
'907074f7743d30cdbb2c48d0c8b4040796ea4164'),
'wiki.war': ('wiki.war.npz',
'928fb410c394b9c18d875326b6a3e750e2611e1b'),
'wiki.wo': ('wiki.wo.npz',
'7bb352be44f7261aa926f49b13e77df30f29312f'),
'wiki.wuu': ('wiki.wuu.npz',
'0d1dc7b05867ff2156a1180ad3da3b4697924e59'),
'wiki.xal': ('wiki.xal.npz',
'd87f4a131e086dc0bdc2a7e10406820c3c03b6a9'),
'wiki.xh': ('wiki.xh.npz',
'c64e1d2e77d1c744a628e2bd7353284616e48bea'),
'wiki.xmf': ('wiki.xmf.npz',
'160b9ee9773b9099aaf37ae9bdbc8a4a93b7f6ea'),
'wiki.yi': ('wiki.yi.npz',
'0662542cee29f3392fc905004ac6443b32c1477c'),
'wiki.yo': ('wiki.yo.npz',
'5d12d3b902a1fa19d8548295c3802c0608afa5c8'),
'wiki.za': ('wiki.za.npz',
'536348ff89df62e968739b567a1245bfd4112fbe'),
'wiki.zea': ('wiki.zea.npz',
'61fa192289a7c0f73ffa8035632a38b91c31c224'),
'wiki.zh_classical': ('wiki.zh_classical.npz',
'9acc9eaf8ebe316b945fb1f56ac71a2b7e024854'),
'wiki.zh_min_nan': ('wiki.zh_min_nan.npz',
'5d38bc025c82af578299d60f7df7b399de6ed81a'),
'wiki.zh': ('wiki.zh.npz',
'94007fcf3b105bf2c21b84a3a22bdb7946e74804'),
'wiki.zh_yue': ('wiki.zh_yue.npz',
'af6f0d94e6418d528d6cedd859e07e6e2fb416ab'),
'wiki.zu': ('wiki.zu.npz',
'fc9ce07d5d0c49a3c86cf1b26056ada58f9404ca')}
# Category names of the Google word-analogy benchmark: the first five are
# semantic relations, the 'gramN-*' entries are syntactic/morphological ones.
GOOGLEANALOGY_CATEGORIES = [
    'capital-common-countries', 'capital-world', 'currency', 'city-in-state',
    'family', 'gram1-adjective-to-adverb', 'gram2-opposite',
    'gram3-comparative', 'gram4-superlative', 'gram5-present-participle',
    'gram6-nationality-adjective', 'gram7-past-tense', 'gram8-plural',
    'gram9-plural-verbs'
]
BATS_CHECKSUMS = \
{'BATS_3.0/1_Inflectional_morphology/I01 [noun - plural_reg].txt':
'cfcba2835edf81abf11b84defd2f4daa3ca0b0bf',
'BATS_3.0/1_Inflectional_morphology/I02 [noun - plural_irreg].txt':
'44dbc56432b79ff5ce2ef80b6840a8aa916524f9',
'BATS_3.0/1_Inflectional_morphology/I03 [adj - comparative].txt':
'dc530918e98b467b8102a7dab772a66d3db32a73',
'BATS_3.0/1_Inflectional_morphology/I04 [adj - superlative].txt':
'6c6fdfb6c733bc9b298d95013765163f42faf6fb',
'BATS_3.0/1_Inflectional_morphology/I05 [verb_inf - 3pSg].txt':
'39fa47ec7238ddb3f9818bc586f23f55b55418d8',
'BATS_3.0/1_Inflectional_morphology/I06 [verb_inf - Ving].txt':
'8fabeb9f5af6c3e7154a220b7034bbe5b900c36f',
'BATS_3.0/1_Inflectional_morphology/I07 [verb_inf - Ved].txt':
'aa04df95aa2edb436cbcc03c7b15bc492ece52d6',
'BATS_3.0/1_Inflectional_morphology/I08 [verb_Ving - 3pSg].txt':
'5f22d8121a5043ce76d3b6b53a49a7bb3fe33920',
'BATS_3.0/1_Inflectional_morphology/I09 [verb_Ving - Ved].txt':
'377777c1e793c638e72c010228156d01f916708e',
'BATS_3.0/1_Inflectional_morphology/I10 [verb_3pSg - Ved].txt':
'051c0c3c633e10900f827991dac14cf76da7f022',
'BATS_3.0/2_Derivational_morphology/D01 [noun+less_reg].txt':
'5d6839e9d34ee1e9fddb5bbf6516cf6420b85d8d',
'BATS_3.0/2_Derivational_morphology/D02 [un+adj_reg].txt':
'80b82227a0d5f7377f1e8cebe28c582bfeb1afb5',
'BATS_3.0/2_Derivational_morphology/D03 [adj+ly_reg].txt':
'223e120bd61b3116298a253f392654c15ad5a39a',
'BATS_3.0/2_Derivational_morphology/D04 [over+adj_reg].txt':
'a56f8685af489bcd09c36f864eba1657ce0a7c28',
'BATS_3.0/2_Derivational_morphology/D05 [adj+ness_reg].txt':
'5da99b1f1781ecfb4a1a7448c715abf07451917b',
'BATS_3.0/2_Derivational_morphology/D06 [re+verb_reg].txt':
'4c5e1796091fade503fbf0bfc2fae2c7f98b5dd2',
'BATS_3.0/2_Derivational_morphology/D07 [verb+able_reg].txt':
'a6218162bc257d98e875fc667c23edfac59e19fd',
'BATS_3.0/2_Derivational_morphology/D08 [verb+er_irreg].txt':
'9a4236c3bbc23903e101a42fb5ad6e15e552fadf',
'BATS_3.0/2_Derivational_morphology/D09 [verb+tion_irreg].txt':
'3ab0153926d5cf890cf08a4077da6d9946133874',
'BATS_3.0/2_Derivational_morphology/D10 [verb+ment_irreg].txt':
'2a012b87a9a60e128e064c5fe24b60f99e16ddce',
'BATS_3.0/3_Encyclopedic_semantics/E01 [country - capital].txt':
'9890315d3c4e6a38b8ae5fc441858564be3d3dc4',
'BATS_3.0/3_Encyclopedic_semantics/E02 [country - language].txt':
'ef08a00e8ff7802811ace8f00fabac41b5d03678',
'BATS_3.0/3_Encyclopedic_semantics/E03 [UK_city - county].txt':
'754957101c93a25b438785bd4458404cd9010259',
'BATS_3.0/3_Encyclopedic_semantics/E04 [name - nationality].txt':
'71a6562c34fb6154992a7c3e499375fcc3529c96',
'BATS_3.0/3_Encyclopedic_semantics/E05 [name - occupation].txt':
'a9a6f9f1af959aef83106f3dbd6bed16dfe9a3ea',
'BATS_3.0/3_Encyclopedic_semantics/E06 [animal - young].txt':
'12d5b51c7b76b9136eadc719abc8cf4806c67b73',
'BATS_3.0/3_Encyclopedic_semantics/E07 [animal - sound].txt':
'91991b007a35f45bd42bd7d0d465c6f8311df911',
'BATS_3.0/3_Encyclopedic_semantics/E08 [animal - shelter].txt':
'e5af11e216db392986ba0cbb597d861066c29adb',
'BATS_3.0/3_Encyclopedic_semantics/E09 [things - color].txt':
'd30b2eb2fc7a60f19afda7c54582e30f6fe28f51',
'BATS_3.0/3_Encyclopedic_semantics/E10 [male - female].txt':
'247a588671bc1da8f615e14076bd42573d24b4b3',
'BATS_3.0/4_Lexicographic_semantics/L01 [hypernyms - animals].txt':
'4b5c4dabe2c9c038fafee85d8d3958f1b1dec987',
'BATS_3.0/4_Lexicographic_semantics/L02 [hypernyms - misc].txt':
'83d5ecad78d9de28fd70347731c7ee5918ba43c9',
'BATS_3.0/4_Lexicographic_semantics/L03 [hyponyms - misc].txt':
'a8319856ae2f76b4d4c030ac7e899bb3a06a9a48',
'BATS_3.0/4_Lexicographic_semantics/L04 [meronyms - substance].txt':
'c081e1104e1b40725063f4b39d13d1ec12496bfd',
'BATS_3.0/4_Lexicographic_semantics/L05 [meronyms - member].txt':
'bcbf05f3be76cef990a74674a9999a0bb9790a07',
'BATS_3.0/4_Lexicographic_semantics/L06 [meronyms - part].txt':
'2f9bdcc74b881e1c54b391c9a6e7ea6243b3accc',
'BATS_3.0/4_Lexicographic_semantics/L07 [synonyms - intensity].txt':
'8fa287860b096bef004fe0f6557e4f686e3da81a',
'BATS_3.0/4_Lexicographic_semantics/L08 [synonyms - exact].txt':
'a17c591961bddefd97ae5df71f9d1559ce7900f4',
'BATS_3.0/4_Lexicographic_semantics/L09 [antonyms - gradable].txt':
'117fbb86504c192b33a5469f2f282e741d9c016d',
'BATS_3.0/4_Lexicographic_semantics/L10 [antonyms - binary].txt':
'3cde2f2c2a0606777b8d7d11d099f316416a7224'}
# Human-readable relation labels for the 40 BATS 3.0 analogy categories,
# keyed by category code: I* = inflectional morphology, D* = derivational
# morphology, E* = encyclopedic semantics, L* = lexicographic semantics.
# The bracketed values match the category part of the dataset file names.
BATS_CATEGORIES = {
'I01': '[noun - plural_reg]',
'I02': '[noun - plural_irreg]',
'I03': '[adj - comparative]',
'I04': '[adj - superlative]',
'I05': '[verb_inf - 3pSg]',
'I06': '[verb_inf - Ving]',
'I07': '[verb_inf - Ved]',
'I08': '[verb_Ving - 3pSg]',
'I09': '[verb_Ving - Ved]',
'I10': '[verb_3pSg - Ved]',
'D01': '[noun+less_reg]',
'D02': '[un+adj_reg]',
'D03': '[adj+ly_reg]',
'D04': '[over+adj_reg]',
'D05': '[adj+ness_reg]',
'D06': '[re+verb_reg]',
'D07': '[verb+able_reg]',
'D08': '[verb+er_irreg]',
'D09': '[verb+tion_irreg]',
'D10': '[verb+ment_irreg]',
'E01': '[country - capital]',
'E02': '[country - language]',
'E03': '[UK_city - county]',
'E04': '[name - nationality]',
'E05': '[name - occupation]',
'E06': '[animal - young]',
'E07': '[animal - sound]',
'E08': '[animal - shelter]',
'E09': '[things - color]',
'E10': '[male - female]',
'L01': '[hypernyms - animals]',
'L02': '[hypernyms - misc]',
'L03': '[hyponyms - misc]',
'L04': '[meronyms - substance]',
'L05': '[meronyms - member]',
'L06': '[meronyms - part]',
'L07': '[synonyms - intensity]',
'L08': '[synonyms - exact]',
'L09': '[antonyms - gradable]',
'L10': '[antonyms - binary]'
}
# SHA-1 checksums for every file shipped in the SemEval-2017 Task 2
# (multilingual/cross-lingual word similarity) archive, keyed by
# archive-relative path. Used to verify downloaded data integrity.
SEMEVAL17_CHECKSUMS = \
{'SemEval17-Task2/README.txt':
'ad02d4c22fff8a39c9e89a92ba449ec78750af6b',
'SemEval17-Task2/task2-scorer.jar':
'145ef73ce955656d59e3b67b41f8152e8ee018d8',
'SemEval17-Task2/test/subtask1-monolingual/data/de.test.data.txt':
'6fc840f989d2274509549e472a68fb88dd2e149f',
'SemEval17-Task2/test/subtask1-monolingual/data/en.test.data.txt':
'05293fcbd80b2f4aad9b6518ce1a546ad8f61f33',
'SemEval17-Task2/test/subtask1-monolingual/data/es.test.data.txt':
'552904b5988f9951311290ca8fa0441dd4351d4b',
'SemEval17-Task2/test/subtask1-monolingual/data/fa.test.data.txt':
'29d5970feac5982961bd6ab621ba31f83d3bff77',
'SemEval17-Task2/test/subtask1-monolingual/data/it.test.data.txt':
'c95fe2be8fab37e9c70610117bdedc48a0a8e95c',
'SemEval17-Task2/test/subtask1-monolingual/keys/de.test.gold.txt':
'c51463460495a242cc726d41713c5e00b66fdd18',
'SemEval17-Task2/test/subtask1-monolingual/keys/en.test.gold.txt':
'2d2bb2ed41308cc60e7953cc9036f7dc89141b48',
'SemEval17-Task2/test/subtask1-monolingual/keys/es.test.gold.txt':
'a5842ff17fe3847d15414924826a8eb236018bcc',
'SemEval17-Task2/test/subtask1-monolingual/keys/fa.test.gold.txt':
'717bbe035d8ae2bad59416eb3dd4feb7238b97d4',
'SemEval17-Task2/test/subtask1-monolingual/keys/it.test.gold.txt':
'a342b950109c73afdc86a7829e17c1d8f7c482f0',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-es.test.data.txt':
'ef92b1375762f68c700e050d214d3241ccde2319',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-fa.test.data.txt':
'17aa103981f3193960309bb9b4cc151acaf8136c',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-it.test.data.txt':
'eced15e8565689dd67605a82a782d19ee846222a',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-de.test.data.txt':
'5cb69370a46385a7a3d37cdf2018744be77203a0',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-es.test.data.txt':
'402f7fed52b60e915fb1be49f935395488cf7a7b',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-fa.test.data.txt':
'9bdddbbde3da755f2a700bddfc3ed1cd9324ad48',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-it.test.data.txt':
'd3b37aac79ca10311352309ef9b172f686ecbb80',
'SemEval17-Task2/test/subtask2-crosslingual/data/es-fa.test.data.txt':
'a2959aec346c26475a4a6ad4d950ee0545f2381e',
'SemEval17-Task2/test/subtask2-crosslingual/data/es-it.test.data.txt':
'ca627c30143d9f82a37a8776fabf2cee226dd35c',
'SemEval17-Task2/test/subtask2-crosslingual/data/it-fa.test.data.txt':
'a03d79a6ce7b798356b53b4e85dbe828247b97ef',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-es.test.gold.txt':
'7564130011d38daad582b83135010a2a58796df6',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-fa.test.gold.txt':
'c9e23c2e5e970e7f95550fbac3362d85b82cc569',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-it.test.gold.txt':
'b74cc2609b2bd2ceb5e076f504882a2e0a996a3c',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-de.test.gold.txt':
'428dfdad2a144642c13c24b845e6b7de6bf5f663',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-es.test.gold.txt':
'1dd7ab08a10552486299151cdd32ed19b56db682',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-fa.test.gold.txt':
'17451ac2165aa9b695dae9b1aba20eb8609fb400',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-it.test.gold.txt':
'5041c0b84a603ed85aa0a5cbe4b1c34f69a2fa7c',
'SemEval17-Task2/test/subtask2-crosslingual/keys/es-fa.test.gold.txt':
'8c09a219670dc32ab3864078bf0c28a287accabc',
'SemEval17-Task2/test/subtask2-crosslingual/keys/es-it.test.gold.txt':
'b1cdd13209354cc2fc2f4226c80aaa85558daf4a',
'SemEval17-Task2/test/subtask2-crosslingual/keys/it-fa.test.gold.txt':
'e0b560bb1d2db39ce45e841c8aad611734dc94f1',
'SemEval17-Task2/trial/subtask1-monolingual/data/de.trial.data.txt':
'dd071fd90f59bec8d271a447d86ee2e462941f52',
'SemEval17-Task2/trial/subtask1-monolingual/data/en.trial.data.txt':
'e8e5add0850b3dec07f102be26b8791a5e9bbbcf',
'SemEval17-Task2/trial/subtask1-monolingual/data/es.trial.data.txt':
'8956c78ff9ceae1d923a57816e55392c6a7dfc49',
'SemEval17-Task2/trial/subtask1-monolingual/data/fa.trial.data.txt':
'2f7c4247cde0d918b3508e90f6b49a1f5031c81b',
'SemEval17-Task2/trial/subtask1-monolingual/data/it.trial.data.txt':
'c11e0b5b55f94fc97c7b11fa455e71b071be879f',
'SemEval17-Task2/trial/subtask1-monolingual/keys/de.trial.gold.txt':
'ce5567b1accf3eb07da53229dfcb2a8a1dfac380',
'SemEval17-Task2/trial/subtask1-monolingual/keys/en.trial.gold.txt':
'693cb5928e807c79e39136dc0981dadca7832ae6',
'SemEval17-Task2/trial/subtask1-monolingual/keys/es.trial.gold.txt':
'8241ca66bf5ba55f77607e9bcfae8e34902715d8',
'SemEval17-Task2/trial/subtask1-monolingual/keys/fa.trial.gold.txt':
'd30701a93c8c5500b82ac2334ed8410f9a23864b',
'SemEval17-Task2/trial/subtask1-monolingual/keys/it.trial.gold.txt':
'bad225573e1216ba8b35429e9fa520a20e8ce031',
'SemEval17-Task2/trial/subtask1-monolingual/output/de.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/en.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/es.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/fa.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/it.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-es.trial.data.txt':
'c27c8977d8d4434fdc3e59a7b0121d87e0a03237',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-fa.trial.data.txt':
'88a6f6dd1bba309f7cae7281405e37f442782983',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-it.trial.data.txt':
'ebdab0859f3b349fa0120fc8ab98be3394f0d73d',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-de.trial.data.txt':
'128d1a460fe9836b66f0fcdf59455b02edb9f258',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-es.trial.data.txt':
'508c5dde8ffcc32ee3009a0d020c7c96a338e1d1',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-fa.trial.data.txt':
'1a3640eb5facfe15b1e23a07183a2e62ed80c7d9',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-it.trial.data.txt':
'141c83d591b0292016583d9c23a2cc5514a006aa',
'SemEval17-Task2/trial/subtask2-crosslingual/data/es-fa.trial.data.txt':
'a0a548cd698c389ee80c34d6ec72abed5f1625e5',
'SemEval17-Task2/trial/subtask2-crosslingual/data/es-it.trial.data.txt':
'8d42bed8a43ff93d26ca95794758d9392ca707ed',
'SemEval17-Task2/trial/subtask2-crosslingual/data/it-fa.trial.data.txt':
'9c85223f1f734de61c28157df0ce417bb0537803',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-es.trial.gold.txt':
'126c92b2fb3b8f2784dd4ae2a4c52b02a87a8196',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-fa.trial.gold.txt':
'1db6201c2c8f19744c39dbde8bd4a803859d64c1',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-it.trial.gold.txt':
'5300bf2ead163ff3981fb41ec5d0e291c287c9e0',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-de.trial.gold.txt':
'd4f5205de929bb0c4020e1502a3f2204b5accd51',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-es.trial.gold.txt':
'3237e11c3a0d9c0f5d583f8dc1d025b97a1f8bfe',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-fa.trial.gold.txt':
'c14de7bf326907336a02d499c9b92ab229f3f4f8',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-it.trial.gold.txt':
'3c0276c4b4e7a6d8a618bbe1ab0f30ad7b07929c',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-fa.trial.gold.txt':
'359f69e9dfd6411a936baa3392b8f05c398a7707',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-it.trial.gold.txt':
'44090607fabe5a26926a384e521ef1317f6f00d0',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/it-fa.trial.gold.txt':
'97b09ffa11803023c2143fd4a4ac4bbc9775e645',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-es.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-fa.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-it.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-de.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-es.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-fa.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-it.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/es-fa.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/es-it.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/it-fa.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240'}
# Universal Dependencies 2.1 treebank files: maps a treebank/language code
# (e.g. 'en', 'cs_cac') to a dict of split name ('train'/'dev'/'test'/'sample')
# -> (CoNLL-U filename, SHA-1 checksum) tuples, for download verification.
UD21_DATA_FILE_SHA1 = \
{'af': {'dev': ('af-ud-dev.conllu',
'e37b104f4425ee00afc81779201816d5ac525194'),
'test': ('af-ud-test.conllu',
'd2bf02370d308ee957c04242bd0871db0e488389'),
'train': ('af-ud-train.conllu',
'a652c7b19c236063d3ea489947f83095893b699a')},
'grc_proiel': {'dev': ('grc_proiel-ud-dev.conllu',
'd199530c7e40ff0214e510957bb126af0dc12c1c'),
'test': ('grc_proiel-ud-test.conllu',
'bb7825ddeb18fc2d86638e4725f04563f3e08aab'),
'train': ('grc_proiel-ud-train.conllu',
'fe6c861299b033abe8c4ce2b6131cd74f87b96a7')},
'grc': {'dev': ('grc-ud-dev.conllu',
'debdfec0272cd558ccd29fe0ae2f13175dd20a33'),
'test': ('grc-ud-test.conllu',
'f19accf31db95e2c736d716d3438c09aa877eb07'),
'train': ('grc-ud-train.conllu',
'e98d3eabea67787c5d43a498f5a0fa4246f38104')},
'ar_nyuad': {'dev': ('ar_nyuad-ud-dev.conllu',
'b740de9bd68e68b30b9b313eb050d44e94470ca5'),
'test': ('ar_nyuad-ud-test.conllu',
'f5d5b8979b7fedd76235d4bae77e0b4a7b0a750a'),
'train': ('ar_nyuad-ud-train.conllu',
'd065f03958fd8782a7431b6778c6665ad09444a6')},
'ar_pud': {'test': ('ar_pud-ud-test.conllu',
'2161701e6726b6feb14733a312fba6160b9eb722')},
'ar': {'dev': ('ar-ud-dev.conllu',
'5f8964974d5ba5eb3504cdafb93c34c473c4177c'),
'test': ('ar-ud-test.conllu',
'58df161047f310cc3bb4d0e615ca33466e630bb9'),
'train': ('ar-ud-train.conllu',
'0a3d5cefa1fecd6a74f2016ee73ea7a7a02eb359')},
'eu': {'dev': ('eu-ud-dev.conllu',
'3ee15b5ed46ec93d7278c8cc0351d242417d553d'),
'test': ('eu-ud-test.conllu',
'aa68d6442ac6dc1abedc19c1b98c4a9944786188'),
'train': ('eu-ud-train.conllu',
'd56ec997916e38ee6ab1badd78c119e81e4797c9')},
'be': {'dev': ('be-ud-dev.conllu',
'015473e91cf8937c46e8b721f206415abac16a35'),
'test': ('be-ud-test.conllu',
'f009ea1885f54cfd77fca8a2c89133b2af8f9f5e'),
'train': ('be-ud-train.conllu',
'26b871e28d2f356a709f106b6e3e86b417ba74e7')},
'bg': {'dev': ('bg-ud-dev.conllu',
'0a2284b10547681eb65691eb2a9f0f1662e16e90'),
'test': ('bg-ud-test.conllu',
'75ea2a5e1d55bb57efecae6ec2b5ac3cc1b37e57'),
'train': ('bg-ud-train.conllu',
'd4b2fa267010c4486885c91f3af65ff66c8be94c')},
'bxr': {'sample': ('bxr-ud-sample.conllu',
'9239bdd251a60820c71111ec54de9e7d58a8579d'),
'test': ('bxr-ud-test.conllu',
'0a06e527454ae0b547153222f67eb5db94e528fd')},
'yue': {'test': ('yue-ud-test.conllu',
'd91477c65aa75cd45489cca13f7a122066972bdb')},
'ca': {'dev': ('ca-ud-dev.conllu',
'5737824f0afff0d07a43db331f102d62c6da2d96'),
'test': ('ca-ud-test.conllu',
'0e28bd2a3b982515c1158194ad52bcbbe741e170'),
'train': ('ca-ud-train.conllu',
'b5ff2392722d4a1df3bfc52fa5b8f2043b7aec0c')},
'zh_cfl': {'test': ('zh_cfl-ud-test.conllu',
'32fe45cd0e4e11ced95202971bce74acbc6a8c30')},
'zh_hk': {'test': ('zh_hk-ud-test.conllu',
'4c75fa5bbcdcb181447b4e037224d50feb2776fb')},
'zh_pud': {'test': ('zh_pud-ud-test.conllu',
'b3e448884b7b6229379f9723b97c6e9a6fedcb61')},
'zh': {'dev': ('zh-ud-dev.conllu',
'34d8253b35ad2245d59ddffa71b5689ef267b6b2'),
'test': ('zh-ud-test.conllu',
'0f00516097650c12262298dd0fbd1b17a6d2bfe2'),
'train': ('zh-ud-train.conllu',
'9444eec5f4561f289ad140e47e49013689512a65')},
'cop': {'dev': ('cop-ud-dev.conllu',
'863d1004df1a92df52515105f6fae6ff68539595'),
'test': ('cop-ud-test.conllu',
'd3b33566679f071d4ad622ad840cd98381835706'),
'train': ('cop-ud-train.conllu',
'33d0e5de5d6077f7c52a4cd90bce0047f3e9ff6f')},
'hr': {'dev': ('hr-ud-dev.conllu',
'8da2a419980807d2e91e09b6bf496e58d442b0ba'),
'test': ('hr-ud-test.conllu',
'49d673cba3d32d39d413e557276a45a0214ed83e'),
'train': ('hr-ud-train.conllu',
'e5cc686bb46c80c84c3ac60ed459e1f124c04c08')},
'cs_cac': {'dev': ('cs_cac-ud-dev.conllu',
'69dfed28c29146b41a3428f4715bde70a6aecf00'),
'test': ('cs_cac-ud-test.conllu',
'a994b33ebbde486c1818a9df460fb112055e95de'),
'train': ('cs_cac-ud-train.conllu',
'694f8559471dc481612606bf5df078daa094a84e')},
'cs_cltt': {'dev': ('cs_cltt-ud-dev.conllu',
'f35d5dbe57cd95760901ea29de4f493d5d2a44d4'),
'test': ('cs_cltt-ud-test.conllu',
'a8f6696785e658471f759bc736b738a105cba9a3'),
'train': ('cs_cltt-ud-train.conllu',
'ab97886066bfa462e5da03d25f802489292c0b56')},
'cs_fictree': {'dev': ('cs_fictree-ud-dev.conllu',
'dc67c07737a3a8bf2633068941f2d55f1500e192'),
'test': ('cs_fictree-ud-test.conllu',
'06becaedef1cfdb8e1b2dce3f0d3a3a607d178a4'),
'train': ('cs_fictree-ud-train.conllu',
'fe7dbe3a0e6ee73e19e788c43bbb8f8f47ae1645')},
'cs_pud': {'test': ('cs_pud-ud-test.conllu',
'9f205677041de694157ba2ef3e1eadb44d467f2f')},
'cs': {'dev': ('cs-ud-dev.conllu',
'd609e895b21b8710337e23a98b58ffd7b7a54bf1'),
'test': ('cs-ud-test.conllu',
'34091286a11b1ce2a9c8bcfa03fdd86fb0e13965'),
'train': ('cs-ud-train.conllu',
'd1f855798a29d433b580d01ade0d8d062cd58534')},
'da': {'dev': ('da-ud-dev.conllu',
'2c0c798c20a2efb30273172d388342a82bb0ce3c'),
'test': ('da-ud-test.conllu',
'85a95a8527f8773f1575ceaf0ab51f204b211047'),
'train': ('da-ud-train.conllu',
'b653c029a7ae5c106f865dcef949fb3fe2aa0420')},
'nl_lassysmall': {'dev': ('nl_lassysmall-ud-dev.conllu',
'2a169af74c2206c9073c3932b4a300492a314ee5'),
'test': ('nl_lassysmall-ud-test.conllu',
'39f08896a40ad370f2acc37d58689cdc43a660a9'),
'train': ('nl_lassysmall-ud-train.conllu',
'e4fd6bac246c81bb17a3c932e251b8662739cc19')},
'nl': {'dev': ('nl-ud-dev.conllu',
'33a9387eef9f5c0b15bd1e76e78776863f1f6d90'),
'test': ('nl-ud-test.conllu',
'01b3e1048792c851fdd59882c353fcdb76dc165e'),
'train': ('nl-ud-train.conllu',
'8e6a10152b7d09ce61433dd5f715ab2401611cf6')},
'en_lines': {'dev': ('en_lines-ud-dev.conllu',
'83b63b7670ea4394b558bc26e16a004339f0a0ef'),
'test': ('en_lines-ud-test.conllu',
'ccc9d3c71a873313d138c3adb12405a97eb270d8'),
'train': ('en_lines-ud-train.conllu',
'da42bfac9fd97d98ebbbc37c65d83ff4c53b4e79')},
'en_pud': {'test': ('en_pud-ud-test.conllu',
'4a9c83ba058a7e51979af790ba0440cc274b948f')},
'en_partut': {'dev': ('en_partut-ud-dev.conllu',
'863a6f571158acaaca95223e50bd08fc0c1134f0'),
'test': ('en_partut-ud-test.conllu',
'0c0780b0f14e4623f1014e6496d639cd2d2f6ffd'),
'train': ('en_partut-ud-train.conllu',
'e00a2d6f7efa28c8aaa40dccdf29b59a50f48e18')},
'en': {'dev': ('en-ud-dev.conllu',
'e2159dda4400d289ad8a403b466c8d23d733ba35'),
'test': ('en-ud-test.conllu',
'bd36ef23f76155625b379d063427bd62f19b7658'),
'train': ('en-ud-train.conllu',
'993c44f62104971fe2d056847349facbb7986258')},
'et': {'dev': ('et-ud-dev.conllu',
'312f9477f7ee1dd380c1fbcf77a6f0c63476fdbb'),
'test': ('et-ud-test.conllu',
'd70907f0771b41a27406672b9d91043a0954f946'),
'train': ('et-ud-train.conllu',
'b6d788e7a3362d0984d1cff06c1ba3d66f6bf773')},
'fi_ftb': {'dev': ('fi_ftb-ud-dev.conllu',
'552ec574acdb3209e7545af4e16a43a1e2956979'),
'test': ('fi_ftb-ud-test.conllu',
'13c34838a0fa9e379f9624ed1f4c368ca50a7d98'),
'train': ('fi_ftb-ud-train.conllu',
'73d025250bfc82a24181b5ed601dc4ae7c8e846c')},
'fi_pud': {'test': ('fi_pud-ud-test.conllu',
'4ab7b0d99ce6697d79732e401be97585a28c2afa')},
'fi': {'dev': ('fi-ud-dev.conllu',
'e023cf7eaffbda20bd4518d87fe9086207bb5361'),
'test': ('fi-ud-test.conllu',
'fd57c5106e43994250f4472890572bdbb8b4a48b'),
'train': ('fi-ud-train.conllu',
'ab27bda8cbb62886196b78de87985a4c6cf8215d')},
'fr_ftb': {'dev': ('fr_ftb-ud-dev.conllu',
'71b3cc02601f64711f98e33a6b2af10aa00700be'),
'test': ('fr_ftb-ud-test.conllu',
'723b8c44e74202a18b7e71268b738a5e1aa15f86'),
'train': ('fr_ftb-ud-train.conllu',
'9a347120478254647deb7c7e02871b28aad23ec4')},
'fr_pud': {'test': ('fr_pud-ud-test.conllu',
'570b7e31dc359ed62123bea6546efa13cfc2cf25')},
'fr_partut': {'dev': ('fr_partut-ud-dev.conllu',
'1505030048829a8dccc466cc86bca057996301ae'),
'test': ('fr_partut-ud-test.conllu',
'f6446317c9f82cc0b70a76be75282804a3359ac0'),
'train': ('fr_partut-ud-train.conllu',
'f87c246cfa91186b90c7780cb64783034f196622')},
'fr_sequoia': {'dev': ('fr_sequoia-ud-dev.conllu',
'859b10d80c7b3a382571cce9b2620039673539d1'),
'test': ('fr_sequoia-ud-test.conllu',
'be0ef69e392e64030414748da2995433f23e033d'),
'train': ('fr_sequoia-ud-train.conllu',
'48ac01913518888a32670a687123ed1bac57e0e9')},
'fr': {'dev': ('fr-ud-dev.conllu',
'5de0aee778bcc69d14285ada88f0ff7e5ac0a0cd'),
'test': ('fr-ud-test.conllu',
'd20a014acd38193155a33a5233c13f89541c78c3'),
'train': ('fr-ud-train.conllu',
'feee0cc85a2d7dcb3397399ef22c8af8ef75420b')},
'gl_treegal': {'dev': ('gl_treegal-ud-dev.conllu',
'272558614cff4a5e1f2805626904e6dc488b8d25'),
'test': ('gl_treegal-ud-test.conllu',
'18d99474d3aa9c83878c42a79d7881330dd9b861'),
'train': ('gl_treegal-ud-train.conllu',
'b1691dd5f587a19eb9dc6f141ecbd3eec3bb0e07')},
'gl': {'dev': ('gl-ud-dev.conllu',
'e72390dce9bf973442deef31ed0cd7a975361fe5'),
'test': ('gl-ud-test.conllu',
'7d82ba3672bd4427674428e1dcbcae4feebc3aeb'),
'train': ('gl-ud-train.conllu',
'd586e7bffa314f8c5b85288e060e68dddc1f5d33')},
'de_pud': {'test': ('de_pud-ud-test.conllu',
'2c91e42b7345145290b68385ff5270910048b8c4')},
'de': {'dev': ('de-ud-dev.conllu',
'9b4f49bfa2b609d54369890d9e7d8d24a3c229af'),
'test': ('de-ud-test.conllu',
'48f0f6f98b38710906481b5e9fe1d459d28f1b4a'),
'train': ('de-ud-train.conllu',
'04a1d6a6a2da9d9c38496118e0432c9a6720db64')},
'got': {'dev': ('got-ud-dev.conllu',
'501c47193ca2af5826e4afcc04941df87a7c47c3'),
'test': ('got-ud-test.conllu',
'cfcf16d562434987562bd1f5faa0d8c007e9ddb8'),
'train': ('got-ud-train.conllu',
'b4951ede89d947c6617df782ac248566235f78fb')},
'el': {'dev': ('el-ud-dev.conllu',
'9df0919ed6f9dcab3ba3f60f0ad31d0c79ae6cdb'),
'test': ('el-ud-test.conllu',
'1bb4a6b24521f0c3c7d6cf71e2456ef3a1ee31aa'),
'train': ('el-ud-train.conllu',
'32f4abc821624c4cd4d3b3b555c1558f06366e2c')},
'he': {'dev': ('he-ud-dev.conllu',
'c5b76874fcf11c7733e1555957bb49e8298af140'),
'test': ('he-ud-test.conllu',
'4fbe4115948250fc2e42dd43399d1c6c11ddcfd2'),
'train': ('he-ud-train.conllu',
'eae49a515b38d224b109138bf006a112e80a7caf')},
'hi_pud': {'test': ('hi_pud-ud-test.conllu',
'd237fecc594186e7a52ad33313ac52e927905d73')},
'hi': {'dev': ('hi-ud-dev.conllu',
'48b592bb1aa1cbc30d41d2913421cfd3f9d2c790'),
'test': ('hi-ud-test.conllu',
'004a7fdde368f32f9f230bc5e2cf4ce9e1d8f8d7'),
'train': ('hi-ud-train.conllu',
'9be8afb2cabda361817c55b3de6ebba2c3fef7e0')},
'hu': {'dev': ('hu-ud-dev.conllu',
'ec622e6bcf2a84b0b47eba0de01cf5768157a50e'),
'test': ('hu-ud-test.conllu',
'fd717d25add38c2fb2dc8e82e2f9e5b0b9f3c5b8'),
'train': ('hu-ud-train.conllu',
'e5486523a8bebe40d633ad8b4050be8a3d11c78a')},
'id': {'dev': ('id-ud-dev.conllu',
'7b181aa954a4f4b22b80a18e4f67cbf423e9c701'),
'test': ('id-ud-test.conllu',
'357ed8c216725760bf5be561ed6e918ce602b5ac'),
'train': ('id-ud-train.conllu',
'328ea588b75de55ef48373c2bf9983bca277d724')},
'ga': {'dev': ('ga-ud-dev.conllu',
'180a1a9dcfcec6528a559032c67e9a15693a039d'),
'test': ('ga-ud-test.conllu',
'b74a56372af3f68f089ea82ba858e5a82aae4e22'),
'train': ('ga-ud-train.conllu',
'40df0b12fbadae6e56c0a01da483d6c612d9450c')},
'it_pud': {'test': ('it_pud-ud-test.conllu',
'c7121c03dbdc7d27f89c6f6dd8f046b89233438e')},
'it_partut': {'dev': ('it_partut-ud-dev.conllu',
'0bb5dc0c0815212c9832eaef3b802cf885e0543b'),
'test': ('it_partut-ud-test.conllu',
'b5eccd3d9a94a2f96c8c3a6e4192a287ac563898'),
'train': ('it_partut-ud-train.conllu',
'784b18bf8d3b59d967d147075a3cb5b03fb28637')},
'it_postwita': {'dev': ('it_postwita-ud-dev.conllu',
'07f6f658246aa070e2166e688f7569d61aafff54'),
'test': ('it_postwita-ud-test.conllu',
'c2d58f50e51d37cb5f55bd0a3129138e95a72a8a'),
'train': ('it_postwita-ud-train.conllu',
'69684c47fba99230f6ef1a204b95c37d28eaa5a6')},
'it': {'dev': ('it-ud-dev.conllu',
'ea8fd59f36280fbd77b9a807959491636048a698'),
'test': ('it-ud-test.conllu',
'34839fdeeef883f8034c723a18772947106cec6b'),
'train': ('it-ud-train.conllu',
'a0cae413f46a344366f86bc7ffe4f5d7ecbf6a14')},
'ja_pud': {'test': ('ja_pud-ud-test.conllu',
'4c914016a0968ca434348370d38c9579a60e8fd7')},
'ja': {'dev': ('ja-ud-dev.conllu',
'21f06fef7fbeccd05a298385bf40f8b4ffe95146'),
'test': ('ja-ud-test.conllu',
'240d3532698356a7c6f93c3215718ef2f66a672f'),
'train': ('ja-ud-train.conllu',
'35eaf307d94c2006241fe08f745d7b1b17f049cf')},
'kk': {'dev': ('kk-ud-dev.conllu',
'038033c822b407040a4ecb87c077506cd0d1a322'),
'test': ('kk-ud-test.conllu',
'4124bcaa6e4fc132613d94a882abcff8ecad8ca0'),
'train': ('kk-ud-train.conllu',
'48d664d273ad6731cb65228ce9b57ad3cf50f7f5')},
'ko': {'dev': ('ko-ud-dev.conllu',
'60e7da7cca44c923873a062e80262726659f5528'),
'test': ('ko-ud-test.conllu',
'bc9a0fc4ddfed14b70bb58048bf8b8d50062cffd'),
'train': ('ko-ud-train.conllu',
'ee21328f9ea39668e802f0cb6a794358f5c256bf')},
'kmr': {'sample': ('kmr-ud-sample.conllu',
'd76d631400d17b63b9592ce3c0f4ecada012d6d0'),
'test': ('kmr-ud-test.conllu',
'606a338db2d6adde6b4d7d8c9ee2bdf1f988d729')},
'la_ittb': {'dev': ('la_ittb-ud-dev.conllu',
'd9f17992bd0258a734aea9b6c53759039717c86a'),
'test': ('la_ittb-ud-test.conllu',
'f4d097d076083240c48594d4cb058840ff16be8e'),
'train': ('la_ittb-ud-train.conllu',
'627d5b30b20655efab194c75fc9219b0aa2cf4b6')},
'la_proiel': {'dev': ('la_proiel-ud-dev.conllu',
'9a510ff1f29b507ce46d32c04eb8f02ec8bdb4fb'),
'test': ('la_proiel-ud-test.conllu',
'697dbeae38507856a4fafa8506dfc8db5e8e4054'),
'train': ('la_proiel-ud-train.conllu',
'5e57e0a83ed8dcdfcc892c2558249cb6bc02b37a')},
'la': {'dev': ('la-ud-dev.conllu',
'2748bb0479cb599e1a007d1d1634d5870b45549b'),
'test': ('la-ud-test.conllu',
'19c62c64ce41a650e9b55a345c61e7c0d994816e'),
'train': ('la-ud-train.conllu',
'183ce6f58b0305e5926161e29b9a6aacc424662c')},
'lv': {'dev': ('lv-ud-dev.conllu',
'6bf3843d92aeb5b4a5e3b457708ad0aca176fbd2'),
'test': ('lv-ud-test.conllu',
'9f7806a24656db0e859efe041a88926b220b8e28'),
'train': ('lv-ud-train.conllu',
'f1eeff608e8f27d92b683ae041591355198841eb')},
'lt': {'dev': ('lt-ud-dev.conllu',
'0b8dc19005571fa7b66d8302b797d51a241f128b'),
'test': ('lt-ud-test.conllu',
'def54d6caf97610eb4ca8c0179d661c8eab98951'),
'train': ('lt-ud-train.conllu',
'13fe42a3d21f17a5cad5aaf38692619c7713e177')},
'mr': {'dev': ('mr-ud-dev.conllu',
'abf7ac90a3696bb979e6ddc17cbc0fc761040b1b'),
'test': ('mr-ud-test.conllu',
'b70e2a135e69dc17474951bfd9c7cf3f203d4798'),
'train': ('mr-ud-train.conllu',
'24a1370184054a7f5af647997dca783d6c571242')},
'sme': {'sample': ('sme-ud-sample.conllu',
'8c456f06b363c4d273fc454a49505f783f00fe43'),
'test': ('sme-ud-test.conllu',
'6c2084f60d7f2d1468a0cb4f4a4b9669274b122e'),
'train': ('sme-ud-train.conllu',
'203eab4183fd585efe3fea7e6df493a6746b0a9f')},
'no_bokmaal': {'dev': ('no_bokmaal-ud-dev.conllu',
'3a1aa6646ee62c605a6e5a7b535434ce93d0581f'),
'test': ('no_bokmaal-ud-test.conllu',
'18336ef0e4877ae28eb7d6019afe05b5a53245d5'),
'train': ('no_bokmaal-ud-train.conllu',
'c6a1d75956dfb9376e568bf241b3ee5ebf3be3a5')},
'no_nynorsk': {'dev': ('no_nynorsk-ud-dev.conllu',
'5b95a070d11a61a23fc340ecbbbbb70f86884498'),
'test': ('no_nynorsk-ud-test.conllu',
'3eaab8e4af82de2333521e9be0954ffaf6b1440b'),
'train': ('no_nynorsk-ud-train.conllu',
'79319993097c30ddf28d4c1137b8662f4f35d17e')},
'no_nynorsklia': {'dev': ('no_nynorsklia-ud-dev.conllu',
'f3e3cc9b156784c12e7540b6e09a19963df8d7d9'),
'test': ('no_nynorsklia-ud-test.conllu',
'c43abf4ad0d9c1d844edb9ff0fdf8b00949c4a0b')},
'cu': {'dev': ('cu-ud-dev.conllu',
'0b67035ed5ca52aeefae443611232ed202fb990a'),
'test': ('cu-ud-test.conllu',
'0fed872a5a2480b601c67ebbecf8dcd680b6863b'),
'train': ('cu-ud-train.conllu',
'1c58f7322b96aa65e2b6bbeb5cb5226b46dc3ef0')},
'fa': {'dev': ('fa-ud-dev.conllu',
'098f97ff4c0a6a9dcaafe2c83908b1ff044b4446'),
'test': ('fa-ud-test.conllu',
'0024aa6bad5eceed2e36f77d88578304a5886a80'),
'train': ('fa-ud-train.conllu',
'1692f90f58fb1ed2faaa4e8c5d2d47a37c47082b')},
'pl': {'dev': ('pl-ud-dev.conllu',
'b7af7bee091feb0788eb9793a7102972006421dc'),
'test': ('pl-ud-test.conllu',
'e141e793ba35f8a08510ec1ce494099b5c800ca8'),
'train': ('pl-ud-train.conllu',
'f2227ba184a5030fc47b1aff732e04ae11b9ab94')},
'pt_br': {'dev': ('pt_br-ud-dev.conllu',
'8eedc77096a87fe8ab251100d460780e161e5397'),
'test': ('pt_br-ud-test.conllu',
'37a64e3acef107b62ab62ce478fc36ed112fb58f'),
'train': ('pt_br-ud-train.conllu',
'023cafcb6959d52298ad619f7838f26db9798aa9')},
'pt_pud': {'test': ('pt_pud-ud-test.conllu',
'4f7a98b59255ff58a1a423dda6f2cb7261dcea7d')},
'pt': {'dev': ('pt-ud-dev.conllu',
'2171b4ac2b0726c9dfae6adf394b76be927accab'),
'test': ('pt-ud-test.conllu',
'9e819a4592db42905806141d6fca3b7b20396ce3'),
'train': ('pt-ud-train.conllu',
'b5fbb6598d5cc53a0f7e699adeb4a61948a49b5c')},
'ro_nonstandard': {'test': ('ro_nonstandard-ud-test.conllu',
'300d53091412dc5700dc5cad0fd3e136f7c8cb11'),
'train': ('ro_nonstandard-ud-train.conllu',
'ed97f51129b63857627f838f68f41c9ef8541686')},
'ro': {'dev': ('ro-ud-dev.conllu',
'a320e29582e837fa48bbe0aab8e205cadfcb4a02'),
'test': ('ro-ud-test.conllu',
'0cfe4806a28ebdc02dc7ea58635d8b550c3a9d7b'),
'train': ('ro-ud-train.conllu',
'74beb2aa92d2fca50dbb1a4f716b936afb436ab9')},
'ru_pud': {'test': ('ru_pud-ud-test.conllu',
'bca81ce7aaf3cb8add98b19faecc1d8303901631')},
'ru_syntagrus': {'dev': ('ru_syntagrus-ud-dev.conllu',
'304c6ec7fb5060583af5f890384e3a480f8c3ad5'),
'test': ('ru_syntagrus-ud-test.conllu',
'c138e39b48dc1c66d106e68ee75c6fce28ef780c'),
'train': ('ru_syntagrus-ud-train.conllu',
'8fa56fa80845e4ad946189d1e7af228b5595e312')},
'ru': {'dev': ('ru-ud-dev.conllu',
'd3b11c0fd8a87bfb7ce9666a1888126ae5ddca90'),
'test': ('ru-ud-test.conllu',
'ae13bbf49e0d2fddae8ba2eeacd15a9a77c7bfff'),
'train': ('ru-ud-train.conllu',
'fd43e7323ad2e62a6924fc5b5d48e85c6ab5a430')},
'sa': {'test': ('sa-ud-test.conllu',
'fad3a03a6834884a092b1d326625c6f663e36636')},
'sr': {'dev': ('sr-ud-dev.conllu',
'dcb9a242986285e83512ddaa4b3ada07c4cea17a'),
'test': ('sr-ud-test.conllu',
'0f0c9e394c440bb2dd514bdd6873d3ffef13821b'),
'train': ('sr-ud-train.conllu',
'97ea9bfe4ac97011598fbb5ca20b5cbaf5093334')},
'sk': {'dev': ('sk-ud-dev.conllu',
'c84563c08922d60b0c765e9f9c22d9f6f2765ff9'),
'test': ('sk-ud-test.conllu',
'89af4581c5f9058809f48788eb635a92cda0603c'),
'train': ('sk-ud-train.conllu',
'89e108093bbf5619578955fdadfe200cefd8cf01')},
'sl_sst': {'dev': ('sl_sst-ud-dev.conllu',
'c65ae82123af95ec11f47262546b5ab2fc5735e5'),
'test': ('sl_sst-ud-test.conllu',
'144a0124c1181b49d0c542a4a6d4465e45545f3b'),
'train': ('sl_sst-ud-train.conllu',
'4cbb97d5c19cfb1d85cdd54a13e24de2343a4ac5')},
'sl': {'dev': ('sl-ud-dev.conllu',
'0078572c19574d32defeae9924176da2dd701ede'),
'test': ('sl-ud-test.conllu',
'616ace00e25df99be8dd49b7bf7c48f1093df96a'),
'train': ('sl-ud-train.conllu',
'1462ac69163b30cf1399527e95f686ebf91be2d3')},
'es_ancora': {'dev': ('es_ancora-ud-dev.conllu',
'94b00cc6449a1793b5ba1d9d5c1e4b34ad1cc7d5'),
'test': ('es_ancora-ud-test.conllu',
'8d7dc8d8441e1ca4b54708a5382ed61b48bf7920'),
'train': ('es_ancora-ud-train.conllu',
'95d5bf7ad33304f3440ffb014ac094c4967c303f')},
'es_pud': {'test': ('es_pud-ud-test.conllu',
'c2b17fce1da3bdd2a50d9dd7eca101db1d2907e0')},
'es': {'dev': ('es-ud-dev.conllu',
'4cdb828c492c6b7707af0ab6c7fbf734f770630a'),
'test': ('es-ud-test.conllu',
'afd1ae1b7eb73a91456c30acf388eef4faf4785a'),
'train': ('es-ud-train.conllu',
'5ce48b44ba1b3e748a40cb5bf893d3096518ecbc')},
'sv_lines': {'dev': ('sv_lines-ud-dev.conllu',
'15f1a04d960518fe7bfee23ce227fc7b78d4b755'),
'test': ('sv_lines-ud-test.conllu',
'843df4ea3ab4f551b1eaa661652a8d6489a81d41'),
'train': ('sv_lines-ud-train.conllu',
'16e3533bf174b36d728847a36a3600f16c63baa6')},
'sv_pud': {'test': ('sv_pud-ud-test.conllu',
'18dadac0c15468256b340835ebc0529facbe9b73')},
'sv': {'dev': ('sv-ud-dev.conllu',
'6d14e1aae5c9ae37c35481c44c04bf74a4233455'),
'test': ('sv-ud-test.conllu',
'7ead0f7b49508db0022c042195ac5925b611c5b7'),
'train': ('sv-ud-train.conllu',
'68affb85efde6ed017eab1e998e9666108559e04')},
'swl': {'dev': ('swl-ud-dev.conllu',
'828e0a08f12cabfa75f9dd2b53dba58606522a7c'),
'test': ('swl-ud-test.conllu',
'674f76631cf16172d67b795ff92dfbb297eb4930'),
'train': ('swl-ud-train.conllu',
'46b721f9cae2d5ba43f818dd487600b0ce76362a')},
'ta': {'dev': ('ta-ud-dev.conllu',
'4d01f555012ddc1976933d4d928e26470f71bfa1'),
'test': ('ta-ud-test.conllu',
'e8db8816a98d8b7e81188786db7c405979a7e3c3'),
'train': ('ta-ud-train.conllu',
'6753d8c7b1b016de39c087aab45056de6021c3ae')},
'te': {'dev': ('te-ud-dev.conllu',
'29f46355d767e54e8565f76a063c43e95ead0fca'),
'test': ('te-ud-test.conllu',
'50abe345d4ab5bae021cacd096266c57b00572b8'),
'train': ('te-ud-train.conllu',
'1794469abe09e7364cda0d9764cf515dcb4a61b6')},
'tr_pud': {'test': ('tr_pud-ud-test.conllu',
'aae839e2476a2f149c98e0274d245d07a50dafaa')},
'tr': {'dev': ('tr-ud-dev.conllu',
'421de4d8d0fbdda46750523bde72880414c134a3'),
'test': ('tr-ud-test.conllu',
'b175f136f6f0271c494a58a1846971c4a07cda27'),
'train': ('tr-ud-train.conllu',
'5aeaf25fc9e00c75e377983a0d0a642e4df6ae7d')},
'uk': {'dev': ('uk-ud-dev.conllu',
'0d3e3507edcd46a3eaa8c4702d0f5d84661a6d9d'),
'test': ('uk-ud-test.conllu',
'46c88fd623894fabdafb01a826016c215e4f65cc'),
'train': ('uk-ud-train.conllu',
'd06e0e2fa67c35a20517738bd728ac3b26d8eafe')},
'hsb': {'sample': ('hsb-ud-sample.conllu',
'148eddbb19b06115ea54e17a3fca58e99a85cbd9'),
'test': ('hsb-ud-test.conllu',
'3d319288b4c06395b2627980737131995949f770')},
'ur': {'dev': ('ur-ud-dev.conllu',
'dc41e72b5adeb92f308cdc8dfcbf71f84b4a5cf9'),
'test': ('ur-ud-test.conllu',
'af5da25be4c4ec1f2a222bc462b39ca4bbcc0eb0'),
'train': ('ur-ud-train.conllu',
'488d65b394d0de264be1221614c09e541f92f9de')},
'ug': {'dev': ('ug-ud-dev.conllu',
'a2e6cd7ef51ffd7c83de7c62fbad998f1020f857'),
'test': ('ug-ud-test.conllu',
'4877323d8dbfaa8ab862f0aa8e5484fdadb9ef43')},
'vi': {'dev': ('vi-ud-dev.conllu',
'1c733d3ea3e4cce00cb0aa4d599bcb3b0a6096a8'),
'test': ('vi-ud-test.conllu',
'1bb822e58f21aa5ccac15fe6c6742a42e8389d41'),
'train': ('vi-ud-train.conllu',
'ac86132afc061625740abd524c5cdf3d35ebbbc4')}}
|
6,165 | 2c181a33c84ce262404c192abdc515924a1916a9 | import numpy as np
import pandas as pd
import geopandas as gp
from sklearn.cluster import KMeans
import shapely
import matplotlib.pyplot as plt  # was missing: plt.subplots/close below otherwise raise NameError
from descartes import PolygonPatch

# -- load the data: per-zip park quality scores and modified NYC zip-code shapes
data = pd.read_csv('/scratch/share/gdobler/parqa/output/Tables/'
                   'ParkQualityScores/QualityArea_ZipCode_FiscalYears.csv')
zips = gp.GeoDataFrame.from_file('/scratch/share/gdobler/parqa/output/'
                                 'ShapeData/ZIPCODE_Modified_Final.shp')

# -- prepare the data: z-score each zip code's fiscal-year time series
#    (columns F2004..F2015) so clustering compares shapes, not levels
cols = ['F2{0:03}'.format(i) for i in range(4, 16)]
vals = data[cols].values.astype(float)
vals -= vals.mean(-1, keepdims=True)
vals /= vals.std(-1, keepdims=True)

# -- cluster the normalized time series
km = KMeans(n_clusters=5)
km.fit(vals)

# -- assign a cluster label to each zip-code polygon (-1 = no score data)
zips['cluster'] = -1
dzips = list(data.ZIPCODE)
for ii in range(len(zips)):
    tzip = int(zips.ZIPCODE[ii])
    if tzip in dzips:
        # .loc avoids chained-assignment, which may silently fail to write
        zips.loc[ii, 'cluster'] = km.labels_[dzips.index(tzip)]

# -- plot one figure per cluster: member time series + centroid on the left,
#    member zip codes highlighted in red on the right.
#    (Previously the plotting sat outside the cluster loop, so only the last
#    cluster was ever drawn even though the filename encodes the cluster id.)
yrs = range(2004, 2016)
for tcluster in range(km.n_clusters):
    print("tcluster = " + str(tcluster))
    zips['color'] = 'none'
    zips.loc[zips['cluster'] == tcluster, 'color'] = 'red'

    plt.close('all')
    fig, ax = plt.subplots(1, 2, figsize=[10, 5])
    fig.set_facecolor('white')
    ax[1].set_xlim([-74.26, -74.26 + 0.6])
    ax[1].set_ylim([40.4, 40.4 + 0.6])
    ax[1].axis('off')
    for ii in range(len(zips)):
        geo = zips['geometry'][ii]
        # only simple polygons are drawn (MultiPolygon shapes are skipped,
        # as in the original)
        if type(geo) == shapely.geometry.polygon.Polygon:
            ax[1].add_patch(PolygonPatch(geo, fc=zips['color'][ii],
                                         linewidth=0.2))
    ax[0].plot(yrs, vals[km.labels_ == tcluster].T, color='k', lw=0.1)
    ax[0].plot(yrs, km.cluster_centers_[tcluster], color='indianred')
    ax[0].set_title('Cluster {0}'.format(tcluster))
    fig.canvas.draw()
    # note: savefig has no 'clobber' kwarg (dropped); it overwrites by default
    fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster,
                                                        km.n_clusters))
|
6,166 | 888ec915d89f1fd8fd6465f1035f7c658af78596 | {% load code_generator_tags %}from rest_framework.serializers import ModelSerializer
{% from_module_import app.name|add:'.models' models %}{% comment %}
  Emits one read-mostly DRF ModelSerializer per model in the app. The
  comment tags exist only to swallow template newlines so the generated
  Python file stays clean; text inside them never reaches the output.
{% endcomment %}{% for model in models %}


class {{ model.name }}Serializer(ModelSerializer):
    class Meta:
        model = {{ model.name }}
        depth = 1
        fields = (
            {% indent_items model.field_names 12 quote='simple' %}
        )
        read_only_fields = (){% comment %}
{% endcomment %}{% endfor %}
|
6,167 | 6903584b27c0720cebf42ed39968b18f0f67f796 | """ Url router for the federated search application
"""
from django.conf.urls import include
from django.urls import re_path
urlpatterns = [
re_path(r"^rest/", include("core_federated_search_app.rest.urls")),
]
|
6,168 | 93a47d6ba1f699d881f0d22c4775433e4a451890 | # -*- coding:utf-8 -*-
"""
逆波兰表达式,中缀表达式可以对应一棵二叉树,逆波兰表达式即该二叉树后续遍历的结果。
"""
def isOperator(c):
    """Return True if token `c` is one of the four supported binary operators."""
    # Membership in a tuple replaces the chained == comparisons; a tuple
    # (not a string) is used so multi-character tokens such as '+-' stay False.
    return c in ('+', '-', '*', '/')
# Dispatch table for the supported binary operators; replaces the original
# if/elif chain inside the evaluation loop.
_BINARY_OPS = {
    '+': lambda a, b: a + b,
    '-': lambda a, b: a - b,
    '*': lambda a, b: a * b,
    '/': lambda a, b: a / b,
}


def reversePolishNotation(p):
    """Evaluate a postfix (reverse Polish) token list and return a float.

    :param p: sequence of string tokens, e.g. ['2', '1', '+', '3', '*']
    :return: the value of the expression as a float (the original returned
             the raw string for a degenerate single-operand input; operands
             are now converted on push so the result type is consistent)
    :raises IndexError: if the expression is malformed (stack underflow)
    :raises ValueError: if a non-operator token is not a number
    """
    stack = []
    for token in p:
        op = _BINARY_OPS.get(token)
        if op is None:
            stack.append(float(token))
        else:
            right = stack.pop()
            left = stack.pop()
            stack.append(op(left, right))
    return stack[-1]
if __name__ == '__main__':
    # Example: (2 + 1) * 3 expressed in postfix form; prints 9.0.
    p = ['2', '1', '+', '3', '*']
    # BUG FIX: this was a Python 2 print statement, a SyntaxError under
    # Python 3; the call form works on both interpreters.
    print(reversePolishNotation(p))
6,169 | 3f5096ef5677373a1e436f454109c7b7577c0205 | from IPython import display
# Render the local PNG inline via IPython's rich display (e.g. in a notebook).
display.Image("./image.png")
6,170 | e5d704541acd0f68a7885d7323118e1552e064c9 | '''
You're playing casino dice game. You roll a die once. If you reroll, you earn the amount equal to the number on your second roll otherwise, you earn the amount equal to the number on your first roll.
Assuming you adopt a profit-maximizing strategy, what would be the expected amount of money you would win?
This question was asked in a data scientist interview at Tinder.
'''
import numpy as np
for threshold in range(1, 6):
rolls = np.random.randint(1, 7, size=10**7)
rerolls = np.random.randint(1, 7, size=10**7)
avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))
print(f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.')
|
6,171 | 28e5667db4a620ec627cd94154a024b4c8dbc5f7 | from nonebot_plugin_datastore import get_plugin_data
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import Mapped, MappedAsDataclass, mapped_column
# Declarative base class provided by nonebot-plugin-datastore for this plugin.
Model = get_plugin_data().Model
class MorningGreeting(MappedAsDataclass, Model):
    """One morning-greeting subscription, unique per chat destination.

    The five identity columns together form the destination key; the unique
    constraint prevents duplicate subscriptions for the same place.
    """
    __table_args__ = (
        UniqueConstraint(
            "platform",
            "bot_id",
            "group_id",
            "guild_id",
            "channel_id",
            name="unique_morning_greeting",
        ),
    )
    id: Mapped[int] = mapped_column(init=False, primary_key=True)
    platform: Mapped[str]  # presumably the adapter/platform name — confirm against callers
    bot_id: Mapped[str]  # which bot account the greeting is bound to
    group_id: Mapped[str] = mapped_column(default="")  # empty unless a group chat
    guild_id: Mapped[str] = mapped_column(default="")  # empty unless a guild
    channel_id: Mapped[str] = mapped_column(default="")  # empty unless a channel
6,172 | b92497396e711d705760db547b43cc65beba6cfd | # Generated by Django 2.1.1 on 2019-11-20 12:34
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the three sandbox-report models.

    Irreversible in practice — the deleted tables' data is lost on apply.
    """
    dependencies = [
        ('sandbox_report', '0006_sandboxreportlink_sandboxreportval'),
    ]
    operations = [
        migrations.DeleteModel(
            name='SandboxReportLink',
        ),
        migrations.DeleteModel(
            name='SandboxReportVal',
        ),
        migrations.DeleteModel(
            name='SandboxTask',
        ),
    ]
6,173 | bb02ba68eb6629dad364b5f015680e4126e655f3 | # *** Обработка исключений (исключительные события, искл. ситуации)***
# Triggering an exception
a=100
b=0
# "division by zero" - an example of an error (this line does not work)
# c=a/b
# The solution: exception handling (catching the exception)
# The "try-except" construct
# try:
#     c = a / b
#     print("Все отлично")
# except:
#     # here goes the code that runs when an exceptional situation occurs,
#     # i.e. the "fallback" code
#     print("Что-то пошло не так")
#     c=a/1
# # here may go code that executes after the previous block
# print("Result: ", c)
# Handling multiple exception types
# result=None
# try:
#     var = int(input("Введите число, но не ноль: "))
#     result = 50/var
# # handling an exception of a specific type (class)
# except ZeroDivisionError: # in this example the exception type is ZeroDivisionError
#     print("Вы попытались поделить на ноль!")
#     result=50/1
# except ValueError as val_error: # in this example the exception type is ValueError
#     print(f"По-моему, Вы ввели не число. Инфо: {val_error}")
#     result=0
# # handling the general (base) exception - catches all exceptions
# except Exception as err:
#     print(f"Что-то пошло не так: {err}")
# print("Result: ", result)
# The "try-except-finally" construct
# try:
#     var=int(input("Введите число: "))
#     c = 100/var
#     print("Полет нормальный!")
# except ZeroDivisionError:
#     c=0
#     print("Попытка деления на ноль")
# finally:
#     # finally runs in every case, even if the program terminates abnormally,
#     # so critically important logic belongs here
#     print("Критически важное действие")
# print("Result", c)
# The "try-except-else-finally" construct
try:
    var=int(input("Введите число: "))
    c = 100/var
    print("Полет нормальный!")
except ZeroDivisionError:
    c=0
    print("Попытка деления на ноль")
else:
    # else runs only when no exception was raised
    print("Логика, которая выполняется только если нет исключений")
finally:
    # finally runs in every case, even if the program terminates abnormally,
    # so critically important logic belongs here
    print("Критически важное действие")
print("Result", c)
6,174 | e41df44db92e2ef7f9c20a0f3052e1c8c28b76c7 | class Sala:
    def __init__(self, sala):
        """Create room number `sala` with no turmas assigned yet."""
        self.Turmas = []  # turmas (class groups) assigned to this room
        self.numero = sala  # the room number
    def add_turma(self, turma):
        """Register one more turma in this room."""
        self.Turmas.append(turma)
def __str__(self):
return str(self.numero)
|
6,175 | 4905b820f33619a80a9915d0603bc39e0d0368d9 | # !/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time : 2021/05/08 20:06
# @Author : Yi
# @FileName: show_slices.py
import os
import pydicom
import glob
import shutil
import random
import numpy as np
import cv2
import skimage.io as io
from data_Parameter import parse_args
import matplotlib.pyplot as plt
def dir_create(path):
    """Ensure `path` exists as a directory with no stale contents.

    A non-empty existing directory is removed and recreated; a missing one
    is created; an existing empty directory is left untouched.

    :param path: directory path to (re)create
    :return: None
    """
    # Wipe a non-empty existing directory so stale output never lingers.
    if os.path.exists(path) and os.listdir(path) != []:
        shutil.rmtree(path)
    # exist_ok covers both the "just wiped" and "never existed" cases;
    # the original duplicated makedirs across two separate branches.
    os.makedirs(path, exist_ok=True)
def read_dicom(path):
    """Read every slice of one case and stack them into a cubic float32 array.

    The cube edge is max(first slice edge, 720); each slice is z-score
    normalised and pasted centred in the XY plane, one slice per index of
    the last axis.

    :param path: case directory containing .dcm files named E<pid>S101I<n>.dcm
    :return: numpy float32 array of shape (size, size, size)
    """
    print(os.path.basename(path))
    # Case id is the token after the underscore in the directory name
    # (directories look like "..._<pid>" — confirmed by the usage below).
    pi = os.path.basename(path).split("_")[1]
    dcm_size = len(glob.glob(path + "/*.dcm"))
    dcms = [
        path + "/E" + pi + "S101I%d.dcm" % dicom_slicei
        for dicom_slicei in range(1, dcm_size + 1)
    ]
    length = int(len(dcms))
    print(length)
    dcm_f = pydicom.read_file(dcms[0]).pixel_array
    # NOTE(review): dcm_size is reused here with a second meaning (cube edge
    # length instead of slice count) — consider renaming.
    dcm_size = max(max(dcm_f.shape), 720)
    # print(dcm_f.shape)
    dcm_img = np.zeros((dcm_size, dcm_size, dcm_size), dtype=np.float32)
    for dcmi in range(len(dcms)):
        cdcm = pydicom.read_file(dcms[dcmi]).pixel_array.astype(np.float32)
        # Z-score normalise each slice independently.
        cdcm -= np.mean(cdcm)
        cdcm /= np.std(cdcm)
        # Paste the slice centred in the XY plane of the cube.
        dcm_img[
            dcm_size // 2 - cdcm.shape[0] // 2: dcm_size // 2 + cdcm.shape[0] // 2,
            dcm_size // 2 - cdcm.shape[1] // 2: dcm_size // 2 + cdcm.shape[1] // 2,
            dcmi,
        ] = cdcm
    return dcm_img
def show_image(input_dir):
    """Display slices 330-349 of one case for visual inspection.

    NOTE(review): despite the original (Chinese) docstring promising random
    display, only the sixth listed case ([5:6]) and a fixed slice range are
    shown; the random sampling lines are commented out below.

    :param input_dir: directory containing one sub-directory per case
    :return: None (figures are shown interactively)
    """
    # special cases: "P556", "P576", "P887",160*640*640
    for casei in os.listdir(input_dir)[5:6]:
        pi = casei.split("_")[1]
        dcm_img = read_dicom(input_dir + "/" + casei)
        print("Dcm shape: ", dcm_img.shape)
        # choices = random.sample(list(np.arange(0, 720, 1)), 10)
        # choices.append(316)
        choices = range(330,350)
        for i in choices:
            fig = plt.figure(num=i, figsize=(10, 10))
            ax = fig.add_subplot(111)
            img=ax.imshow(dcm_img[:, :, i], cmap='gray')
            ax.set_title(pi + '_' + str(i))
            plt.colorbar(img)
            plt.show()
def show_image_avail(input_dir):
    """Display 15 randomly chosen annotated slices (.npy files) from `input_dir`."""
    for file_name in random.sample(os.listdir(input_dir), 15):
        slice_array = np.load(input_dir + '/' + file_name)
        figure = plt.figure(figsize=(10, 5))
        axis = figure.add_subplot(111)
        shown = axis.imshow(slice_array, cmap='gray')
        axis.set_title(str(file_name))
        plt.colorbar(shown)
        plt.show()
def show_mask(input_dir):
    """Display 10 random two-channel masks: channel 0 (outer) and 1 (lumen).

    :param input_dir: directory of .npy mask files with shape (H, W, 2)
    :return: None (figures are shown interactively)
    """
    index = 0
    choices = random.sample(os.listdir(input_dir), 10)
    for file in choices:
        mask_numpy = np.load(input_dir + '/' + file)
        fig = plt.figure(num=index, figsize=(10, 5))
        ax1 = fig.add_subplot(211)
        ax1.imshow(mask_numpy[:, :, 0], cmap='gray')
        ax1.set_title(str(file) + '_outer')
        ax2 = fig.add_subplot(212)
        ax2.imshow(mask_numpy[:, :, 1], cmap='gray')
        ax2.set_title(str(file) + '_luman')
        plt.show()
        index += 1
def show_mask_circle(input_dir):
    """Display 10 randomly chosen ring-mask arrays (.npy files) from `input_dir`."""
    for file_name in random.sample(os.listdir(input_dir), 10):
        mask = np.load(input_dir + '/' + file_name)
        figure = plt.figure(figsize=(10, 5))
        axis = figure.add_subplot(111)
        shown = axis.imshow(mask[:, :], cmap='gray')
        axis.set_title(str(file_name) + '_circle')
        plt.colorbar(shown)
        plt.show()
def show_image_mask(image_path,mask_path):
    """Display 10 random image/mask pairs that share the same file name.

    :param image_path: directory of image .npy files
    :param mask_path: directory of mask .npy files with matching names
    :return: None (figures are shown interactively)
    """
    files_choice=random.sample(os.listdir(image_path),10)
    for file_name in files_choice:
        image_numpy=np.load(image_path+'/'+file_name)
        mask_numpy =np.load(mask_path+'/'+file_name)
        fig =plt.figure(figsize=(10,5))
        # Image on top, its mask below, each with a colour bar.
        ax1 =fig.add_subplot(211)
        img1=ax1.imshow(image_numpy,cmap='gray')
        ax1.set_title(str(file_name))
        plt.colorbar(img1)
        ax2=fig.add_subplot(212)
        img2=ax2.imshow(mask_numpy,cmap='gray')
        # ax2.set_title(str(file_name))
        plt.colorbar(img2)
        plt.show()
def main(args):
    """Entry point: currently shows random ICAR-positive ring masks.

    The commented alternatives toggle which viewer runs; only
    show_mask_circle is active. NOTE(review): image_input_dir is assigned
    but unused while show_image stays commented out.
    """
    image_input_dir = args.datasets_path
    # image_avail_dir = args.image_save_sep_position + '/ICAR/positive'
    # image_avail_dir = args.image_save_sep_position + '/ICAR/negative'
    # circle_mask_dir=args.circle_mask_save_sep+'/ICAR/positive'
    circle_mask_dir = args.circle_mask_save_sep + '/ICAR/positive'
    # show_image(image_input_dir)  # show a few case images
    # show_image_avail(image_avail_dir)
    show_mask_circle(circle_mask_dir)
    # show_image_mask(image_avail_dir,circle_mask_dir)
if __name__ == '__main__':
    # Parse the data-pipeline CLI arguments and run the viewer.
    args = parse_args()
    main(args)
6,176 | d869aa32cb9793ce11a5b6a782cc66c2dd0be309 | import numpy as np
import matplotlib.pyplot as plt


def _read_column(path):
    """Read one float per line from `path`; the with-block closes the file."""
    with open(path, "r") as fh:
        return [float(line) for line in fh]


# BUG FIX: the original referenced `file.close` without parentheses, so the
# method was never called and both files stayed open; the context manager in
# _read_column guarantees closure on every path.
x_list = _read_column("pos_data_x.txt")
y_list = _read_column("pos_data_y.txt")

# Alternative (currently unused) waypoint sets kept from the original.
desired_x = [0.0, 0.5, 0.5]
desired_y = [0.0, 0.0, 0.5]
desired_pos_x_list = [1.0, 1.0, 0.0, 0.0]  # [0.5, 0.5, 0.0, 0.0]
desired_pos_y_list = [0.0, 0.7, 0.7, 0.0]  # [0.0, 0.5, 0.5, 0.0]

plt.plot(x_list, y_list, label='robot trajectory')
# plt.plot(desired_x, desired_y, marker='x', label='desired position')
plt.plot(desired_pos_x_list, desired_pos_y_list, marker='x', label='desired position')
plt.title("Robot trajectory based on the wheel encoders ")
plt.xlabel("x [m]")
plt.ylabel("y [m]")
# plt.axis("square")
plt.legend()
plt.show()
6,177 | fad2ad89e4d0f04fad61e27048397a5702870ca9 | import random
import datetime
import os
import time
import json

# Destination directory for the generated bill files.
l_target_path = "E:/code/PYTHON_TRAINING/Training/Apr2020/BillingSystem/bills/"

# Emit one synthetic bill every 3 seconds, forever (Ctrl+C to stop).
while True:
    l_store_id = random.randint(1, 4)
    now = datetime.datetime.now()
    l_bill_id = now.strftime("%Y%m%d%H%M%S")

    # Generate a random bill date between 2000-01-01 and 2020-01-01.
    start_date = datetime.date(2000, 1, 1)
    end_date = datetime.date(2020, 1, 1)
    days_between_dates = (end_date - start_date).days
    l_date = start_date + datetime.timedelta(days=random.randrange(days_between_dates))

    # 1-25 random line items mapping product id -> quantity.
    l_bill_details = {}
    for _ in range(random.randint(1, 25)):
        l_bill_details[random.randint(1, 25)] = random.randint(1, 20)

    l_data = {"bill_id": l_bill_id,
              "store_id": l_store_id,
              # dates are not JSON-serializable, so store ISO-8601 text
              "bill_date": l_date.isoformat(),
              "bill_details": l_bill_details}
    print(l_data)

    # BUG FIX: the original wrote str(l_data) — a Python repr with single
    # quotes and a datetime.date() literal — which is not valid JSON even
    # though the file carries a .json extension.
    with open(l_target_path + l_bill_id + ".json", "w") as new_file:
        json.dump(l_data, new_file)

    time.sleep(3)
6,178 | d61024ecbd092852fc3396e6919d6d3c8aa554db | import json
import redis
# Module-level client shared by every publisher; host "redis" presumably
# resolves via container service discovery — confirm against deployment.
# NOTE(review): the password is hard-coded; move it to config/env.
redis_client = redis.StrictRedis(host="redis", port=6379, db=1, password="pAssw0rd")
def publish_data_on_redis(data, channel):
    """Serialize `data` as JSON and publish it on the given pub/sub channel."""
    redis_client.publish(channel, json.dumps(data))
6,179 | 8be70543a7aa177d9ad48fb736228b1ffba5df16 | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from interface_app.models import TestTask, TestCase
from interface_app.extend.task_run import run_cases
import os
import json
from interface_app.apps import TASK_PATH, RUN_TASK_FILE
"""
说明:接口任务文件,返回HTML页面
"""
# Task list page.
def task_manage(request):
    """Render the list of test tasks; any non-GET method gets a "404" body."""
    if request.method != "GET":
        return HttpResponse("404")
    context = {
        "type": "list",
        "testtasks": TestTask.objects.all(),  # lazy queryset, evaluated in the template
    }
    return render(request, "task_manage.html", context)
# Task creation page.
def add_task(request):
    """Render the empty "add task" form; any non-GET method gets a "404" body."""
    if request.method != "GET":
        return HttpResponse("404")
    return render(request, "add_task.html", {"type": "add"})
# Run a task
def run_task(request, tid):
    """Dump the task's cases to a JSON file and launch the external runner.

    GET only; other methods return a "404" body. The task's `cases` field
    is a comma-separated id list with a trailing comma (hence the pop).
    NOTE(review): status is set to 1 before the run and never reset here —
    presumably the runner script updates it afterwards; confirm.
    """
    if request.method == "GET":
        task_obj = TestTask.objects.get(id=tid)
        cases_list = task_obj.cases.split(",")
        # Drop the empty element produced by the trailing comma.
        cases_list.pop(-1)
        task_obj.status = 1  # mark the task as running
        task_obj.save()
        print(cases_list)
        # run_cases()  # run-function alternative entry point (unused)
        # Collect each case's request spec keyed by case id.
        all_cases_dict = {}
        for case_id in cases_list:
            case_obj = TestCase.objects.get(id=case_id)
            case_dict = {
                "url": case_obj.url,
                "method": case_obj.req_method,
                "type_": case_obj.req_type,
                "header": case_obj.req_header,
                "parameter": case_obj.req_parameter,
                "assert_": case_obj.resp_assert
            }
            all_cases_dict[case_obj.id] = case_dict
        print(all_cases_dict)
        cases_str = json.dumps(all_cases_dict)
        cases_data_file = TASK_PATH + "cases_data.json"
        print(cases_data_file)
        with open(cases_data_file, "w+") as f:
            f.write(cases_str)
        # Run the tests; os.system blocks until the runner script finishes.
        os.system("python3 " + RUN_TASK_FILE)
        return HttpResponseRedirect("/interface/task_manage")
    else:
        return HttpResponse("404")
# How should these cases actually be executed? -- with a unit-test framework
# plus data-driven testing: unittest + ddt
|
6,180 | 18e032b7ff7ae9d3f5fecc86f63d12f4da7b8067 | # 예시 입력값
# Example input for the claw-machine ("crane game") problem.
board = [[0,0,0,0,0],[0,0,1,0,3],[0,2,5,0,1],[4,2,4,4,2],[3,5,1,3,1]]
moves = [1,5,3,5,1,2,1,4]


def solution(board, moves):
    """Simulate the claw machine and return the number of dolls destroyed.

    `moves` holds 1-based column picks. Each pick removes the topmost doll
    of that column (zero cells are empty) and drops it onto a basket stack;
    whenever the dropped doll matches the doll already on top, both vanish
    and add 2 to the tally. `board` is modified in place (picked cells are
    zeroed).
    """
    basket = []
    destroyed = 0
    for pick in moves:
        col = pick - 1
        for row in range(len(board)):
            doll = board[row][col]
            if doll == 0:
                continue
            board[row][col] = 0  # the doll leaves the board
            # Stack idiom replaces the original length arithmetic + del slice.
            if basket and basket[-1] == doll:
                basket.pop()  # matching pair explodes
                destroyed += 2
            else:
                basket.append(doll)
            break
    return destroyed


print(solution(board, moves))
6,181 | e22574b5c458c23c48915274656f95a375cdc0e6 |
i = 0
while i < 10:
print("Hello", 2 * i + 5)
i = i + 1 |
6,182 | 8c5815c1dd71b2ae887b1c9b1968176dfceea4f9 | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
import time
import csv
options = Options()
# options.add_argument('--headless')
options.add_argument('--disable-gpu')
driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)
from selenium.common.exceptions import NoSuchElementException
try:
driver.get("http://localhost:1667/")
#Cookie accept:
button_accept = driver.find_element_by_xpath('//*[@id="cookie-policy-panel"]/div/div[2]/button[2]').click()
from selenium.webdriver.common.action_chains import ActionChains
# Activate Sign in input field
login = driver.find_element_by_xpath('//*[@id="app"]/nav/div/ul/li[2]/a')
mousehover = driver.find_element_by_xpath('//*[@id="app"]/nav/div/ul/li[2]/a')
ActionChains(driver).move_to_element(mousehover).perform()
time.sleep(3)
actions = ActionChains(driver)
actions.click(login)
actions.perform()
# Fill input fields:
def fill_login(mail, pw):
email = driver.find_element_by_xpath('//*[@id="app"]//fieldset[1]/input')
password = driver.find_element_by_xpath('//*[@id="app"]//fieldset[2]/input')
button = driver.find_element_by_xpath('//*[@id="app"]//form/button')
email.send_keys(mail)
password.send_keys(pw)
button.click()
username="kiskacsa3"
fill_login("kiskacsa3@gmail.com", "Kiskacsa3$")
# Activate Log out:
time.sleep(3)
logout = driver.find_element_by_xpath('//*[@id="app"]/nav/div/ul/li[5]')
mousehover = driver.find_element_by_xpath('//*[@id="app"]/nav/div/ul/li[5]/a')
ActionChains(driver).move_to_element(mousehover).perform()
time.sleep(3)
actions = ActionChains(driver)
actions.click(logout)
actions.perform()
# Checking the disappered username:
if logout:
def test_element_does_not_exist(self):
with self.assertRaises(NoSuchElementException):
driver.find_element_by_xpath("log_out")
return("User panel disappered.")
finally:
pass
# driver.close() |
6,183 | ed5dd954dedb00bf645f9ca14b5ca9cd122b2adc | from .gunicorn import *
from .server_app import *
|
6,184 | 6e557c2b85031a0038afd6a9987e3417b926218f | import os
from setuptools import setup

# BUG FIX: the version was imported from 'django_spaghetti' — a leftover
# from the project this file was copied from. This package ships the
# 'django_themes' module, so its version is read from there instead.
from django_themes import __version__

with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-themes',
    version=__version__,
    packages=['django_themes'],
    include_package_data=True,
    license='MIT License',
    description='Admin extensions to make theming django sites easier for end users of django sites',
    long_description=README,
    url='https://github.com/LegoStormtroopr/django-themes/',
    author='Samuel Spencer',
    author_email='sam@aristotlemetadata.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # Replace these appropriately if you are stuck on Python 2.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
    keywords='django themes',
    install_requires=['django'],  # I mean obviously you'll have django installed if you want to use this.
)
|
6,185 | be58a2e0dcdbcb3a3df0da87be29ce7ebcee7fe9 | class Process:
    def __init__(self, id, at, bt):
        """Create a process with its id, arrival time and burst time."""
        self.id = id    # process identifier
        self.at = at    # arrival time
        self.bt = bt    # burst (CPU) time
        self.wt = 0     # waiting time, computed by fill()
        self.ct = 0     # completion time, computed by fill()
        self.st = 0     # start time, set by fill()
        self.tat = 0    # turnaround time, computed by fill()
def fill(self, st):
print('Current process:', self.id)
self.st = st
self.ct = self.st + self.bt
self.tat = self.ct - self.at
self.wt = self.tat - self.bt
return self.ct
def print(self):
st = '\t'.join(map(str, [self.id, self.at, self.bt, self.ct, self.tat, self.wt]))
print(st)
    @classmethod
    def display(cls, process_list):
        """Print a header row followed by one metrics row per process."""
        print('ID\tAT\tBT\tCT\tTAT\tWT')
        for process in process_list:
            process.print()
        print('----------------------')
if __name__ == '__main__':
    # Sample workload; each row follows the format [ID, AT, BT].
    l = [
        [1, 5, 0],
        [2, 3, 1],
        [3, 8, 2],
        [4, 6, 3],
    ]
    processes = [Process(*row) for row in l]
    Process.display(processes)

    print('Sorting.')
    # First-come-first-served: serve the queue in order of arrival time.
    processes.sort(key=lambda proc: proc.at)
    Process.display(processes)

    # Walk the timeline: each process starts when the CPU is free or when
    # it arrives, whichever is later.
    t = processes[0].at
    for process in processes:
        t = process.fill(max(t, process.at))
    # BUG FIX: the original printed this final table twice in a row; the
    # duplicate call (and the dead commented-out input code plus the unused
    # `n` variable) have been removed.
    Process.display(processes)
|
6,186 | e1228f5e17bae6632f8decd114f72723dbbce944 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from libtbx.program_template import ProgramTemplate
from mmtbx import pdbtools
from libtbx import Auto
import os
import mmtbx.pdbtools
from cctbx import uctbx
class Program(ProgramTemplate):
  description = '''
phenix.pdbtools tools for PDB model manipulations.
Usage examples:
  phenix.pdbtools model.pdb sites.shake=0.4
  phenix.pdbtools model.cif remove="element H"
'''
  datatypes = ['model', 'phil']
  master_phil_str = """\
include scope mmtbx.pdbtools.master_params
output {
  prefix = None
    .type = str
  suffix = _modified
    .type = str
  serial = None
    .type = int
  overwrite = True
    .type = bool
}
# temporary GUI PHIL
include scope libtbx.phil.interface.tracking_params
gui
  .help = "GUI-specific parameter required for output directory"
{
  output_dir = None
  .type = path
  .style = output_dir
}
"""
  def validate(self):
    """Require exactly one input model; raises Sorry otherwise."""
    print('Validating inputs', file=self.logger)
    self.data_manager.has_models(
      raise_sorry = True,
      expected_n = 1,
      exact_count = True)
  def run(self):
    """Apply the requested manipulations and write the modified model.

    Stores the output file name in self.result for get_results().
    """
    self.model = self.data_manager.get_model()
    cs = self.model.crystal_symmetry()
    # Models without usable symmetry get a padded P1 box so downstream
    # manipulations have a defined unit cell.
    if(cs is None or cs.is_empty() or cs.is_nonsense()):
      print("Crystal symmetry undefined, creating fake P1 box.")
      box_crystal_symmetry = \
        uctbx.non_crystallographic_unit_cell_with_the_sites_in_its_center(
          sites_cart = self.model.get_sites_cart(),
          buffer_layer = 5).crystal_symmetry()
      self.model.set_crystal_symmetry(crystal_symmetry = box_crystal_symmetry)
    print('Performing manipulations', file=self.logger)
    self.model = mmtbx.pdbtools.modify(
      model = self.model,
      params = self.params.modify,
      log = self.logger).get_results().model
    # Write output model file
    # Strip the 4-character extension (.pdb/.cif) from the input name.
    input_file_name_base = os.path.basename(
      self.data_manager.get_default_model_name())[:-4]
    # NOTE(review): if the input is neither CIF nor PDB format, `extension`
    # stays unbound and the concatenation below raises NameError — confirm
    # whether other formats can reach this point.
    if( self.model.input_model_format_cif()): extension = ".cif"
    elif(self.model.input_model_format_pdb()): extension = ".pdb"
    if(self.params.output.prefix is not None):
      output_file_name = self.params.output.prefix
      if(self.params.output.suffix is not None):
        output_file_name = output_file_name + self.params.output.suffix
    else:
      output_file_name = input_file_name_base + self.params.output.suffix
    output_file_name = output_file_name + extension
    ofn = self.get_default_output_filename(
      prefix=output_file_name,
      suffix=None,
      serial=Auto)
    print('Writing output model', file=self.logger)
    # Only embed symmetry in the output when the input actually had some.
    output_cs=True
    if(cs is None): output_cs = False
    self.data_manager.write_model_file(self.model.model_as_str(
      output_cs=output_cs), ofn)
    self.result = ofn
  def get_results(self):
    """Return the path of the written output model file."""
    return self.result
# Re-export the PHIL string at module level so external PHIL-parsing tools
# can access it without instantiating the Program class.
master_phil_str = Program.master_phil_str
|
6,187 | 5bfb7fc60ddf4f6ad6d89771eb0a8903b04da3d9 | '''
Import necessary libraries
'''
import re
import csv
import os
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup as soup
'''
Function to request page html from given URL
'''
def page_html(requested_url):
    """Fetch `requested_url` and return the raw response body as bytes.

    Browser-like headers are sent because some sites reject urllib's
    default User-Agent. Any failure (bad URL, network error, read error)
    yields None, matching the original best-effort contract.
    """
    # define headers to be provided for request authentication
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
                             'AppleWebKit/537.11 (KHTML, like Gecko) '
                             'Chrome/23.0.1271.64 Safari/537.11',
               'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
               'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
               'Accept-Encoding': 'none',
               'Accept-Language': 'en-US,en;q=0.8',
               'Connection': 'keep-alive'}
    try:
        request_obj = Request(url=requested_url, headers=headers)
        # BUG FIX: the original leaked the connection whenever read()
        # raised; the context manager closes it on every path. The local
        # result also no longer shadows this function's own name.
        with urlopen(request_obj) as response:
            return response.read()
    except Exception:
        # Best-effort by design: callers treat None as "page unavailable".
        return None
'''
Function to acquire the maximum number of jobs (only applicable for the base/ first html)
'''
def max_num_jobs(page_html):
    """Return the raw text of the page's "jobsCount" element.

    Only meaningful for the first (base) results page, which carries the
    total job count. NOTE(review): raises AttributeError if the element is
    missing — presumably acceptable because the base page always has it.
    """
    page_soup = soup(page_html, "html.parser")
    max_ = page_soup.find("p", {"class": "jobsCount"})
    return(max_.get_text())
'''
Function to return a list of job page links from a given html page
'''
def get_listing_links(page_html):
    """Extract unique job-page URLs from one results page.

    Returns a list of absolute glassdoor.sg links, or None when parsing
    fails (the broad except swallows the error and falls through).
    """
    try:
        # Dicts double as insertion-ordered sets here: keys de-duplicate,
        # values are unused.
        obj_links = {}
        id_temp_dict = {}
        page_soup = soup(page_html, "html.parser")
        #grab all divs with a class of result
        results = page_soup.findAll("ul", {"class": "jlGrid hover"})
        for result in results:
            links = result.findAll('a')
            for a in links:
                formatted_link = "http://www.glassdoor.sg" + a['href']
                # The last 10 characters are treated as the job id so the
                # same posting reached via different anchors is kept once.
                # NOTE(review): this 10-char heuristic is unverified here.
                id_temp = formatted_link[-10:]
                if id_temp not in id_temp_dict.keys():
                    id_temp_dict[id_temp] = None
                    obj_links[formatted_link] = None
        return list(obj_links.keys())
    except Exception as e:
        # print(e)
        pass
'''
Function to return a dictionary of scrapped information from a single job page link
'''
def jobpage_scrape(extracted_link, page_html):
    """Scrape one job page into a dict: link, title, summary, description.

    Every field degrades to None when its element is missing so a partial
    page still yields a usable row. The original repeated the same
    try/except boilerplate for each field; _element_text centralises it.
    NOTE(review): the `page_html` parameter shadows the module-level
    page_html() fetcher — kept unchanged to preserve the call interface.
    """
    page_soup = soup(page_html, "html.parser")
    jobpage_info = {'job_link': extracted_link}

    jobpage_info['job_title'] = _element_text(page_soup, "div", "jobViewJobTitleWrap")

    summary = _element_text(page_soup, "div", "summaryColumn")
    if summary is not None:
        summary = summary.replace("\xa0–\xa0", ' ')
    jobpage_info['summary_column'] = summary

    description = _element_text(page_soup, "div", "jobDescriptionContent desc")
    if description is not None:
        # Collapse runs of blank lines, then flatten everything to one line.
        description = re.sub('\n{2,}', '\n', description).replace('\n', " ")
    jobpage_info['job_description'] = description
    return jobpage_info


def _element_text(page_soup, tag, css_class):
    """Return the text of the first `tag` with class `css_class`, else None."""
    element = page_soup.find(tag, {"class": css_class})
    return element.get_text() if element is not None else None
'''
Function to write a dictionary of scrapped information onto a csv file
'''
def write_to_file(jobpage_info):
    """Append one CSV row holding the dict's values to output.csv.

    Best effort by design: any error raised while writing the row is
    swallowed, exactly as in the original implementation.
    """
    with open('output.csv', 'a', newline='', encoding="utf-8") as out_file:
        try:
            csv.writer(out_file).writerow(jobpage_info.values())
        except Exception:
            pass
6,188 | 49679782ac696b3dc4f5038565f88304a44098e1 | #!/usr/bin/env python3
import json
import sys
import time
import zmq
log_file = "./mavlink-log.txt"
zmq_context = zmq.Context()
connect_to = sys.argv[1]
send_socket = zmq_context.socket(zmq.PUSH)
send_socket.connect(connect_to)
def get_first_timestamp(log_file):
    """Return the "timestamp" field of the first JSON record in `log_file`.

    Returns None for an empty file (mirrors the original loop's fall-through).
    """
    with open(log_file) as fh:
        first_line = next(fh, None)
    if first_line is None:
        return None
    return json.loads(first_line)["timestamp"]
# Anchor both clocks so the records replay with their original pacing:
# file time is measured from the first record, wall time from "now".
start_time_file = get_first_timestamp(log_file)
start_time_importer = time.time()
with open(log_file) as f:
    for line in f:
        line_json = json.loads(line)
        importer_age = time.time() - start_time_importer
        line_age = line_json["timestamp"] - start_time_file
        # Sleep until the wall clock catches up with this record's offset;
        # a non-positive value means we are behind schedule, so send now.
        sleep_time = line_age - importer_age
        if sleep_time > 0:
            #print(str(line_age)+" - "+str(importer_age))
            #print(sleep_time)
            time.sleep(sleep_time)
        print(line_json)
        send_socket.send_json(line_json)
|
6,189 | 52eec56f7f5da8356f61301994f846ef7769f73b | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import tempfile
from functools import partial
import numpy as np
import torch
from ax.benchmark.benchmark_problem import SimpleBenchmarkProblem
from ax.core.metric import Metric
from ax.core.runner import Runner
from ax.exceptions.storage import JSONDecodeError, JSONEncodeError
from ax.modelbridge.base import ModelBridge
from ax.modelbridge.registry import Models
from ax.storage.json_store.decoder import (
generation_strategy_from_json,
object_from_json,
)
from ax.storage.json_store.decoders import class_from_json
from ax.storage.json_store.encoder import object_to_json
from ax.storage.json_store.encoders import botorch_modular_to_dict
from ax.storage.json_store.load import load_experiment
from ax.storage.json_store.registry import CLASS_ENCODER_REGISTRY
from ax.storage.json_store.save import save_experiment
from ax.storage.metric_registry import register_metric
from ax.storage.runner_registry import register_runner
from ax.utils.common.testutils import TestCase
from ax.utils.measurement.synthetic_functions import ackley, branin, from_botorch
from ax.utils.testing.benchmark_stubs import (
get_branin_benchmark_problem,
get_branin_simple_benchmark_problem,
get_mult_simple_benchmark_problem,
get_sum_simple_benchmark_problem,
)
from ax.utils.testing.core_stubs import (
get_abandoned_arm,
get_acquisition_function_type,
get_acquisition_type,
get_arm,
get_augmented_branin_metric,
get_augmented_hartmann_metric,
get_batch_trial,
get_botorch_model,
get_botorch_model_with_default_acquisition_class,
get_branin_data,
get_branin_experiment,
get_branin_metric,
get_choice_parameter,
get_experiment_with_batch_and_single_trial,
get_experiment_with_data,
get_experiment_with_trial_with_ttl,
get_experiment_with_map_data_type,
get_factorial_metric,
get_fixed_parameter,
get_generator_run,
get_map_data,
get_hartmann_metric,
get_list_surrogate,
get_metric,
get_mll_type,
get_model_type,
get_multi_objective,
get_multi_objective_optimization_config,
get_multi_type_experiment,
get_objective,
get_objective_threshold,
get_optimization_config,
get_order_constraint,
get_outcome_constraint,
get_parameter_constraint,
get_percentile_early_stopping_strategy,
get_range_parameter,
get_scalarized_objective,
get_search_space,
get_simple_experiment_with_batch_trial,
get_sum_constraint1,
get_sum_constraint2,
get_surrogate,
get_synthetic_runner,
get_trial,
)
from ax.utils.testing.modeling_stubs import (
get_generation_strategy,
get_observation_features,
get_transform_type,
)
from botorch.test_functions.synthetic import Ackley
# (human-readable class name, stub factory) pairs consumed by
# JSONStoreTest.testEncodeDecode: every stub must survive an
# object_to_json / object_from_json round trip unchanged.
TEST_CASES = [
    ("AbandonedArm", get_abandoned_arm),
    ("Arm", get_arm),
    ("AugmentedBraninMetric", get_augmented_branin_metric),
    ("AugmentedHartmannMetric", get_augmented_hartmann_metric),
    ("BatchTrial", get_batch_trial),
    ("BenchmarkProblem", get_branin_benchmark_problem),
    ("BoTorchModel", get_botorch_model),
    ("BoTorchModel", get_botorch_model_with_default_acquisition_class),
    ("BraninMetric", get_branin_metric),
    ("ChoiceParameter", get_choice_parameter),
    ("Experiment", get_experiment_with_batch_and_single_trial),
    ("Experiment", get_experiment_with_trial_with_ttl),
    ("Experiment", get_experiment_with_data),
    ("Experiment", get_experiment_with_map_data_type),
    ("FactorialMetric", get_factorial_metric),
    ("FixedParameter", get_fixed_parameter),
    ("Hartmann6Metric", get_hartmann_metric),
    ("GenerationStrategy", partial(get_generation_strategy, with_experiment=True)),
    ("GeneratorRun", get_generator_run),
    ("ListSurrogate", get_list_surrogate),
    ("MapData", get_map_data),
    ("Metric", get_metric),
    ("MultiObjective", get_multi_objective),
    ("MultiObjectiveOptimizationConfig", get_multi_objective_optimization_config),
    ("MultiTypeExperiment", get_multi_type_experiment),
    ("ObservationFeatures", get_observation_features),
    ("Objective", get_objective),
    ("ObjectiveThreshold", get_objective_threshold),
    ("OptimizationConfig", get_optimization_config),
    ("OrderConstraint", get_order_constraint),
    ("OutcomeConstraint", get_outcome_constraint),
    ("PercentileEarlyStoppingStrategy", get_percentile_early_stopping_strategy),
    ("ParameterConstraint", get_parameter_constraint),
    ("RangeParameter", get_range_parameter),
    ("ScalarizedObjective", get_scalarized_objective),
    ("SearchSpace", get_search_space),
    ("SimpleBenchmarkProblem", get_mult_simple_benchmark_problem),
    ("SimpleBenchmarkProblem", get_branin_simple_benchmark_problem),
    ("SimpleBenchmarkProblem", get_sum_simple_benchmark_problem),
    ("SimpleExperiment", get_simple_experiment_with_batch_trial),
    ("SumConstraint", get_sum_constraint1),
    ("SumConstraint", get_sum_constraint2),
    ("Surrogate", get_surrogate),
    ("SyntheticRunner", get_synthetic_runner),
    ("Type[Acquisition]", get_acquisition_type),
    ("Type[AcquisitionFunction]", get_acquisition_function_type),
    ("Type[Model]", get_model_type),
    ("Type[MarginalLogLikelihood]", get_mll_type),
    ("Type[Transform]", get_transform_type),
    ("Trial", get_trial),
]
class JSONStoreTest(TestCase):
    """Round-trip tests for Ax's JSON storage layer: encode/decode and file save/load."""
    def setUp(self):
        # Shared fixture: an experiment containing both a batch and a single trial.
        self.experiment = get_experiment_with_batch_and_single_trial()
    def testJSONEncodeFailure(self):
        """Objects with no registered encoder raise JSONEncodeError."""
        self.assertRaises(JSONEncodeError, object_to_json, RuntimeError("foobar"))
    def testJSONDecodeFailure(self):
        """Non-JSON inputs and unknown `__type` tags raise JSONDecodeError."""
        self.assertRaises(JSONDecodeError, object_from_json, RuntimeError("foobar"))
        self.assertRaises(JSONDecodeError, object_from_json, {"__type": "foobar"})
    def testSaveAndLoad(self):
        """An experiment saved to a .json file loads back equal to the original."""
        with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json") as f:
            save_experiment(self.experiment, f.name)
            loaded_experiment = load_experiment(f.name)
            self.assertEqual(loaded_experiment, self.experiment)
            os.remove(f.name)
    def testSaveValidation(self):
        """save_experiment raises ValueError when handed a trial instead of an experiment."""
        with self.assertRaises(ValueError):
            save_experiment(self.experiment.trials[0], "test.json")
    def testValidateFilename(self):
        """save_experiment rejects the extensionless filename 'test'."""
        bad_filename = "test"
        self.assertRaises(ValueError, save_experiment, self.experiment, bad_filename)
    def testEncodeDecode(self):
        """Every fixture in TEST_CASES survives object_to_json/object_from_json."""
        for class_, fake_func in TEST_CASES:
            # Can't load trials from JSON, because a batch needs an experiment
            # in order to be initialized
            if class_ == "BatchTrial" or class_ == "Trial":
                continue
            # Can't load parameter constraints from JSON, because they require
            # a SearchSpace in order to be initialized
            if class_ == "OrderConstraint" or class_ == "SumConstraint":
                continue
            original_object = fake_func()
            json_object = object_to_json(original_object)
            converted_object = object_from_json(json_object)
            if class_ == "SimpleExperiment":
                # Evaluation functions will be different, so need to do
                # this so equality test passes
                with self.assertRaises(RuntimeError):
                    converted_object.evaluation_function(parameterization={})
                original_object.evaluation_function = None
                converted_object.evaluation_function = None
            self.assertEqual(
                original_object,
                converted_object,
                msg=f"Error encoding/decoding {class_}.",
            )
    def testEncodeDecodeTorchTensor(self):
        """Tensors encode to a tagged dict (value/dtype/device) and decode equal."""
        x = torch.tensor(
            [[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64, device=torch.device("cpu")
        )
        expected_json = {
            "__type": "Tensor",
            "value": [[1.0, 2.0], [3.0, 4.0]],
            "dtype": {"__type": "torch_dtype", "value": "torch.float64"},
            "device": {"__type": "torch_device", "value": "cpu"},
        }
        x_json = object_to_json(x)
        self.assertEqual(expected_json, x_json)
        x2 = object_from_json(x_json)
        self.assertTrue(torch.equal(x, x2))
    def testDecodeGenerationStrategy(self):
        """GenerationStrategy round-trips before and after generating and seeing data."""
        generation_strategy = get_generation_strategy()
        experiment = get_branin_experiment()
        gs_json = object_to_json(generation_strategy)
        new_generation_strategy = generation_strategy_from_json(gs_json)
        self.assertEqual(generation_strategy, new_generation_strategy)
        self.assertGreater(len(new_generation_strategy._steps), 0)
        self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
        # Model has not yet been initialized on this GS since it hasn't generated
        # anything yet.
        self.assertIsNone(new_generation_strategy.model)
        # Check that we can encode and decode the generation strategy after
        # it has generated some generator runs. Since we now need to `gen`,
        # we remove the fake callable kwarg we added, since model does not
        # expect it.
        generation_strategy = get_generation_strategy(with_callable_model_kwarg=False)
        gr = generation_strategy.gen(experiment)
        gs_json = object_to_json(generation_strategy)
        new_generation_strategy = generation_strategy_from_json(gs_json)
        self.assertEqual(generation_strategy, new_generation_strategy)
        self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
        # Since this GS has now generated one generator run, model should have
        # been initialized and restored when decoding from JSON.
        self.assertIsInstance(new_generation_strategy.model, ModelBridge)
        # Check that we can encode and decode the generation strategy after
        # it has generated some trials and been updated with some data.
        generation_strategy = new_generation_strategy
        experiment.new_trial(gr)  # Add previously generated GR as trial.
        # Make generation strategy aware of the trial's data via `gen`.
        generation_strategy.gen(experiment, data=get_branin_data())
        gs_json = object_to_json(generation_strategy)
        new_generation_strategy = generation_strategy_from_json(gs_json)
        self.assertEqual(generation_strategy, new_generation_strategy)
        self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
        self.assertIsInstance(new_generation_strategy.model, ModelBridge)
    def testEncodeDecodeNumpy(self):
        """Numpy arrays round-trip exactly."""
        arr = np.array([[1, 2, 3], [4, 5, 6]])
        self.assertTrue(np.array_equal(arr, object_from_json(object_to_json(arr))))
    def testEncodeDecodeSimpleBenchmarkProblem(self):
        """Decoded benchmark problems evaluate `f` the same as the originals."""
        branin_problem = get_branin_simple_benchmark_problem()
        sum_problem = get_sum_simple_benchmark_problem()
        new_branin_problem = object_from_json(object_to_json(branin_problem))
        new_sum_problem = object_from_json(object_to_json(sum_problem))
        self.assertEqual(
            branin_problem.f(1, 2), new_branin_problem.f(1, 2), branin(1, 2)
        )
        self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)
        # Test using `from_botorch`.
        ackley_problem = SimpleBenchmarkProblem(
            f=from_botorch(Ackley()), noise_sd=0.0, minimize=True
        )
        new_ackley_problem = object_from_json(object_to_json(ackley_problem))
        self.assertEqual(
            ackley_problem.f(1, 2), new_ackley_problem.f(1, 2), ackley(1, 2)
        )
    def testRegistryAdditions(self):
        """User-registered runner/metric subclasses participate in save/load."""
        class MyRunner(Runner):
            def run():
                pass
            def staging_required():
                return False
        class MyMetric(Metric):
            pass
        register_metric(MyMetric)
        register_runner(MyRunner)
        experiment = get_experiment_with_batch_and_single_trial()
        experiment.runner = MyRunner()
        experiment.add_tracking_metric(MyMetric(name="my_metric"))
        with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json") as f:
            save_experiment(experiment, f.name)
            loaded_experiment = load_experiment(f.name)
            self.assertEqual(loaded_experiment, experiment)
            os.remove(f.name)
    def testEncodeUnknownClassToDict(self):
        """Encoding an unregistered class raises with a registry-specific message."""
        # Cannot encode `UnknownClass` type because it is not registered in the
        # CLASS_ENCODER_REGISTRY.
        class UnknownClass:
            def __init__(self):
                pass
        with self.assertRaisesRegex(
            ValueError, "is a class. Add it to the CLASS_ENCODER_REGISTRY"
        ):
            object_to_json(UnknownClass)
        # `UnknownClass` type is registered in the CLASS_ENCODER_REGISTRY and uses the
        # `botorch_modular_to_dict` encoder, but `UnknownClass` is not registered in
        # the `botorch_modular_registry.py` file.
        CLASS_ENCODER_REGISTRY[UnknownClass] = botorch_modular_to_dict
        with self.assertRaisesRegex(
            ValueError,
            "does not have a corresponding parent class in CLASS_TO_REGISTRY",
        ):
            object_to_json(UnknownClass)
    def testDecodeUnknownClassFromJson(self):
        """Decoding a class path missing from the reverse registry raises ValueError."""
        with self.assertRaisesRegex(
            ValueError,
            "does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY",
        ):
            class_from_json({"index": 0, "class": "unknown_path"})
|
6,190 | f6f1cd95e4aaa5e434c3cf3cff0d46b45fc7b830 | import re
import datetime
from django import forms
from django.utils.translation import ugettext as _
from vcg.util.forms import mobile_number_validation
from vcg.company_management.models import ConfigurationContact, ConfigurationLogo, ConfigurationHomepage, ConfigurationLocation
class ConfigurationContactForm(forms.ModelForm):
    """Model form for ConfigurationContact: styles widgets and validates contact fields."""
    class Meta:
        model = ConfigurationContact
    def __init__(self, *args, **kwargs):
        super(ConfigurationContactForm, self).__init__(*args, **kwargs)
        # CSS hooks for the rendered widgets.
        self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'
        self.fields['name_of_institution'].widget.attrs['class'] = 'form-text'
        self.fields['email_external'].widget.attrs['class'] = 'form-text'
        self.fields['country_code_external'].widget.attrs['class'] = 'form-text-small'
        self.fields['phone_number_external'].widget.attrs['class'] = 'form-text-phone'
        self.fields['email_internal'].widget.attrs['class'] = 'form-text'
        self.fields['country_code_internal'].widget.attrs['class'] = 'form-text-small'
        self.fields['phone_number_internal'].widget.attrs['class'] = 'form-text-phone'
        # Remember the edited instance's primary key (empty string when creating).
        if 'instance' in kwargs:
            self.id = kwargs['instance'].id
        else:
            self.id = ""
    def clean(self):
        """Cross-field rule: a phone number and its country code must be given together."""
        phone_number_external = self.cleaned_data.get("phone_number_external")
        country_code_external = self.cleaned_data.get("country_code_external")
        phone_number_internal = self.cleaned_data.get("phone_number_internal")
        country_code_internal = self.cleaned_data.get("country_code_internal")
        if phone_number_external and not country_code_external:
            raise forms.ValidationError(_('External Country code Field is required .'))
        if country_code_external and not phone_number_external:
            raise forms.ValidationError(_('External Phone Number Field is required .'))
        if phone_number_internal and not country_code_internal:
            raise forms.ValidationError(_('Internal Country code Field is required .'))
        if country_code_internal and not phone_number_internal:
            raise forms.ValidationError(_('Internal Phone Number Field is required .'))
        return self.cleaned_data
    def clean_name_of_institution(self):
        """Require at least 3 characters and reject whitespace-only names."""
        name_of_institution = self.cleaned_data['name_of_institution']
        if name_of_institution:
            if len(name_of_institution) < 3:
                raise forms.ValidationError(_('Enter minimum 3 characters.'))
            elif re.match(r'^[\s]*$', name_of_institution):
                raise forms.ValidationError(_("Enter a valid name."))
        return name_of_institution
    def clean_country_code_external(self):
        """Country codes are capped at 5 digits."""
        country_code_external = self.cleaned_data['country_code_external']
        if country_code_external:
            if len(str(country_code_external)) > 5:
                raise forms.ValidationError(_('maximum 5 characters.'))
        return country_code_external
    def clean_phone_number_external(self):
        """Normalize via mobile_number_validation; falsy result means invalid."""
        phone_number_external = self.cleaned_data['phone_number_external']
        if phone_number_external:
            phone_number_external = mobile_number_validation(phone_number_external)
            if not phone_number_external:
                raise forms.ValidationError(_("Enter a valid contact number"))
        return phone_number_external
    def clean_country_code_internal(self):
        """Country codes are capped at 5 digits."""
        country_code_internal = self.cleaned_data['country_code_internal']
        if country_code_internal:
            if len(str(country_code_internal)) > 5:
                raise forms.ValidationError(_('maximum 5 characters.'))
        return country_code_internal
    def clean_phone_number_internal(self):
        """Normalize via mobile_number_validation; falsy result means invalid."""
        phone_number_internal = self.cleaned_data['phone_number_internal']
        if phone_number_internal:
            phone_number_internal = mobile_number_validation(phone_number_internal)
            if not phone_number_internal:
                raise forms.ValidationError(_("Enter a valid contact number"))
        return phone_number_internal
class ConfigurationLogoForm(forms.ModelForm):
    """Model form for ConfigurationLogo; remembers the edited instance's id."""
    class Meta:
        model = ConfigurationLogo
    def __init__(self, *args, **kwargs):
        super(ConfigurationLogoForm, self).__init__(*args, **kwargs)
        # Keep the primary key handy when editing an existing row, else "".
        self.id = kwargs['instance'].id if 'instance' in kwargs else ""
class ConfigurationHomepageForm(forms.ModelForm):
    """Model form for ConfigurationHomepage with length/blank validation."""
    class Meta:
        model = ConfigurationHomepage
    def __init__(self, *args, **kwargs):
        super(ConfigurationHomepageForm, self).__init__(*args, **kwargs)
        # Attach CSS classes to the rendered widgets.
        widget_css = (
            ('company', 'form-dropdownfield'),
            ('header', 'form-text'),
            ('introduction', 'form-textarea'),
        )
        for field_name, css_class in widget_css:
            self.fields[field_name].widget.attrs['class'] = css_class
        # Keep the primary key handy when editing an existing row, else "".
        self.id = kwargs['instance'].id if 'instance' in kwargs else ""
    def clean_header(self):
        """Require at least 3 characters and reject whitespace-only headers."""
        header = self.cleaned_data['header']
        if not header:
            return header
        if len(header) < 3:
            raise forms.ValidationError(_('Enter minimum 3 characters.'))
        if re.match(r'^[\s]*$', header):
            raise forms.ValidationError(_("Enter a valid name."))
        return header
    def clean_introduction(self):
        """Require at least 10 characters and reject whitespace-only text."""
        introduction = self.cleaned_data['introduction']
        if not introduction:
            return introduction
        if len(introduction) < 10:
            raise forms.ValidationError(_('Enter minimum 10 characters.'))
        if re.match(r'^[\s]*$', introduction):
            raise forms.ValidationError(_("Enter a valid address."))
        return introduction
class ConfigurationLocationForm(forms.ModelForm):
    """Model form for ConfigurationLocation; styles its dropdown widgets."""
    class Meta:
        model = ConfigurationLocation
    def __init__(self, *args, **kwargs):
        super(ConfigurationLocationForm, self).__init__(*args, **kwargs)
        # All three fields render as styled dropdowns.
        for field_name in ('company', 'country', 'continent'):
            self.fields[field_name].widget.attrs['class'] = 'form-dropdownfield'
        # Keep the primary key handy when editing an existing row, else "".
        self.id = kwargs['instance'].id if 'instance' in kwargs else ""
6,191 | de7cd231aceb2700acb3ecafe36d1ba1f5c1643b | #!/usr/bin/python
import sys
import itertools as it
pop_list = []
#with open("/Users/dashazhernakova/Documents/Doby/GenomeRussia/ancientDNA/GR+Lazaridis.ind") as f:
with open(sys.argv[1]) as f:
[pop_list.append(l.strip().split("\t")[2]) for l in f if l.strip().split("\t")[2] not in pop_list]
triplets = it.combinations(pop_list, 3)
for a,b,c in triplets:
print a + "\t" + b + "\t" + c + "\tMbuti.DG"
|
6,192 | 16850d931eec0356f71317cc24461e006fbcd59c | start = input()
# Tokens of the input line; the wolf's position decides the message.
herd = start.split()
if herd[-1] == 'wolf':
    print('Please go away and stop eating my sheep')
else:
    # Position of the token "wolf," counted from the back of the line.
    print(f'Oi! Sheep number {herd[::-1].index("wolf,")}! You are about to be eaten by a wolf!')
|
6,193 | a2d2ffe5ed6a844341f7ad731357bb837cee4787 | import math
import random
from PILL import Image, ImageDraw
for i in range(1,1025):
pass
for j in range(1,1025):
pass
epipedo[i][j]
for i in range(1,21):
pass
im = Image.new("RGB", (512, 512), "white")
x=random.choice(1,1025)
y=random.choice(1,1025)
r=random.choice(10,51)
draw = ImageDraw.Draw(im)
draw.ellipse((x-r, y-r, x+r, y+r), fill=(255,255,0), outline ='red')
for j in range(1,4):#apothikeuw ta stoixeia tou kathe kuklou(kentro kai aktina)
pass
if j==1:
pass
kukloi[i][1]=x
if j==2:
pass
kukloi[i][2]=y
if j==3:
pass
kukloi[i][3]=r
for i in range(1,21):
pass
for k in range(i,20):#sugkrinw kathe kuklo me tous upoloipous xwris na epanalambanontai oi idioi elegxoi
pass
a=math.pow(kukloi[k+1][2]-kukloi[i][2], 2)
b=math.pow(kukloi[k+1][1]-kukloi[i][1], 2)
d=math.sqrt(a+b)
if math.fabs(kukloi[i][3]-kykloi[k+1][3])<d and d<kukloi[i][3]+kykloi[k+1][3]:
pass
temkuk=0#oi temonomenoi kukloi
temkuk=temkuk+1
print "temnontai",temkuk, "kukloi"# emfanizei tous temonomenous kuklous
im.show()#kai tin eikona
|
6,194 | f7174bf4e7612921e730ac87141c85654a2f2411 | from PyQt5.QtWidgets import QHeaderView, QWidget
from presenters.studyings_presenter import StudyingsPresenter
from view.q_objects_view import QObjectsView
class QStudyingsView(QObjectsView):
    """Table view for 'studyings'; wires itself to a StudyingsPresenter."""
    def __init__(self, parent):
        # NOTE(review): both base __init__s are invoked explicitly rather than
        # via super(); kept as-is since QObjectsView may rely on this order.
        QWidget.__init__(self, parent)
        QObjectsView.__init__(self, parent)
        self.set_presenter(StudyingsPresenter(view=self))
    def init_table(self):
        # Three columns with Russian headers: start time, date, topics.
        self.table.setColumnCount(3)
        self.table.setHorizontalHeaderLabels(['Время начала', 'Число', 'Темы'])
        self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
6,195 | 878937e19d6a48a0d44309efbac1d41c208ce849 | '''
This module is used for handling the button.
'''
import RPi.GPIO as GPIO
from aiy.voicehat import *
class Button:
    """Wrapper around the Google AIY Voice Kit arcade button and its LED."""
    status = bool() #status indicates whether it is supposed to be on or off.
    LED_pin = 25 #Pin for the LED in the button in the Google AIY kit.
    button_pin = 23#The button is handled through the Google AIY lib because that one might actually work.
    def __init__(self):
        """Configure the LED pin for output and fetch the AIY button object."""
        self.status = True
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.LED_pin , GPIO.OUT)
        # NOTE(review): get_button comes from aiy.voicehat's star import and
        # overwrites the boolean `status` with a button object; verify that
        # get_button accepts a pin argument in the installed AIY version.
        self.status = get_button(self.button_pin)
    def read_button(self):
        """Sample the raw GPIO level of the button pin into `status`."""
        self.status = GPIO.input(self.button_pin)
    #Turns on the button light as prompted
    def light(self, stat):
        """Drive the button LED: truthy `stat` -> on, falsy -> off."""
        if (stat):
            GPIO.output(self.LED_pin, 1)
        else:
            GPIO.output(self.LED_pin, 0)
    def cleanup(self):
        """Release all GPIO resources claimed by this process."""
        GPIO.cleanup()
6,196 | 3ec0c20fb2dfed9930885885288cc5d47f4f5ee5 |
import xmlrpclib
import socket
import time
import math
import re
from roundup.exceptions import Reject
# Matches svn-style revision references such as "r1234" or "revision 1234".
REVPAT = re.compile(r'(r[0-9]+\b|rev(ision)? [0-9]+\b)')
def extract_classinfo(db, klass, nodeid, newvalues):
    """Collect (content, classifier tokens) for a node being created or changed.

    nodeid is None for a brand-new node, in which case `newvalues` holds the
    whole submission; otherwise the stored node is fetched from the database.
    """
    if None == nodeid:
        node = newvalues
        content = newvalues['content']
    else:
        node = db.getnode(klass.classname, nodeid)
        content = klass.get(nodeid, 'content')
    # Node timestamp: creation/date when present, else "now".
    if node.has_key('creation') or node.has_key('date'):
        nodets = node.get('creation', node.get('date')).timestamp()
    else:
        nodets = time.time()
    # Author: explicit author/creator field, else the submitting user.
    if node.has_key('author') or node.has_key('creator'):
        authorid = node.get('author', node.get('creator'))
    else:
        authorid = db.getuid()
    # Age of the author's account (seconds) at the time the node was made.
    authorage = nodets - db.getnode('user', authorid)['creation'].timestamp()
    # Extra spambayes tokens beyond the text itself: class, author, a
    # log-bucketed account age, and whether a revision number is mentioned.
    tokens = ["klass:%s" % klass.classname,
              "author:%s" % authorid,
              "authorage:%d" % int(math.log(authorage)),
              "hasrev:%s" % (REVPAT.search(content) is not None)]
    return (content, tokens)
def check_spambayes(db, content, tokens):
    """Score `content` against the configured SpamBayes XML-RPC server.

    Returns (True, probability) on success, or (False, error message) when
    the server is unconfigured or unreachable.
    """
    try:
        spambayes_uri = db.config.detectors['SPAMBAYES_URI']
    except KeyError, e:
        return (False, str(e))
    try:
        server = xmlrpclib.ServerProxy(spambayes_uri, verbose=False)
    except IOError, e:
        return (False, str(e))
    try:
        prob = server.score({'content':content}, tokens, {})
        return (True, prob)
    except (socket.error, xmlrpclib.Error), e:
        return (False, str(e))
def check_spam(db, klass, nodeid, newvalues):
    """Auditor: attach a spambayes score to a message/file being created or set.

    On scoring failure the score is set to -1 and the item is flagged as
    misclassified.
    """
    if newvalues.has_key('spambayes_score'):
        # Explicitly setting the score is a privileged manual classification.
        if not db.security.hasPermission('SB: May Classify', db.getuid()):
            raise ValueError, "You don't have permission to spamclassify messages"
        # Don't do anything if we're explicitly setting the score
        return
    if not newvalues.has_key('content'):
        # No need to invoke spambayes if the content of the message
        # is unchanged.
        return
    (content, tokens) = extract_classinfo(db, klass, nodeid, newvalues)
    (success, other) = check_spambayes(db, content, tokens)
    if success:
        newvalues['spambayes_score'] = other
        newvalues['spambayes_misclassified'] = False
    else:
        newvalues['spambayes_score'] = -1
        newvalues['spambayes_misclassified'] = True
def init(database):
    """Hook the spam auditor into msg/file create and set events."""
    for audited_class in (database.msg, database.file):
        for event in ('create', 'set'):
            audited_class.audit(event, check_spam)
|
6,197 | ecbb64223b0d5aa478cf91e1fcafe45572eac1af | # Copyright 2021 TerminalWarlord under the terms of the MIT
# license found at https://github.com/TerminalWarlord/Subtitle-Downloader-Bot/blob/master/LICENSE
# Encoding = 'utf-8'
# Fork and Deploy, do not modify this repo and claim it yours
# For collaboration mail me at dev.jaybee@gmail.com
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
import shutil
import requests
import os
import glob
from bs4 import BeautifulSoup as bs
import time
from datetime import timedelta
from dotenv import load_dotenv
import zipfile
# Pull secrets/config from the environment (optionally via a .env file).
load_dotenv()
bot_token = os.environ.get('BOT_TOKEN')
api = int(os.environ.get('API_KEY'))
hash = os.environ.get('API_HASH')  # NOTE(review): shadows the builtin `hash`.
workers = int(os.environ.get('WORKERS'))
app = Client("SubtitleDLbot", bot_token=bot_token, api_id=api, api_hash=hash, workers=workers)
cuttly = os.environ.get('CUTTLY_API')  # API key for the cutt.ly shortener.
# Wall-clock start time, used by the /uptime command.
timestarted = timedelta(seconds=int(time.time()))
@app.on_message(filters.command('start'))
def start(client, message):
    """Greet the user on /start with channel/bot-list link buttons."""
    buttons = [[
        InlineKeyboardButton('🍿 Channel', url="https://telegram.me/MyTestBotZ"),
        InlineKeyboardButton('🍿 BotsList', url="https://t.me/mybotzlist"),
    ]]
    greeting = (
        f"Hello there, I am a __**Subtitle Downloader Bot**__.\nGive me a Movie/Series name and I will fetch it from __**Subscene**__.\n\n"
        "__**Made with ♥️ by @OO7ROBot :**__"
    )
    app.send_message(
        chat_id=message.from_user.id,
        text=greeting,
        parse_mode='md',
        reply_markup=InlineKeyboardMarkup(buttons),
    )
@app.on_message(filters.command('help'))
def help(client,message):
    """Reply to /help with a short usage summary and link buttons."""
    url = [[InlineKeyboardButton(f"Channel❤️", url=f"https://t.me/MyTestBotZ")],
           [InlineKeyboardButton(f"OtherBots🍿", url=f"https://t.me/mybotzlist")]]
    reply_markup = InlineKeyboardMarkup(url)
    message.reply_text(reply_to_message_id= message.message_id,text=f"Send me any Movie/Series name and I will -\n"
                       f"__ * Search for it on `Subscene.com`\n"
                       f" * Let you choose your preferable language.\n"
                       f" * Download the subtitle, unzip and upload in `.srt/.ass` format__", parse_mode='md', reply_markup=reply_markup)
@app.on_message(filters.command('uptime'))
def uptime(client, message):
    """Reply with how long the bot has been running."""
    elapsed = timedelta(seconds=int(time.time())) - timestarted
    app.send_message(
        chat_id=message.from_user.id,
        text=f"__**Uptime :**__ __{elapsed}__",
        parse_mode='md',
    )
@app.on_message(filters.text)
def search(client, message):
    """Search subscene.com by title and offer the first 10 matches as buttons.

    Callback data: 'LANG*<title-slug>' per result; 'SRCNX*<i>*<query>' for the
    next page when more results remain.
    """
    query = message.text.replace(" ", "+")
    data = {
        'query' : query,
        'l' : ''
    }
    res = requests.post('https://subscene.com/subtitles/searchbytitle', data=data)
    soup = bs(res.text, 'html.parser')
    results = soup.find('div', {'class': 'search-result'}).find_all('div', {'class': 'title'})
    kb = []
    i = 0  # number of buttons actually added
    l = 0  # index over all scraped results
    for sub in results:
        if l < 10:
            sublink = sub.find('a').attrs['href'].split('/')[-1]
            subtitlename = sub.find('a').text
            # Skip oversized slugs -- presumably because Telegram caps
            # callback_data at 64 bytes.
            if len(sublink)<64:
                kb.append([InlineKeyboardButton(f"{subtitlename}", callback_data=f'LANG*{sublink}')])
                i += 1
            else:
                pass
        else:
            pass
        l += 1
    if len(results) > i:
        kb.append([InlineKeyboardButton(f"Next ⏭", callback_data=f'SRCNX*{i}*{query}')])
    reply_markup = InlineKeyboardMarkup(kb)
    app.send_message(chat_id=message.chat.id,
                     text=f"__Showing Result for **{query}**\n"
                          f"Choose your desired Movie/Series:__",
                     parse_mode='md',
                     reply_markup=reply_markup)
@app.on_callback_query(filters.regex('SRCNX'))
def searchnext(client, callback_query):
    """Show the next page of title search results ('SRCNX*<i>*<query>')."""
    query = callback_query.data.split('*')[-1]
    data = {
        'query' : query,
        'l' : ''
    }
    res = requests.post('https://subscene.com/subtitles/searchbytitle', data=data)
    soup = bs(res.text, 'html.parser')
    results = soup.find('div', {'class': 'search-result'}).find_all('div', {'class': 'title'})
    kb = []
    # Window of results to show: indices strictly between j and k (next 10).
    i = int(callback_query.data.split('*')[-2]) + 1
    j = i - 1
    k = i + 10
    l = 0
    for sub in results:
        if l > j and l < k:
            sublink = sub.find('a').attrs['href'].split('/')[-1]
            subtitlename = sub.find('a').text
            if len(sublink)<64:
                kb.append([InlineKeyboardButton(f"{subtitlename}", callback_data=f'LANG*{sublink}')])
                i += 1
            else:
                pass
        else:
            pass
        l += 1
    if len(results) > i:
        kb.append([InlineKeyboardButton(f"Next ⏭", callback_data=f'SRCNX*{i}*{query}')])
    kb.append([InlineKeyboardButton(f"Previous ⏮️", callback_data=f'SRCPR*{i}*{query}')])
    reply_markup = InlineKeyboardMarkup(kb)
    callback_query.edit_message_reply_markup(reply_markup=reply_markup)
@app.on_callback_query(filters.regex('SRCPR'))
def searchprev(client, callback_query):
    """Show the previous page of title search results ('SRCPR*<i>*<query>')."""
    query = callback_query.data.split('*')[-1]
    data = {
        'query' : query,
        'l' : ''
    }
    res = requests.post('https://subscene.com/subtitles/searchbytitle', data=data)
    soup = bs(res.text, 'html.parser')
    results = soup.find('div', {'class': 'search-result'}).find_all('div', {'class': 'title'})
    kb = []
    # Window of results to show: indices strictly between j and k (previous 10).
    i = int(callback_query.data.split('*')[-2])
    j = i - 21
    k = i - 10
    l = 0
    for sub in results:
        if l > j and l < k:
            sublink = sub.find('a').attrs['href'].split('/')[-1]
            subtitlename = sub.find('a').text
            if len(sublink) < 64:
                kb.append([InlineKeyboardButton(f"{subtitlename}", callback_data=f'LANG*{sublink}')])
                i -= 1
        l += 1
    if j > 10:
        # BUG FIX: this button used f'SRCPR*{i}*{language}*{suburl}', but
        # `language`/`suburl` are undefined here (NameError). Title-search
        # paging only carries the query, as parsed at the top of this handler.
        kb.append([InlineKeyboardButton(f"Previous ⏮️", callback_data=f'SRCPR*{i}*{query}')])
    if len(results) > i:
        kb.append([InlineKeyboardButton(f"Next ⏭", callback_data=f'SRCNX*{i}*{query}')])
    reply_markup = InlineKeyboardMarkup(kb)
    callback_query.edit_message_reply_markup(reply_markup=reply_markup)
@app.on_callback_query(filters.regex('LANG'))
def chooselang(client, callback_query):
    """Ask which subtitle language to fetch for the chosen title ('LANG*<slug>')."""
    sublink = callback_query.data.split('*')[-1]
    # BUG FIX: the Hindi button used the prefix 'PRE', which the 'PREL'
    # callback handler never matches, leaving Hindi selection dead.
    kb = [[InlineKeyboardButton("English 🇬🇧", callback_data=f'PREL*english*{sublink}')],
          [InlineKeyboardButton("Bengali 🇧🇩", callback_data=f'PREL*bengali*{sublink}')],
          [InlineKeyboardButton("Hindi 🇮🇳", callback_data=f'PREL*hindi*{sublink}')],
          [InlineKeyboardButton("Indonesian 🇮🇩", callback_data=f'PREL*indonesian*{sublink}')]]
    reply_markup = InlineKeyboardMarkup(kb)
    app.edit_message_text(chat_id=callback_query.message.chat.id,
                          message_id=callback_query.message.message_id,
                          text=f"__Select a Subtitle Language__",
                          parse_mode='md',
                          reply_markup=reply_markup)
@app.on_callback_query(filters.regex('PREL'))
def langset(client, callback_query):
    """List up to 10 subtitles for the chosen title and language.

    Callback data in: 'PREL*<language>*<title-slug>'. Buttons out:
    'DTL*<language>*<slug>*<subid>' per subtitle plus an 'NXT*...' pager.
    """
    language = callback_query.data.split('*')[-2]
    callback_query.answer(f"Preffered Language : {language.capitalize()}", show_alert=False)
    suburl = callback_query.data.split('*')[-1]
    url = f'https://subscene.com/subtitles/{suburl}/{language}'
    r = requests.get(url)
    soup = bs(r.text, 'html.parser')
    allsubs = soup.find('tbody').find_all('tr')
    kb = []
    i = 0
    for subs in allsubs:
        try:
            if i < 10:
                subid = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-1]
                sublink = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-3]
                subname = subs.find('td', {'class': 'a1'}).find_all('span')[1].text.strip()
                # Skip oversized slugs -- presumably Telegram's 64-byte
                # callback_data cap.
                if len(sublink) < 64:
                    kb.append([InlineKeyboardButton(f"{subname}", callback_data=f'DTL*{language}*{sublink}*{subid}')])
                    i += 1
            else:
                break
        except:
            pass
    # BUG FIX: was `if i > 10`, which can never be true because the loop stops
    # once i reaches 10 -- the Next button was unreachable. Use the same
    # "more rows than buttons" check as nextres.
    if len(allsubs) > i:
        kb.append([InlineKeyboardButton(f"Next ⏭️", callback_data=f'NXT*{i}*{language}*{suburl}')])
    try:
        reply_markup = InlineKeyboardMarkup(kb)
        app.edit_message_text(chat_id=callback_query.message.chat.id,
                              message_id=callback_query.message.message_id,
                              text=f"__Select a Subtitle__",
                              parse_mode='md',
                              reply_markup=reply_markup)
    except:
        app.edit_message_text(chat_id=callback_query.message.chat.id,
                              message_id=callback_query.message.message_id,
                              text=f"__Sorry no subtitle available for that specific language!\n"
                                   f"Try another one!__",
                              parse_mode='md')
@app.on_callback_query(filters.regex('DTL'))
def subdetails(client, callback_query):
    """Show poster and details for one subtitle plus a Download button.

    Callback data in: 'DTL*<language>*<slug>*<subid>'. The download link is
    shortened via cutt.ly before being embedded in the button.
    """
    language = callback_query.data.split('*')[-3]
    suburl = callback_query.data.split('*')[-2]
    subid = callback_query.data.split('*')[-1]
    kb = []
    # getsub
    url = f'https://subscene.com/subtitles/{suburl}/{language}/{subid}'
    callback_query.answer(f"Getting sub from : {url}", show_alert=False)
    r = requests.get(url)
    soup = bs(r.text, 'html.parser')
    # Strips the '154-' prefix -- presumably a thumbnail-size marker in the
    # poster URL; verify against subscene markup.
    poster = soup.find('div', {'class': 'poster'}).find('img').attrs['src'].replace('154-', '')
    info = soup.find('div', {'id': 'details'}).find('ul').find_all('li')
    dload = "https://subscene.com" + soup.find('a', {'id': 'downloadButton'}).attrs['href']
    subdetails = []
    for a in info:
        try:
            # Flatten each detail <li> into one line of text.
            w = a.text.replace('-', '')
            a = "".join(line.strip() for line in w.split("\n"))
            subdetails.append(a)
        except:
            pass
    subtext = "\n".join(subdetails)
    #cuttly
    # NOTE(review): assumes cutt.ly responds {"url": {"shortLink": ...}};
    # confirm against the cutt.ly API docs.
    data = requests.get(f"https://cutt.ly/api/api.php?key={cuttly}&short={dload}").json()["url"]
    shortened_url = data["shortLink"]
    kb = [[InlineKeyboardButton(f"Download", callback_data=f'DOWNLOAD*{shortened_url}')]]
    reply_markup = InlineKeyboardMarkup(kb)
    app.send_photo(caption=f'__{subtext}__',
                   photo=poster,
                   chat_id=callback_query.message.chat.id,
                   parse_mode='md',
                   reply_markup=reply_markup)
@app.on_callback_query(filters.regex('DOWNLOAD'))
def download(client, callback_query):
    """Fetch the subtitle zip, extract it, and upload the files to the chat."""
    callback_query.answer(f"Downloading!!!", show_alert=False)
    link = callback_query.data.split('*')[-1]
    # unzip
    # Resolve the shortener redirect, then derive the local zip name and
    # extraction directory from the Content-Disposition header.
    url = requests.get(link).url
    r = requests.head(url)
    a = r.headers
    filename = a['Content-Disposition'].split('=')[-1]
    directory = a['Content-Disposition'].split('=')[-1].replace('.zip', '')
    with open(filename, 'wb') as f:
        im = requests.get(link)
        f.write(im.content)
    with zipfile.ZipFile(filename,"r") as zip_ref:
        zip_ref.extractall(directory)
    # Send .srt files first; the except branch falls back to sending every
    # extracted file. NOTE(review): the bare except also fires on Telegram
    # upload errors, not just on a missing/failed .srt pass -- confirm intent.
    try:
        a = glob.glob(f'./{directory}/*srt', recursive=True)
        for file in a:
            app.send_document(document=file,
                              chat_id=callback_query.message.chat.id,
                              parse_mode='md')
        app.delete_messages(chat_id=callback_query.message.chat.id,
                            message_ids=callback_query.message.message_id)
    except:
        a = glob.glob(f'./{directory}/*', recursive=True)
        for file in a:
            app.send_document(document=file,
                              chat_id=callback_query.message.chat.id,
                              parse_mode='md')
        app.delete_messages(chat_id=callback_query.message.chat.id,
                            message_ids=callback_query.message.message_id)
    # Best-effort cleanup of the downloaded archive and extraction directory.
    try:
        os.remove(filename)
        shutil.rmtree(directory)
    except:
        pass
@app.on_callback_query(filters.regex('NXT'))
def nextres(client, callback_query):
    """Advance subtitle-list pagination ('NXT*<i>*<language>*<slug>')."""
    language = callback_query.data.split('*')[-2]
    suburl = callback_query.data.split('*')[-1]
    url = f'https://subscene.com/subtitles/{suburl}/{language}'
    print(url)
    r = requests.get(url)
    soup = bs(r.text, 'html.parser')
    allsubs = soup.find('tbody').find_all('tr')
    kb = []
    # Window of rows to show: indices strictly between j and k (next 10).
    i = int(callback_query.data.split('*')[-3]) + 1
    j = i - 1
    k = i + 10
    l = 0
    for subs in allsubs:
        try:
            if l > j and l < k:
                subid = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-1]
                sublink = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-3]
                subname = subs.find('td', {'class': 'a1'}).find_all('span')[1].text.strip()
                if len(sublink) < 64:
                    kb.append([InlineKeyboardButton(f"{subname}", callback_data=f'DTL*{language}*{sublink}*{subid}')])
                    i += 1
                else:
                    pass
            else:
                pass
            l += 1
        except:
            pass
    if len(allsubs) > i:
        kb.append([InlineKeyboardButton(f"Next ⏭️", callback_data=f'NXT*{i}*{language}*{suburl}')])
    kb.append([InlineKeyboardButton(f"Previous ⏮️", callback_data=f'PRV*{i}*{language}*{suburl}')])
    reply_markup = InlineKeyboardMarkup(kb)
    a = app.edit_message_text(chat_id=callback_query.message.chat.id,
                              message_id=callback_query.message.message_id,
                              text=f"__Select a Subtitle__",
                              parse_mode='md',
                              reply_markup=reply_markup)
@app.on_callback_query(filters.regex('PRV'))
def prevres(client, callback_query):
    """Step subtitle-list pagination back ('PRV*<i>*<language>*<slug>')."""
    language = callback_query.data.split('*')[-2]
    suburl = callback_query.data.split('*')[-1]
    url = f'https://subscene.com/subtitles/{suburl}/{language}'
    r = requests.get(url)
    soup = bs(r.text, 'html.parser')
    allsubs = soup.find('tbody').find_all('tr')
    kb = []
    # Window of rows to show: indices strictly between j and k (previous 10).
    i = int(callback_query.data.split('*')[-3])
    j = i - 21
    k = i - 10
    l = 0
    for subs in allsubs:
        try:
            if l > j and l < k:
                subid = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-1]
                sublink = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-3]
                subname = subs.find('td', {'class': 'a1'}).find_all('span')[1].text.strip()
                if len(sublink) < 64:
                    kb.append([InlineKeyboardButton(f"{subname}", callback_data=f'DTL*{language}*{sublink}*{subid}')])
                    i -= 1
                else:
                    pass
            else:
                pass
            l += 1
        except:
            pass
    if j > 10:
        kb.append([InlineKeyboardButton(f"Previous ⏮️", callback_data=f'PRV*{i}*{language}*{suburl}')])
    if len(allsubs) > i:
        kb.append([InlineKeyboardButton(f"Next ⏭️", callback_data=f'NXT*{i}*{language}*{suburl}')])
    reply_markup = InlineKeyboardMarkup(kb)
    app.edit_message_text(chat_id=callback_query.message.chat.id,
                          message_id=callback_query.message.message_id,
                          text=f"__Select a Subtitle__",
                          parse_mode='md',
                          reply_markup=reply_markup)
# Start long-polling; blocks until the process is stopped.
app.run()
|
6,198 | ad813216ba8162a7089340c677e47c3e656f7c95 | from flask import Flask, request, render_template, redirect
from pymongo import MongoClient
from envparse import env
from flask_httpauth import HTTPDigestAuth
import os.path
# Get env vars stored either in an env file or on the machine
def get_env(name):
    """Return configuration value `name`, loading ./env into envparse first when present."""
    env_file = './env'
    if os.path.exists(env_file):
        env.read_envfile(env_file)
    return env(name)
app = Flask(__name__)
app.config['SECRET_KEY'] = get_env('SECRET_KEY')
# Digest-auth credentials: a single admin account whose password comes from
# the environment. (FIX: was the doubled assignment `users = users = {...}`.)
users = {
    "admin": get_env('ADMIN_PASS')
}
auth = HTTPDigestAuth()
@auth.get_password
def get_pw(username):
    """Return the digest-auth password for `username`, or None if unknown."""
    # dict.get already yields None for missing keys; avoids the original
    # membership-test-then-lookup double hit.
    return users.get(username)
# Utility method for mongo connections
def mongo_login():
    """Connect to MongoDB via MONGO_URI and return the rescuebnb database."""
    client = MongoClient(get_env('MONGO_URI'))
    return client['rescuebnb']
# Home page with host form
@app.route('/')
def show_home():
    """Render the landing page."""
    home_page = render_template('index.html')
    return home_page
# Post endpoint for committing host to db
@app.route('/addhost', methods = ['GET', 'POST'])
def hosts():
    """GET: show the host signup form. POST: persist the host and go home."""
    if request.method != 'POST':
        return render_template('addhosts.html')
    submission = request.form.to_dict()
    # should probably check for completed insert
    mongo_login().hosts.insert_one(submission)
    return redirect('/')
# Post endpoint for committing people who need shelter to db
@app.route('/requestshelter', methods = ['GET', 'POST'])
def guests():
    """GET: show the shelter-request form. POST: persist the request and go home."""
    if request.method != 'POST':
        return render_template('request_shelter.html')
    submission = request.form.to_dict()
    # should probably check for completed insert
    mongo_login().guests.insert_one(submission)
    return redirect('/')
# Get involved page
@app.route('/getinvolved')
def get_involved():
    """Render the static get-involved page."""
    page = render_template('get_involved.html')
    return page
# Volunteer page
@app.route('/volunteer')
def volunteer():
    """Render the static volunteer page."""
    page = render_template('volunteer.html')
    return page
# "Secured" endpoint for viewing registered hosts
@app.route('/hosts')
@auth.login_required
def viewhosts():
db = mongo_login()
hosts_collection = db.hosts
guests_collection = db.guests
return render_template('viewhosts.html', hosts=list(hosts_collection.find()),
guests=list(guests_collection.find()))
# USSD callback endpoint.
# BUG FIX: the route previously accepted only GET yet read request.form
# (POST body data), so every plain GET inserted an empty document into the
# ussd collection. POST is now accepted (backward-compatible: GET still
# works) and the insert only happens for POST submissions.
@app.route('/ussd', methods=['GET', 'POST'])
def ussd():
    """Store an incoming USSD form payload, then show the home page."""
    if request.method == 'POST':
        db = mongo_login()
        ussd_collection = db.ussd
        ussd_collection.insert_one(request.form.to_dict())
    return render_template('index.html')
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run()
# -*- coding: utf-8 -*-
from SPARQLWrapper import SPARQLWrapper, SPARQLWrapper2, JSON
import time, random
# testes
# Smoke test: ask DBpedia for the labels of the "Love" resource and report
# how long the round trip took.
NOW = time.time()
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    SELECT ?label
    WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print("%.2f segundos para consultar a dbpedia" % (time.time() - NOW,))
# one label per language tag
for result in results["results"]["bindings"]:
    print(result["label"]["value"] + ", " + result["label"]["xml:lang"])
PREFIX="""PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ops: <http://purl.org/socialparticipation/ops#>
PREFIX opa: <http://purl.org/socialparticipation/opa#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/terms/>
PREFIX tsioc: <http://rdfs.org/sioc/types#>
PREFIX schema: <http://schema.org/>
"""
q2="SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}"
NOW=time.time()
sparql3 = SPARQLWrapper("http://localhost:82/participabr/query")
#sparql3 = SPARQLWrapper("http://200.144.255.210:8082/participabr/query")
sparql3.setQuery(PREFIX+q2)
sparql3.setReturnFormat(JSON)
results3 = sparql3.query().convert()
print("%.2f segundos para puxar todos os nomes dos participantes do Participa.br"%(time.time()-NOW,))
for i in results3["results"]["bindings"][-10:]: print(u"participante: " +i["nome"]["value"])
# Fetch every comment (dc:type SIOC Comment), with title and text when
# present — both are OPTIONAL, so some bindings may omit them.
NOW = time.time()
q = "SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}"
sparql3.setQuery(PREFIX + q)
sparql3.setReturnFormat(JSON)
results4 = sparql3.query().convert()
print("%.2f segundos para puxar todos os comentários do Participa.br" % (time.time() - NOW,))
NOW = time.time()
print("dados lidos, processando")
import string, nltk as k

# Build one big lowercase string with all comment texts.
# FIX: ?texto is OPTIONAL in the query above, so a binding may lack the
# "texto" key entirely; skip those instead of raising KeyError.
# Also replaced Python-2-only string.join(...) with the equivalent
# " ".join(...) (string.join's default separator is a single space).
textos = [i["texto"]["value"].lower()
          for i in results4["results"]["bindings"] if "texto" in i]
palavras = " ".join(textos)

# strip punctuation, then tokenize on whitespace
exclude = set(string.punctuation)
palavras = ''.join(ch for ch in palavras if ch not in exclude)
palavras_ = palavras.split()
#fdist=k.FreqDist(palavras_)
print("feita primeira freq dist em %.2f" % (time.time() - NOW,))

# frequency distribution with Portuguese stopwords removed
NOW = time.time()
stopwords = set(k.corpus.stopwords.words('portuguese'))
palavras__ = [pp for pp in palavras_ if pp not in stopwords]
fdist_ = k.FreqDist(palavras__)
print("feita segunda freq dist (retiradas stopwords) em %.2f" % (time.time() - NOW,))
# (a third, stemmed distribution via k.stem.RSLPStemmer was prototyped here
# and left disabled in the original)
##################
# Pipeline sketch: pull comments from the SPARQL endpoint, hand-label a
# small sample, build a word histogram, pick a frequency band (most
# frequent, an offset slice, or least frequent) and use those words as the
# feature vector.
# Here: the 200 most frequent words.
# NOTE(review): relies on Python 2 / old NLTK where FreqDist.keys() returns
# a frequency-sorted list; under Python 3 this slice raises TypeError and
# the ordering guarantee differs — confirm the runtime before porting.
palavras_escolhidas = fdist_.keys()[:200]
# Other candidate features:
# *) count of words ending in a, o, e or s
# *) mean word length
# *) stopword usage
# A larger hand-labelled set is needed to judge which part of the histogram
# works best.
#########
def document_features(documento, palavras=None):
    """Return a bag-of-words feature dict for *documento*.

    For each candidate word w the feature "contains(<w>)" is True when
    ``w in documento`` holds (for a string document this is a substring
    test, exactly as in the original).

    palavras -- iterable of candidate words; defaults to the module-level
    ``palavras_escolhidas`` vocabulary, so existing callers are unchanged.
    """
    if palavras is None:
        palavras = palavras_escolhidas
    features = {}
    for palavra in palavras:
        features["contains(%s)" % (palavra,)] = (palavra in documento)
    return features
# Dummy training run: label the first 1000 comments "pos" and the next 1000
# "neg" purely to exercise the pipeline end to end.
msgs = [(rr["texto"]["value"], "pos") for rr in results4["results"]["bindings"][:1000]]
msgs2 = [(rr["texto"]["value"], "neg") for rr in results4["results"]["bindings"][1000:2000]]
msgs_ = msgs + msgs2
random.shuffle(msgs_)
feature_sets = [(document_features(texto), rotulo) for (texto, rotulo) in msgs_]
# hold out the first 1000 shuffled examples for testing
train_set, test_set = feature_sets[1000:], feature_sets[:1000]
classifier = k.NaiveBayesClassifier.train(train_set)
########
# As mais frequentes podem ser úteis já que os comentários
# são pequenos e queremos que o vetor de atributos tenha informação
# As menos frequentes são as palavras mais incomuns, informativas
# para detecção de nichos do autor
# As de incidência intermediária são consideradas as mais representativas
# do assunto
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.