text stringlengths 0 1.05M | meta dict |
|---|---|
import datetime
# currentDate = datetime.date.today()
# # print(currentDate)
# # print(currentDate.month)
# # print(currentDate.day)
# # print(currentDate.year)
# # print(currentDate.strftime('%d %B %Y'))
# # print(currentDate.strftime('Please attend our event %A, %B %d in the year %Y'))
#
# userInput = input("What is your birthday? (dd/mm/yyyy)\n")
# birthday = datetime.datetime.strptime(userInput, '%d/%m/%Y').date()
# print(birthday)
# days = currentDate - birthday
# print(days.days,'days have passed since you were born')
# currentTime = datetime.datetime.now()
# print(currentTime)
# print(currentTime.hour)
# print(currentTime.minute)
# print(currentTime.second)
# print(datetime.datetime.strftime(currentTime, '%H:%M:%S %p'))
# print(currentTime.minute)
# This code is to calculate the dateline
# currentDate = datetime.date.today()
# datelineInput = input('Enter the dateline for your project, (dd/mm/yyyy)\n').upper()
# dateline = datetime.datetime.strptime(datelineInput, '%d/%m/%Y').date()
# remainingDays = dateline - currentDate
# if remainingDays == '0':
# print('the dateline has passed. You\'re out')
# else:
# print(dateline)
# print('You have', remainingDays.days, 'days left.')
import turtle

# Draw a star-like figure: 20 outer strokes, each followed by a small
# 6-segment blue/green curl, then a backward stroke closing the step.
# NOTE(review): this source arrived with its indentation stripped; the
# nesting below (curl inside the outer loop, backward/left ending each
# outer step) is the most plausible reconstruction -- confirm against
# the original file.
for steps in range(20):
    turtle.forward(100)
    turtle.right(45)
    for moresteps in range(6):
        turtle.color('blue')
        turtle.forward(50)
        turtle.color('green')
        turtle.left(20)
    turtle.backward(90)
    turtle.left(45)
| {
"repo_name": "areriff/pythonlearncanvas",
"path": "firstMain.py",
"copies": "1",
"size": "1864",
"license": "mit",
"hash": -8787906141495993000,
"line_mean": 32.8909090909,
"line_max": 108,
"alpha_frac": 0.6899141631,
"autogenerated": false,
"ratio": 3.1754684838160134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43653826469160134,
"avg_score": null,
"num_lines": null
} |
# Webcam rectangle-detection demo (requires OpenCV -- 2.x API -- and NumPy).
__author__ = 'argi'
import cv2
import numpy as np
class RectangleDetect:
    """Find the largest dark region in the webcam stream and outline it.

    NOTE(review): the ``cv2.cv.BoxPoints`` call and the two-value
    ``cv2.findContours`` unpacking target the OpenCV 2.x API -- confirm
    the installed OpenCV version before reuse.
    """

    def black_rectangle(self):
        """Loop over webcam frames, draw a red box around the largest
        low-saturation contour, and display until 'q' is pressed."""
        cam = cv2.VideoCapture(0)
        while True:
            return_val, frame = cam.read()
            # Blur to suppress noise, then convert the *blurred* image to
            # HSV.  (Bug fix: the original converted the raw frame,
            # silently discarding the blur result.)
            img = cv2.GaussianBlur(frame, (5, 5), 0)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            # Range keeps pixels with hue 0, saturation 0, any value --
            # i.e. fully unsaturated (grey/black-to-white) pixels.
            blue_lower = np.array([0, 0, 0], np.uint8)
            blue_upper = np.array([0, 0, 255], np.uint8)
            blue = cv2.inRange(img, blue_lower, blue_upper)
            contours, hierarchy = cv2.findContours(blue, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            # Select the contour with the largest area.
            max_area = 0
            largest_contour = None
            for contour in contours:
                area = cv2.contourArea(contour)
                if area > max_area:
                    max_area = area
                    largest_contour = contour
            if largest_contour is not None:
                moment = cv2.moments(largest_contour)
                # Only draw when the blob is big enough to matter.
                if moment["m00"] > 1000:
                    rect = cv2.minAreaRect(largest_contour)
                    rect = ((rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2])
                    box = cv2.cv.BoxPoints(rect)
                    box = np.int0(box)
                    cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
            cv2.imshow('img', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        # Bug fix: release the camera and close windows on exit instead of
        # leaking the capture handle.
        cam.release()
        cv2.destroyAllWindows()
| {
"repo_name": "agounaris/python-computer-vision",
"path": "bootstrap/rectangle_detect.py",
"copies": "1",
"size": "1536",
"license": "mit",
"hash": 9066839454569687000,
"line_mean": 31.6808510638,
"line_max": 96,
"alpha_frac": 0.462890625,
"autogenerated": false,
"ratio": 3.7012048192771085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9624258772942302,
"avg_score": 0.007967334266961378,
"num_lines": 47
} |
# Webcam face-detection demo (requires OpenCV -- 2.x API).
__author__ = 'argi'
import cv2
# import sys
class FaceDetect:
    """Run a Haar-cascade face detector over the webcam stream.

    NOTE(review): the ``cv2.cv.CV_HAAR_SCALE_IMAGE`` flag is OpenCV 2.x
    API -- confirm the installed OpenCV version before reuse.
    """

    def __init__(self, casc_path):
        # Path to the Haar cascade XML file loaded in execute().
        self.classifier_path = casc_path

    def execute(self):
        """Show webcam frames with detected faces boxed until 'q' is pressed."""
        face_cascade = cv2.CascadeClassifier(self.classifier_path)
        video_capture = cv2.VideoCapture(0)
        while True:
            # Capture frame-by-frame and detect on the grayscale version.
            ret, frame = video_capture.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.cv.CV_HAAR_SCALE_IMAGE
            )
            # Draw a green rectangle around each detected face.
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # Display the resulting frame.
            cv2.imshow('Video', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        # Bug fix: actually release the capture and close the window --
        # this cleanup was commented out in the original, leaking the
        # camera handle.
        video_capture.release()
        cv2.destroyAllWindows()
| {
"repo_name": "agounaris/python-computer-vision",
"path": "bootstrap/face_detect.py",
"copies": "1",
"size": "1104",
"license": "mit",
"hash": -333928836174764540,
"line_mean": 24.6744186047,
"line_max": 76,
"alpha_frac": 0.527173913,
"autogenerated": false,
"ratio": 3.6435643564356437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9647482455482155,
"avg_score": 0.004651162790697674,
"num_lines": 43
} |
__author__ = 'Ariel Anthieni'
#Definicion de Librerias
import os
import datetime
import time
import sys
import shutil
import zipfile
import zlib
#Incorporo las librerias de uso
from extractor import dataraster , generate_ql
#Establecimiento de variables
dir_origen = '/media/sf_prod24/nuevos/l8/'
dir_dest_xml = '/media/sf_prod24/nuevos/l8/product/'
dir_quicklook = '/media/sf_prod24/nuevos/l8/product/'
dir_back = '/media/sf_prod24/nuevos/l8/back/'
dir_metadatasrc = '/media/sf_prod24/nuevos/l8/'
dir_producto = '/media/sf_prod24/nuevos/l8/product/'
base_ancho = 180
error_product = 0
error_count = 0
#Listo los archivos en el directorio
ficheros = os.listdir(dir_origen)
"""
El script se modificara para que tenga coherencia operativa, siendo que si existe el jpg se procesa
los siguientes datos que conforman el xml, en caso contrario se envia a un directorio de back
"""
# Process every raster product in the source directory: generate its
# quicklook, then its metadata; on failure, flag the file.
for archivo in ficheros:
    ext = os.path.splitext(archivo)
    # Only raster products (.tif / .img) are processed.
    if ext[1] == '.tif' or ext[1] == '.img':
        # Reset the per-file error flag.
        error_product = 0
        # The product name is the file name without its extension.
        nombre_subproducto = ext[0]
        nombre_ql = nombre_subproducto + '.jpg'
        nombre_ql_origen = ext[0] + '.jpg'
        nombre_meta_txt = nombre_subproducto + '.txt'
        nombre_meta_xml = nombre_subproducto + '.xml'
        nombre_producto_f = nombre_subproducto + '.zip'
        # Produce the quicklook JPEG for the product.
        generate_ql(dir_origen + nombre_ql_origen, dir_quicklook + nombre_ql)
        # Generate the metadata unless an earlier step already failed.
        if error_product == 0:
            try:
                print("Se genero el metadato del archivo: " + archivo + "\n")
                # Extract the raster properties of the image.
                datos_imagen = dataraster(dir_origen + archivo)
                print("Se generaron los metadatos al archivo: " + archivo + "\n")
            # Bug fix: the original bare "except:" also swallowed
            # KeyboardInterrupt / SystemExit; catch Exception instead.
            except Exception:
                print("ERROR [007]: No se ha podido generar los metadatos del archivo: " + archivo + "\n")
                error_product = 1
                continue
        else:
            # NOTE(review): error_product is reset to 0 just above, so this
            # branch is currently unreachable; kept for parity with the
            # original control flow.
            try:
                # Move the source raster to the back directory.
                shutil.move(dir_origen + archivo, dir_back + archivo)
                print("ERROR [008]: No se ha podido generar el producto para el archivo: " + archivo + "\n")
                error_count = error_count + 1
            except Exception:
                print("ERROR [009]: No se ha podido manipular el archivo: " + archivo + "\n")
                continue
| {
"repo_name": "elcoloo/metadata-tools",
"path": "example.py",
"copies": "1",
"size": "4663",
"license": "apache-2.0",
"hash": 7590026418791864000,
"line_mean": 32.7898550725,
"line_max": 108,
"alpha_frac": 0.5794552863,
"autogenerated": false,
"ratio": 3.2092222986923606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4288677584992361,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ariel Anthieni'
#Definicion de Librerias
import os
import json
import csv
import codecs
import geojson
import shapely.wkt
"""
Es necesario que se instalen las librerias geojson y shapely para poder convertir los formatos
pip3 install geojson
pip3 install shapely
"""
#Establecimiento de variables
dir_origen = '/opt/repositorio/metadata-tools/convert-tools/data/in/'
dir_destino = '/opt/repositorio/metadata-tools/convert-tools/data/out/'
geocampo = 'WKT'
#Listo los archivos en el directorio
ficheros = os.listdir(dir_origen)
"""
El script analiza el contenido del encabezado del csv y genera el array luego produciendo un geojson
"""
# Convert the specific GBA streets CSV export into a geojson
# FeatureCollection: the WKT column becomes the geometry, every other
# column becomes a feature property.
for archivo in ficheros:
    ext = os.path.splitext(archivo)
    # Only this specific export is converted.
    if ext[0] == '20161212calles_gba':
        # Read the whole CSV (header + data rows) into memory.
        # Bug fix: "with" guarantees the CSV handle is closed.
        arreglo = []
        with open(dir_origen + archivo) as filecsv:
            for elemento in csv.reader(filecsv):
                arreglo.append(elemento)
        geoarreglo = []
        encabezado = arreglo[0]
        # Index of the geometry (WKT) column within the header.
        idgeo = encabezado.index(geocampo)
        # Every row after the header becomes one geojson Feature.
        for elemento in arreglo[1:]:
            propiedades = {}
            multiwkt = ''
            for j, col in enumerate(encabezado):
                if j != idgeo:
                    propiedades[col] = elemento[j]
                else:
                    multiwkt = elemento[j]
            # Convert the WKT geometry and attach the row's properties.
            g1 = shapely.wkt.loads(multiwkt)
            multigeo = geojson.Feature(geometry=g1, properties=propiedades)
            geoarreglo.append(multigeo)
        georesultado = {"type": "FeatureCollection", "features": geoarreglo}
        # Bug fix: write the unicode JSON directly -- the original encoded
        # to bytes and immediately decoded back to text; codecs.open with
        # 'utf-8' already handles the encoding.  "with" closes the file.
        with codecs.open(dir_destino + ext[0] + '.geojson', 'w', 'utf-8') as resultado:
            resultado.write(json.dumps(georesultado, ensure_ascii=False))
| {
"repo_name": "elcoloo/metadata-tools",
"path": "convert-tools/csv_gba_to_geojson.py",
"copies": "1",
"size": "2322",
"license": "apache-2.0",
"hash": 3377855607636281300,
"line_mean": 21.1142857143,
"line_max": 100,
"alpha_frac": 0.5788113695,
"autogenerated": false,
"ratio": 3.3554913294797686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9347354019189976,
"avg_score": 0.017389735957958476,
"num_lines": 105
} |
__author__ = 'Ariel Anthieni'
#Definicion de Librerias
import os
import json
import csv
import codecs
#Establecimiento de variables
dir_origen = '/opt/desarrollo/metadata-tools/convert-tools/data/in/'
dir_destino = '/opt/desarrollo/metadata-tools/convert-tools/data/out/'
geocampo = 'geojson'
#Listo los archivos en el directorio
ficheros = os.listdir(dir_origen)
"""
El script analiza el contenido del encabezado del csv y genera el array luego produciendo un geojson
"""
# Convert every CSV in the input directory to a geojson FeatureCollection:
# the 'geojson' column already holds geojson text for the geometry, every
# other column becomes a feature property.
for archivo in ficheros:
    ext = os.path.splitext(archivo)
    # Only CSV files are processed.
    if ext[1] == '.csv':
        # Read the whole CSV (header + data rows) into memory.
        # Bug fix: "with" guarantees the CSV handle is closed.
        arreglo = []
        with open(dir_origen + archivo) as filecsv:
            for elemento in csv.reader(filecsv):
                arreglo.append(elemento)
        geoarreglo = []
        encabezado = arreglo[0]
        # Index of the column carrying the embedded geojson geometry.
        idgeo = encabezado.index(geocampo)
        # Every row after the header becomes one feature.
        for elemento in arreglo[1:]:
            propiedades = {}
            multigeo = {}
            for j, col in enumerate(encabezado):
                if j != idgeo:
                    propiedades[col] = elemento[j]
                else:
                    # The geometry column already holds geojson text.
                    multigeo = json.loads(elemento[j])
            # Attach the row's remaining columns as feature properties.
            multigeo['properties'] = propiedades
            geoarreglo.append(multigeo)
        georesultado = {"type": "FeatureCollection", "features": geoarreglo}
        # Bug fix: drop the pointless encode('utf8')/decode('utf-8') round
        # trip; codecs.open with 'utf-8' already writes utf-8 text.
        # "with" closes the output file.
        with codecs.open(dir_destino + ext[0] + '.geojson', 'w', 'utf-8') as resultado:
            resultado.write(json.dumps(georesultado, ensure_ascii=False))
| {
"repo_name": "elcoloo/metadata-tools",
"path": "convert-tools/csv_ckan_to_geojson.py",
"copies": "1",
"size": "1993",
"license": "apache-2.0",
"hash": 5535654716330843000,
"line_mean": 21.908045977,
"line_max": 100,
"alpha_frac": 0.5604616157,
"autogenerated": false,
"ratio": 3.366554054054054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9339662714906835,
"avg_score": 0.01747059096944392,
"num_lines": 87
} |
__author__ = 'Ariel Anthieni'
'''
Created on 20/04/2015
'''
# Stdlib DOM library used to build and pretty-print the XML output.
import xml.dom.minidom
class txttoxml():
    '''
    Converts a txt file of "key<separator>value" lines into an XML document.
    '''

    def __init__(self):
        '''
        Constructor (the class keeps no state).
        '''

    def toxml(self, filesource, filedestination, separator="="):
        """Parse a key/value text file and write it as pretty-printed XML.

        Parameters:
            filesource: name of the source metadata text file
            filedestination: name of the XML file to write
            separator: string separating tag name from value (default "=")
        """
        # Read the metadata file and split it into lines.
        # Bug fix: "with" guarantees the handle is closed even on error.
        with open(filesource, "r") as f:
            lines = f.read().split("\n")
        metadatos_origen = {}
        # Keep only non-empty lines whose separator appears past position 0;
        # double quotes are stripped from the line beforehand.
        for line in lines:
            line = line.replace('"', '')
            if len(line.strip()) > 0 and line.rfind(separator) > 0:
                clave = line.split(separator)[0]
                valor = line.split(separator)[1]
                metadatos_origen[clave.strip(" ")] = valor.strip(" ")
        # Echo the parsed dictionary (kept from the original for traceability).
        print(metadatos_origen)
        #################################################################################
        ## Derived / combined fields
        #################################################################################
        # Landsat 8 products: combine acquisition instant and corner coords.
        # Bug fix: catch KeyError (missing source field) instead of a bare
        # "except:" that also hid real programming errors.
        try:
            metadatos_origen['DATE_INSTANT'] = metadatos_origen['DATE_ACQUIRED'] + 'T' + metadatos_origen['SCENE_CENTER_TIME'].strip("Z")
            metadatos_origen['FILE_DATE'] = metadatos_origen['FILE_DATE'].strip("Z")
            metadatos_origen['CORNER_UL'] = metadatos_origen['CORNER_UL_LAT_PRODUCT'] + ' ' + metadatos_origen['CORNER_UL_LON_PRODUCT']
            metadatos_origen['CORNER_UR'] = metadatos_origen['CORNER_UR_LAT_PRODUCT'] + ' ' + metadatos_origen['CORNER_UR_LON_PRODUCT']
            metadatos_origen['CORNER_LL'] = metadatos_origen['CORNER_LL_LAT_PRODUCT'] + ' ' + metadatos_origen['CORNER_LL_LON_PRODUCT']
            metadatos_origen['CORNER_LR'] = metadatos_origen['CORNER_LR_LAT_PRODUCT'] + ' ' + metadatos_origen['CORNER_LR_LON_PRODUCT']
        except KeyError:
            print("No existen campos de origen para Landsat8 combinar en este metadato")
        # "Ciudades 2013" products: build the key and observation fields.
        try:
            metadatos_origen['Key'] = metadatos_origen['Province'] + ' , ' + metadatos_origen['City']
            metadatos_origen['obs'] = 'Las Imagenes del Satelite Spot 5 para este producto poseen fecha ' + metadatos_origen['Start'] + ' y del Satelite Landsat 8 la fecha ' + metadatos_origen['StartAux']
        except KeyError:
            print("No existen campos de origen para ciudades combinar en este metadato")
        #################################################################################
        ## Build the XML from the generated dictionary
        #################################################################################
        dom = xml.dom.minidom.Document()
        x = dom.createElement("Metadata")  # root element
        dom.appendChild(x)
        # Each dictionary key becomes a tag whose text content is the value.
        for elemento in metadatos_origen.items():
            x = dom.createElement(elemento[0])
            txt = dom.createTextNode(elemento[1])
            x.appendChild(txt)
            dom.childNodes[0].appendChild(x)  # attach under <Metadata>
        # Echo the XML (kept from the original).
        print(dom.toxml())
        # Write the pretty-printed XML.
        with open(filedestination, "w") as f:
            f.write(dom.toprettyxml())

    def includedataxml(self, filesource, dataraster):
        """Append each (tag, value) pair of *dataraster* to an existing XML file.

        Parameters:
            filesource: name of the XML file modified in place
            dataraster: dict of elements to insert under the root
        """
        dom = xml.dom.minidom.parse(filesource)
        for elemento in dataraster.items():
            x = dom.createElement(elemento[0])
            txt = dom.createTextNode(elemento[1])
            x.appendChild(txt)
            dom.childNodes[0].appendChild(x)  # attach under the root element
        # Rewrite the file with the added elements.
        with open(filesource, "w") as f:
            f.write(dom.toprettyxml())

    def metadataxml(self, filesource, dataciudad, dataraster):
        """Write a fresh <Metadata> XML combining two metadata dictionaries.

        Parameters:
            filesource: name of the XML file to write
            dataciudad: dict of city metadata elements
            dataraster: dict of raster metadata elements
        """
        dom = xml.dom.minidom.Document()
        x = dom.createElement("Metadata")  # root element
        dom.appendChild(x)
        # City metadata first, then raster metadata, each key as a tag.
        for elemento in dataciudad.items():
            x = dom.createElement(elemento[0])
            txt = dom.createTextNode(elemento[1])
            x.appendChild(txt)
            dom.childNodes[0].appendChild(x)
        for elemento in dataraster.items():
            x = dom.createElement(elemento[0])
            txt = dom.createTextNode(elemento[1])
            x.appendChild(txt)
            dom.childNodes[0].appendChild(x)
        # Write the pretty-printed XML.
        with open(filesource, "w") as f:
            f.write(dom.toprettyxml())
| {
"repo_name": "elcoloo/metadata-tools",
"path": "conversor.py",
"copies": "1",
"size": "6078",
"license": "apache-2.0",
"hash": -2897060018244600000,
"line_mean": 30.1692307692,
"line_max": 203,
"alpha_frac": 0.5837446528,
"autogenerated": false,
"ratio": 3.469178082191781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45529227349917806,
"avg_score": null,
"num_lines": null
} |
# Author tag for this scratch/tutorial exercises script.
__author__ = 'arif_'
# answer = input('would you like express shipping')
# if answer == 'yes':
# print('that will be an extra $10')
# else:
# print('fine!')
# print('have a nice day')
# favouriteTeam = input('what is your favourite hockey team?\n')
# if favouriteTeam == 'Senators':
# print('Yeah Go Sens Go')
# print('Hmm')
# else:
# print('bleh')
# if favouriteTeam == 'MU':
# print('Yeah')
# else:
# print('boo')
# bestTeam = 'MU'
# favouriteTeam = input('What is your favourite hockey team?\n')
# if favouriteTeam.upper() == bestTeam.upper():
# print('Red Devil!!')
# print('Yezza')
# print('Buekkk')
# Initialize the variable to fix the error from boolean being false
# freeToaster = False
# deposit = float(input('How much do you want to deposit\n'))
# if deposit >= 100:
# freeToaster = True # This boolean here ;)
# print('enjoy your toaster')
# else:
# print('enjoy the free mug')
# print('have a nice day sucker')
# if freeToaster:
# print('Just Kidding')
#############################################
# import locale
#
# locale.setlocale(locale.LC_ALL, '') # use the user-default locale
# shippingCharge = False
# total = int(input('What is the total amount of purchase?\n'))
# TotalWithShipping = total + 10
# if total < 50:
# shippingCharge = True
# else:
# shippingCharge = False
# if shippingCharge:
# print('You have to pay an extra', locale.currency(float(10)), 'for the total of', locale.currency(float(TotalWithShipping)), 'because you\'re cheap') # What is locale.currency??
# else:
# print('You have to pay', locale.currency(float(total)))
# print('Have a nice day.')
###########################################
# Team = input('Enter your favourite team: ').upper()
# Sport = input('Enter your favourite sport: ').upper()
##################
# if Sport == 'FOOTBALL':
# print('Go Football!!')
# if Team == 'MU':
# print('Manchester United gonna win this year')
# print('We love football')
# else:
# print('Go and learn about football')
###################
# if Sport == 'FOOTBALL' and Team == 'MU': # make sure both are in capital latter because the previous .upper()
# print('Go Red Devils!')
# # Make sure there are brackets. Same is as in math
# elif Sport == 'FOOTBALL' and (Team == 'LIVERPOOL' or Team == 'ARSENAL'):
# print('you suck')
# else:
# print('You\'re stupid')
###########################################
# def main():
# happyBirthdayCall1()
# print('')
# happyBirthdayCall2()
# return
#
# def happyBirthdayCall1():
# print('Happy Birthday to you!')
# print('Happy Birthday to you!')
# print('Happy Birthday, dear', name1, '.')
# print('Happy Birthday to you')
# return
#
# def happyBirthdayCall2():
# print("Happy Birthday to you!")
# print("Happy Birthday to you!")
# print("Happy Birthday, dear", name2, '.')
# print("Happy Birthday to you!")
# return
#
# name1 = str(input('What is your first friend\'s name?\n'))
# name2 = str(input('What is your second friend\'s name?\n'))
#
# main()
###########################################
# Calculate the total to charge from an online store in Malaysia
# ASk The user what country they are from and their order total
# If the user is from Malaysia, ask which state
# If the order is from outside Malaysia do not charge any taxes
# If the order was placed in Malaysia, calculate tax based on which state.
# - Selangor charge 5% GST
# - Penang, Kelantan, Perak charge 13% Harmonized Sales Tax
# - All other state charge 6% state tax and additional 5% GST
# GoodsSalesTax = 0.05
# HarmonizedTax = 0.13
# StateTax = 0.06
# Malaysian = False
#
# Nationality = str(input('Are you from Malaysia? (y/n)\n')).lower()
# if Nationality == 'y':
# Malaysian = True
# elif Nationality == 'n':
# Malaysian = False
# else:
# print('please restart the runtime and choose the correct answer')
# exit()
# State = str(input('What is your state\'s name?\n')).lower()
#
# InputAmount = float(input('What is the base amount?\n'))
# if State == 'selangor' and Malaysian:
# Amount = InputAmount + (InputAmount * GoodsSalesTax)
# print('\nThe amount after tax is', Amount)
# elif State == ('penang' or 'kelantan' or 'perak') and Malaysian:
# Amount = InputAmount + (InputAmount * HarmonizedTax)
# print('\nThe amount after tax is', Amount)
# elif Malaysian == True:
# Amount = InputAmount + ((InputAmount * GoodsSalesTax) + (InputAmount * StateTax))
# print('\nThe amount after tax is', Amount)
# else:
# Amount = InputAmount
# print('\nYour amount is', Amount)
# exit()
###########################################
# import turtle
# nbrSides = 20
# for steps in range(nbrSides):
# turtle.forward(100)
# turtle.right(360/nbrSides)
# for moresteps in range(nbrSides):
# turtle.forward(50)
# turtle.right(360/nbrSides)
###########################################
# import turtle
#
# nbrSides = int(input('Number of sides.\n'))
# for colour in range(nbrSides):
# turtle.forward(100)
# turtle.right(360 / nbrSides)
# for colour in range(1):
# turtle.forward(50)
# turtle.right(360 / nbrSides)
###########################################
# Looping through unknown number of time.
# answer = '0'
# while answer != '4':
# answer = input('What is 2 + 2\n')
# print( ' Yes! 2 + 2 = 4')
###########################################
# import turtle
# counter = 0
# while counter < 12:
# turtle.forward(100)
# turtle.right(90)
# # counter = counter + 1
# counter += 1
###########################################
# import turtle
# penColor = input('What color do you want?\n')
# lineLength = int(input('How long do you want the line to be?\n'))
# lineAngle = int(input('What is the angle of the line?\n'))
# while lineLength == 1:
# lineLength = int(input('How long do you want the line to be?\n'))
# if lineLength > 0:
# turtle.color(penColor)
# turtle.right(lineAngle)
# turtle.forward(lineLength)
# elif lineLength <= 0:
# exit()
# exit()
# NOTE(review): the original file ended with a bare "import" statement
# (a truncated line), which is a SyntaxError; it has been removed.
| {
"repo_name": "areriff/pythonlearncanvas",
"path": "ifStatement.py",
"copies": "1",
"size": "6126",
"license": "mit",
"hash": -6085333370651347000,
"line_mean": 31.585106383,
"line_max": 184,
"alpha_frac": 0.5935357493,
"autogenerated": false,
"ratio": 3.195618153364632,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9288470455505121,
"avg_score": 0.00013668943190219785,
"num_lines": 188
} |
# Author tag for this Tkinter GUI tutorial script.
__author__ = 'arif_'
# import _tkinter
# from tkinter import Tk, Frame, BOTH
#
#
# class Example(Frame):
#
# def __init__(self, parent):
# Frame.__init__(self, parent, background="grey")
#
# self.parent = parent
#
# self.initUI()
#
# def initUI(self):
#
# self.parent.title("My First GUI Window")
# self.pack(fill=BOTH, expand=1)
#
#
# def main():
#
# root = Tk()
# root.geometry("250x150+300+300")
# app = Example(root)
# root.mainloop()
#
#
# if __name__ == '__main__':
# main()
#
#
# #################
# from tkinter import Tk, Frame, BOTH
# class Example(Frame):
#
# def __init__(self, parent):
# Frame.__init__(self, parent, background="white")
#
# self.parent = parent
# self.parent.title("Centered window")
# self.pack(fill=BOTH, expand=1)
# self.centerWindow()
#
# def centerWindow(self):
#
# w = 290
# h = 150
#
# sw = self.parent.winfo_screenwidth()
# sh = self.parent.winfo_screenheight()
#
# x = (sw - w)/2
# y = (sh - h)/2
# self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))
#
# def main():
#
# root = Tk()
# ex = Example(root)
# root.mainloop()
#
#
# if __name__ == '__main__':
# main()
#############
from tkinter import Tk, BOTH
# Bug fix: Style (and the themed Frame/Button widgets it styles) live in
# tkinter.ttk, not plain tkinter -- the original
# "from tkinter import Frame, Button, Style" raises ImportError.
from tkinter.ttk import Frame, Button, Style


class Example(Frame):
    """A window containing a single themed Quit button."""

    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.initUI()

    def initUI(self):
        """Set the title and theme, then place the Quit button."""
        self.parent.title("Quit button")
        self.style = Style()
        self.style.theme_use("default")
        self.pack(fill=BOTH, expand=1)
        quitButton = Button(self, text="Quit",
                            command=self.quit)
        quitButton.place(x=50, y=50)


def main():
    # Build the root window, size/position it, and enter the event loop.
    root = Tk()
    root.geometry("250x150+300+300")
    app = Example(root)
    root.mainloop()


if __name__ == '__main__':
    main()
| {
"repo_name": "areriff/pythonlearncanvas",
"path": "GUI/FirstGUIAttemp.py",
"copies": "1",
"size": "1976",
"license": "mit",
"hash": 1045333555552573800,
"line_mean": 18.9595959596,
"line_max": 60,
"alpha_frac": 0.5151821862,
"autogenerated": false,
"ratio": 3.09717868338558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9112360869585581,
"avg_score": 0,
"num_lines": 99
} |
# Author tag for this Tkinter/PyQt experiments script.
__author__ = 'arif_'
##########Frame
# root = Tk() # Create a blank window
# topFrame = Frame(root) # Make a frame inside the root (main window)
# topFrame.pack(side=TOP) # To put it in and display in the topFrama
# bottomFrame = Frame(root)
# bottomFrame.pack(side=BOTTOM) # The bottom frame
#
# button1 = Button(topFrame, text='Button 1', fg='red') # fg is foreground color
# button2 = Button(topFrame, text='Button 2', fg='blue')
# button3 = Button(topFrame, text='Button 3', fg='green')
# button4 = Button(bottomFrame, text='Button 4', fg='purple')
# button1.pack(side=LEFT)
# button2.pack(side=LEFT)
# button3.pack(side=LEFT)
# button4.pack(side=BOTTOM)
#
##############Put label
# root = Tk() # Create a blank window
# one = Label(root, text='One', bg='grey', fg='yellow')
# one.pack()
# two = Label(root, text='Two', bg='green', fg='black')
# two.pack(fill=X)
# three = Label(root, text='three', bg='blue', fg='white')
# three.pack(side=LEFT, fill=Y)
# root.mainloop() # To make the window stay
##############Grid
# root = Tk() # Create a blank window
# label_1 = Label(root, text='name')
# label_2 = Label(root, text='password')
# entry_1 = Entry(root)
# entry_2 = Entry(root)
#
# label_1.grid(row=1, column=1, sticky=E) # sticky use North South East and West terminology
# label_2.grid(row=2, column=1, sticky=E)
# entry_1.grid(row=1, column=2)
# entry_2.grid(row=2, column=2)
#
# c = Checkbutton(root, text='Keep me logged in')
# c.grid(columnspan=2)
# root.mainloop() # To make the window stay
##############Bind function to widget
# root = Tk() # Create a blank window
# # def printName():
# def printName(event): # event is something user do / can do
# print('Hello, my name is Arif')
#
#
# # button_1 = Button(root, text='Print Name', command=printName)
# button_1 = Button(root, text='Print Name')
# button_1.bind('<Button-1>', printName) # <Button-1> is left mouse button
# button_1.pack()
#
# root.mainloop() # To make the window stay
##############one widget does multiple thing
# root = Tk() # Create a blank window
# def leftClick(event):
# print('Left')
#
# def middleClick(event):
# print('Middle')
#
# def rightClick(event):
# print('Right')
#
# frame = Frame(root, width=300, height=250)
# frame.bind("<Button-1>", leftClick)
# frame.bind("<Button-2>", middleClick)
# frame.bind("<Button-3>", rightClick)
# frame.pack()
# root.mainloop() # To make the window stay
##############how to use class
# class BuckyButtons:
# def __init__(self, master): # __init__ initialize itself whenever I create object/something
# frame = Frame(master)
# frame.pack()
#
# self.printButton = Button(frame, text='Print Message', command=self.printMessage)
# self.printButton.pack(side=LEFT)
#
# self.quitButton = Button(frame, text='Quit', command=frame.quit)
# self.quitButton.pack(side=LEFT)
# def printMessage(self):
# print('Wow, this actually worked!')
#
# root = Tk() # Create a blank window
# b = BuckyButtons(root)
# root.mainloop() # To make the window stay
##############Creating drop down menu
# def doNothing():
# print('ok ok I won\'t')
#
#
# root = Tk() # Create a blank window
#
# menu = Menu(root)
# root.config(menu=menu)
#
# subMenu = Menu(menu) # This is adding the item
# menu.add_cascade(label='File', menu=subMenu) # This is how I want it to behave
# subMenu.add_command(label='New Project...', command=doNothing)
# subMenu.add_command(label='New...', command=doNothing)
# subMenu.add_separator()
# subMenu.add_command(label='Exit', command=quit)
#
# editMenu = Menu(menu)
# menu.add_cascade(label='Edit', menu=editMenu)
# editMenu.add_command(label='Redo', command=doNothing)
#
# # ENd of menu, Starts of toolbar
#
# toolbar = Frame(root, bg='blue') # create basic toolbar
# insertButton = Button(toolbar, text='Insert Image', command=doNothing) # put the button in toolbar
# insertButton.pack(side=LEFT, padx=2, pady=2)
# printButton = Button(toolbar, text='print', command=doNothing())
# printButton.pack(side=LEFT, padx=2, pady=2)
#
# toolbar.pack(side=TOP, fill=X)
#
# # ENd of toolbar, starts of status bar
#
# status = Label(root, text='Preparing to do nothing...', bd=1, relief=SUNKEN, anchor=W, bg='white')
# status.pack(side=BOTTOM, fill=X)
#
# root.mainloop() # To make the window stay
##############Message Box
# import tkinter.messagebox
#
# root = Tk()
#
# tkinter.messagebox.showinfo('Window Title', 'Mondkey can live up to three hundred years')
# answer = tkinter.messagebox.askquestion('Question 1', 'Do you like silly faces?')
#
# if answer == 'yes':
# print(' ;-) ')
# elif answer == 'no':
# quit()
# else:
# quit()
#
# root.mainloop()
##############Shapes and Graphics
# root = Tk()
#
# canvas = Canvas(root, width=600, height=300)
# canvas.pack()
#
# blackLine = canvas.create_line(10,10,300,200, fill='black')
# redLine = canvas.create_line(10,290,300,200, fill='red')
# blueLine = canvas.create_line(590,10,300,200, fill='blue')
# greenBox = canvas.create_rectangle(60,30,400,180, fill='green')
#
# canvas.delete(greenBox, redLine, blueLine)
# canvas.delete(ALL)
#
# root.mainloop()
############ Images and Icons (Not working yet, need png
# root = Tk()
#
# photo = PhotoImage( file="add1.png" )
# label = Label( root, image=photo )
# label.pack( )
#
# root.mainloop()
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import sys
class HelloWorld():
def __init__(self):
QDialog.__init__(self)
layout = QV | {
"repo_name": "areriff/pythonlearncanvas",
"path": "GUI/SecondGUIAttempt.py",
"copies": "1",
"size": "5515",
"license": "mit",
"hash": 7551459650361346000,
"line_mean": 27.7291666667,
"line_max": 101,
"alpha_frac": 0.6471441523,
"autogenerated": false,
"ratio": 2.9634605051047824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4110604657404782,
"avg_score": null,
"num_lines": null
} |
# guests = ['Susan', 'Christopher', 'Bill', 'Satya']
# print(guests[-2])
# print(guests[1])
# guests[1] = 'Steve' # Replacing item from the list
# guests.append('Arif') # Append item to the list
# guests.remove('Satya')
# guests.remove(guests[-2])
# guests.append('Colin')
# del guests[0]
# print(guests[-1:-2])
# print(guests)
# print(guests.index('Arif'))
###############
# guests = ['Susan', 'Christopher', 'Bill', 'Satya']
# nbrValueList = len(guests)
# for steps in range(nbrValueList):
# print(guests[steps])
################
# guests = ['Susan', 'Christopher', 'Bill', 'Satya']
# guests.sort()
# currentGuest = ()
# for currentGuest in guests:
# print(currentGuest)
################
# guests = []
# name = ''
# while name != 'arif':
# name = input('Welcome guest\nEnter your name\n')
# print('')
# guests.append(name)
# guests.remove('arif')
# guests.sort()
# for currentGuest in guests:
# print(guests)
################
# Target file for the CSV demo below, plus named file-open mode constants.
fileName = 'list.csv'
WRITE = 'w'
READ = 'r'
READWRITE = 'w+'
APPEND = 'a'
# myList = open(fileName, mode=WRITE)
# myList.write('arif effendi, 30\n')
# myList.write('syia, 30')
# myList.close()
# data = input('Please enter file info')
# file = open(fileName, mode=WRITE)
# file.write(data)
# file.close()
# How to read from file
# fileList = open('list.csv')
# allFileContents = fileList.read()
# print(allFileContents)
# firstLine = fileList.readline()
# print(firstLine)
# secondLine = fileList.readline()
# print(secondLine)
# Import CSV and display all data row by row and one value by value.
# Import CSV and display all data row by row and one value by value.
import csv
with open(fileName, mode=READ) as animalFile:
    allRowsList = csv.reader(animalFile)
    for currentRow in allRowsList:
        print(','.join(currentRow))  # .join combines the row's values with ',' for display
        # print(currentRow) # This is hideous form.
        print('This is individual values from the row above')
        for currentWord in currentRow:
            print(currentWord)
        print('')
| {
"repo_name": "areriff/pythonlearncanvas",
"path": "Lists.py",
"copies": "1",
"size": "2028",
"license": "mit",
"hash": 7013777688532116000,
"line_mean": 26.04,
"line_max": 96,
"alpha_frac": 0.6252465483,
"autogenerated": false,
"ratio": 2.956268221574344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4081514769874344,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arifpz'
__all__ = ["summingFunction", "summingFunctionBackwards", "summingFunctionWithBias", "summingFunctionWithBiasBackward",
"addingValueBackwards", "addingValueBias", "addingValueWeight"]
def summingFunction(detailInputList, weightList):
    """
    Weighted sum of all inputs: sum(weight[i] * input[i]).

    Parameter:
    :param detailInputList: list of input values
    :param weightList: list of weights, one per input
    Return:
    :return: weighted sum as a float
    """
    summing_function = 0.0
    # Bug fix: the original iterated range(0, len(...) - 1), which silently
    # skipped the last input/weight pair (range already excludes len).
    for index in range(len(detailInputList)):
        summing_function += weightList[index] * detailInputList[index]
    return summing_function
def summingFunctionWithBias(detailInputList, weightList, bias):
    """
    Weighted sum of all inputs plus a bias term.

    Parameter:
    :param detailInputList: list of input values
    :param weightList: list of weights, one per input
    :param bias: scalar bias added to the weighted sum
    Return:
    :return: weighted sum plus bias, as a float
    """
    summing_function = 0.0
    # Bug fix: the original iterated range(0, len(...) - 1), which silently
    # skipped the last input/weight pair.
    for index in range(len(detailInputList)):
        summing_function += weightList[index] * detailInputList[index]
    summing_function += bias
    return summing_function
def summingFunctionBackwards(detailInputList, weightList, addWeightList):
    """
    Apply the weight deltas, then compute the weighted sum with the
    updated weights.

    Parameter:
    :param detailInputList: list of input values
    :param weightList: current weights (mutated in place by addingValueWeight)
    :param addWeightList: per-weight deltas from the backward pass
    Return:
    :return: (weighted sum, updated weight list)
    """
    # adding value
    newWeightList = addingValueWeight(weightList, addWeightList)
    summing_function = 0.0
    # Bug fix: the original iterated range(0, len(...) - 1), which silently
    # skipped the last input/weight pair.
    for index in range(len(detailInputList)):
        summing_function += newWeightList[index] * detailInputList[index]
    return summing_function, newWeightList
def summingFunctionWithBiasBackward(detailInputList, weightList, addWeightList, bias, tempBias):
    """
    Apply the weight and bias deltas, then compute the biased weighted sum
    with the updated parameters.

    Parameter:
    :param detailInputList: list of input values
    :param weightList: current weights (mutated in place)
    :param addWeightList: per-weight deltas from the backward pass
    :param bias: current bias
    :param tempBias: bias delta from the backward pass
    Return:
    :return: (weighted sum, updated weight list, updated bias)
    """
    # adding value
    newBias, newWeightList = addingValueBackwards(weightList, addWeightList, bias, tempBias)
    summing_function = 0.0
    # Bug fix: the original iterated range(0, len(...) - 1), which silently
    # skipped the last input/weight pair.
    for index in range(len(detailInputList)):
        summing_function += newWeightList[index] * detailInputList[index]
    summing_function += newBias
    return summing_function, newWeightList, newBias
def addingValueBackwards(weightList, addWeightList, bias, tempBias):
    """
    Backward-pass update of every weight (in place) and of the bias.

    Parameter:
    :param weightList: current weights, mutated in place
    :param addWeightList: per-weight deltas to add
    :param bias: current bias
    :param tempBias: bias delta to add
    Return:
    :return: (new bias, updated weight list)
    """
    newBias = bias + tempBias
    # Bug fix: the original iterated range(0, len(...) - 1) and left the
    # last weight un-updated.
    for index in range(len(weightList)):
        weightList[index] = weightList[index] + addWeightList[index]
    return newBias, weightList
def addingValueBias(bias, tempBias):
    """
    Backward-pass update of the bias alone.

    Parameter
    :param bias: current bias
    :param tempBias: delta to add
    Return:
    :return: the summed bias
    """
    updated = bias + tempBias
    return updated
def addingValueWeight(weightList, addWeightList):
    """
    Backward-pass update of every weight, in place.

    Parameter:
    :param weightList: current weights, mutated in place
    :param addWeightList: per-weight deltas to add
    Return:
    :return: the (same) updated weight list
    """
    # Bug fix: the original iterated range(0, len(...) - 1) and left the
    # last weight un-updated.
    for index in range(len(weightList)):
        weightList[index] = weightList[index] + addWeightList[index]
    return weightList
"repo_name": "AiLaboratoryTelU/simple-artificial-neural-network",
"path": "simple_ann/module/forewardFunction.py",
"copies": "1",
"size": "3290",
"license": "apache-2.0",
"hash": 1167965765504542000,
"line_mean": 26.425,
"line_max": 119,
"alpha_frac": 0.6820668693,
"autogenerated": false,
"ratio": 3.7133182844243793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.489538515372438,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arifpz'
import configparser as cp
import json as js
import os
import simple_ga.dataModel.inputDataModel as id
# import simple_ann.dataModel.trainingResultModel as tr
# import simple_ann.dataModel.testingDataModel as td
# Public API of this module.
# Bug fix: the original list was missing a comma after 'writeToJsonFromObject',
# so implicit string concatenation fused two entries into the bogus name
# 'writeToJsonFromObjectwriteToJsonFromString', breaking `from ... import *`.
__all__ = ['similarList', 'readFromJson', 'readFromJsonAsObject', 'writeToJsonFromObject',
           'writeToJsonFromString', 'pathGenerator', 'readConfigFileAll', 'readConfigFileSection',
           'readConfigFileSectionOption', 'isFileExist']
def similarList(a, b):
    """
    Report whether every element of ``a`` also occurs in ``b``
    (membership-wise subset test).

    Parameter:
    :param a: list whose elements are checked
    :param b: list searched for each element of ``a``
    Return:
    :return: True when no element of ``a`` is missing from ``b``
    """
    leftovers = [item for item in a if item not in b]
    return len(leftovers) == 0
def readFromJson(pathData):
    """
    Load and return the parsed contents of a JSON file.

    Parameter:
    :param pathData: path of the JSON file to read
    Return:
    :return: the deserialized Python object
    """
    with open(pathData) as handle:
        return js.load(handle)
def readFromJsonAsObject(pathData, objectHook):
    """
    Load a JSON file, converting every JSON object via ``objectHook``.

    Parameter:
    :param pathData: path of the JSON file to read
    :param objectHook: callable applied to each decoded dict
    Return:
    :return data: the deserialized (hook-transformed) result
    """
    with open(pathData) as handle:
        return js.load(handle, object_hook=objectHook)
def writeToJsonFromString(data, targetFile, mode):
    """
    Serialize ``data`` to ``targetFile`` as JSON with indent level 1.

    Parameter:
    :param data: JSON-serializable object to write
    :param targetFile: destination file path
    :param mode, 'w'(writing, truncating the file first); 'a'(appending to the end of the file if file exist):
    Return:
    :return:
    """
    with open(targetFile, mode) as handle:
        js.dump(data, handle, indent=1)
def writeToJsonFromObject(modelObject, targetFile, mode):
    """
    Serialize an object's ``__dict__`` to ``targetFile`` as JSON.

    :param modelObject: object whose attribute dict is written
    :param targetFile: destination file path
    :param mode, 'w'(writing, truncating the file first); 'a'(appending to the end of the file if file exist):
    :return:
    """
    with open(targetFile, mode) as handle:
        js.dump(modelObject.__dict__, handle, indent=1)
def pathGenerator(currentFolderName, folderNameTarget, fileName):
    """
    Build a path to ``fileName`` inside ``folderNameTarget``, relative to
    the project root (the part of the current working directory that
    precedes "/<currentFolderName>").

    :param currentFolderName: folder name terminating the project root in cwd
    :param folderNameTarget: folder under the project root holding the file
    :param fileName: file name to append
    :return: the assembled path
    """
    # NOTE(review): assumes cwd contains "/<currentFolderName>" exactly once;
    # otherwise the two-way unpack raises ValueError (same as the original).
    projectRoot, _ = os.getcwd().split("/" + currentFolderName)
    return os.path.join(projectRoot, folderNameTarget, fileName)
def readConfigFileAll(pathIni):
    """
    Parse the whole configuration file and return the parser object.

    :param pathIni: path of the .ini file
    :return: a populated ConfigParser
    """
    config = cp.ConfigParser()
    config.read(pathIni)
    return config
def readConfigFileSection(pathIni, sectionName):
    """
    Return all (option, value) pairs of one section of a configuration file.

    :param pathIni: path of the .ini file
    :param sectionName: section to read
    :return: list of (option, value) tuples
    """
    config = cp.ConfigParser()
    config.read(pathIni)
    return config.items(sectionName)
def readConfigFileSectionOption(pathIni, sectionName, OptionName):
    """
    Read one option from one section of a configuration file.
    A comma-separated value is split into a list (all spaces stripped);
    any other value comes back as a plain string.

    :param pathIni: path of the .ini file
    :param sectionName: section to read
    :param OptionName: option within the section
    :return: str, or list of str when the value contains a comma
    """
    config = cp.ConfigParser()
    config.read(pathIni)
    value = config.get(sectionName, OptionName)
    if "," in value:
        return value.replace(' ', '').split(',')
    return value
def isFileExist(pathFile):
    """Return True when ``pathFile`` exists on disk (file or directory)."""
    return os.path.exists(pathFile)
| {
"repo_name": "AiLaboratoryTelU/simple-genetic-algorithm",
"path": "simple_ga/utility/utility.py",
"copies": "1",
"size": "3578",
"license": "apache-2.0",
"hash": 515974609600689400,
"line_mean": 23.5068493151,
"line_max": 110,
"alpha_frac": 0.6570709894,
"autogenerated": false,
"ratio": 3.7426778242677825,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48997488136677825,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arifpz'
# import inspect as ins
import simple_ann.utility.utility as util
import random as rn
__all__ = ["generateZeroList", "generateRandomList", "automaticGeneratorList"]
#TODO implement it, replace old one
def generateZeroList(n):
    """
    Build a list containing ``n`` integer zeros.

    Parameter:
    :param n: desired length
    Return:
    :return: list of ``n`` zeros
    """
    return [0 for _ in range(n)]
#TODO make it flexible in rangeStart and rangeEnd
# def generateRandomList(n, rangeStart, rangeEnd):
def generateRandomList(n):
    """
    Build a list of ``n`` independent random floats drawn from [0.0, 1.0].

    Parameter:
    :param n: desired length
    Return:
    :return: list of ``n`` random floats
    """
    # Bug fix: the original used [rn.uniform(0.0, 1.0)] * n, which draws a
    # SINGLE random number and repeats it n times -- every "random" initial
    # weight was identical. Draw one value per element instead.
    return [rn.uniform(0.0, 1.0) for _ in range(n)]
def automaticGeneratorList(n, typeList):
    """
    Dispatch to the zero- or random-list generator by name.

    :param n: length of the list to generate
    :param typeList: either "zero" or "random"
    :return: the generated list, or None for an unrecognized name
    """
    if typeList == "zero":
        return generateZeroList(n)
    if typeList == "random":
        return generateRandomList(n)
    # Unknown type name: fall through and return None, as before.
    return
def initiationValueGeneratorList(n):
    """
    Build the list of candidate initial-weight lists, one per generator.

    :param n: length of each generated list
    :return: [zero list, random list]
    """
    # must be initialize manually; note BOTH generators run eagerly here.
    return [generateZeroList(n), generateRandomList(n)]
def methodMapper(weightList, methodList):
    """
    Pair each weight-type name with its generated list.

    :param weightList: sequence of type names (becomes the dict keys)
    :param methodList: sequence of generated lists (becomes the dict values)
    :return: dict mapping each name to its list
    """
    return {name: generated for name, generated in zip(weightList, methodList)}
#TODO refactor static value, move to config.ini
def automaticGenerator(n, typeList):
    """
    Method to generate either zero or random initiation list
    :param n: length of the list to generate
    :param typeList: a type name present in the config's weight option
    :return: the generated list for the requested type
    """
    # Resolve the ini file that lists the allowed weight-type names.
    pathIni = util.pathGenerator("simple_ann/module", "configurations", "configTemplate.ini")
    sectionName = 'inputDataValue'
    optionName = "weight"
    # presumably a list of names such as ["zero", "random"] -- TODO confirm
    # against configTemplate.ini.
    weightList = util.readConfigFileSectionOption(pathIni, sectionName, optionName)
    # NOTE(review): initiationValueGeneratorList eagerly builds BOTH lists,
    # so the unused generator's work is discarded.
    methodList = initiationValueGeneratorList(n)
    result = methodMapper(weightList, methodList)[typeList]
    return result
| {
"repo_name": "AiLaboratoryTelU/simple-artificial-neural-network",
"path": "simple_ann/module/initiationValueGenerator.py",
"copies": "1",
"size": "1983",
"license": "apache-2.0",
"hash": -7077314698566666000,
"line_mean": 22.0581395349,
"line_max": 93,
"alpha_frac": 0.6691880988,
"autogenerated": false,
"ratio": 3.76280834914611,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.493199644794611,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arifpz'
import inspect as ins
__all__ = ["hardLimit"]
# class activationFunction(object):
#
# # Hard limit function
# def hardLimit(self, summing_function, threshold):
# if (summing_function >= threshold):
# return 1
# return 0
#
# #TODO Linear function
# def linear(self):
# pass
#
# #TODO Exponential function
# def exponential(self):
# pass
#
# #TODO Sigmoid function
# def sigmoid(self):
# pass
#
# def activationFunctionResult(function):
# methodList = activationFunction()
# listMethod = [item[0] for item in ins.getmembers(methodList, predicate=ins.ismethod)]
#
# try:
# for value in listMethod:
# if value == function:
# return methodList.hardLimit(summing_function=0, threshold=0)
# elif value == function:
# return methodList.linear()
# elif value == function:
# return methodList.exponential()
# elif value == function:
# return methodList.sigmoid()
# except (ArithmeticError, ValueError):
# print("error")
# finally:
# pass
#
#
# if __name__=="__main__":
# print(activationFunctionResult())
# Hard limit function
def hardLimit(summing_function, threshold):
    """Binary step activation: 1 when the sum reaches the threshold, else 0."""
    return 1 if summing_function >= threshold else 0
#TODO Linear function
def linear():
    """Linear activation -- placeholder, not implemented yet."""
    pass
#TODO Exponential function
def exponential():
    """Exponential activation -- placeholder, not implemented yet."""
    pass
#TODO Sigmoid function
def sigmoid():
    """Sigmoid activation -- placeholder, not implemented yet."""
    pass
"repo_name": "AiLaboratoryTelU/simple-artificial-neural-network",
"path": "simple_ann/module/activationFunction.py",
"copies": "1",
"size": "1555",
"license": "apache-2.0",
"hash": -6979651877092921000,
"line_mean": 22.223880597,
"line_max": 91,
"alpha_frac": 0.5890675241,
"autogenerated": false,
"ratio": 3.7742718446601944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4863339368760194,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arifpz'
#TODO implement use scipy/numpy so that matrix can be calculated more effective
# import scipy as sc
# import numpy as np
import simple_ann.dataModel.inputDataModel as id
import simple_ann.module.activationFunction as af
import simple_ann.module.backwardFunction as bf
import simple_ann.module.forewardFunction as ff
import simple_ann.utility.utility as util
import simple_ann.utility.utilityDataModel as utilModel
import simple_ann.module.initiationValueGenerator as init
import simple_ann.dataModel.trainingResultModel as tr
__all__ = ['training']
class training(object):
    """
    Class for training phase.

    Constructing an instance immediately runs the full training loop on
    the given input file and writes the result JSON to disk.
    """
    # Class-level defaults; overwritten per instance below.
    pathData = None
    inputData = None
    trainingResult = None
    optimizeEpoch = None

    def __init__(self, pathInput, pathTrainingResult):
        """
        Parameter:
        :param pathInput: path of the JSON training-input file
        :param pathTrainingResult: path where the result JSON is written
        """
        self.pathData = pathInput
        self.pathResult = pathTrainingResult
        # Training runs eagerly on construction.
        self.trainingResult = self.train(self.pathData, self.pathResult)

    def train(self, pathData, pathResult):
        """
        Run the perceptron-style training loop and persist the result.

        Parameter:
        :param pathData: JSON file with epoch count, rates and samples
        :param pathResult: output JSON file for the trained weights/bias
        Return:
        :return: trainingResult model object (also stored on self)
        """
        # ---Initiation Variable From File---
        # Read JSON data for initiation variable
        mode = 'w'
        self.inputData = util.readFromJsonAsObject(pathData, id.as_inputData)
        epoch, bias, learningRate, threshold, activationFunction, nInput, weightType, inputStructure, nRows, nColumns, \
        dataDetail = utilModel.initiationInputVariables(self.inputData)
        # ---Initiation Variable From File---
        # ---List Processing---
        # One weight (and one delta slot) per input-grid cell.
        weightData = init.automaticGeneratorList(nRows*nColumns, weightType)
        addWeightData = init.generateZeroList(nRows*nColumns)
        # ---List Processing---
        # ---ANN Algorithm---
        # Loop each layer
        # 1. Summing Function
        # 2. Activation Function
        # 3. Validation T-Y
        # 4. Adding New Value
        # Loop each layer
        tempBias = 0.0
        summing_function = 0.0
        similarWeightCount = 0
        weightDataPrev = []
        trainingResult = []
        # Loop per epoch
        for iteration in range(epoch):
            print(iteration)
            # Snapshot the weights before this epoch (used by the disabled
            # end-of-learning check below).
            weightDataPrev = weightData[:]
            # Loop per input Data
            for detailData in dataDetail:
                # ---Forwards---
                # Summing Function
                target = detailData.target
                result = 0
                value = detailData.input
                # Applies the deltas from the PREVIOUS sample, then sums.
                summing_function, weightData, bias = ff.summingFunctionWithBiasBackward(value, weightData,
                                                     addWeightData, bias, tempBias)
                # ---Activation Function---
                result = af.hardLimit(summing_function, threshold)
                # ---Activation Function---
                # ---Forwards---
                # ---Backwards---
                # Calculate Adding Weight and Bias
                tempBias, addWeightData = bf.updateBackward(learningRate, target, result, value, nRows, nColumns)
                print(weightData)
                summing_function = 0.0
                # ---Backwards---
            # ---Automate Testing End of Learning---
            # endOfLearning, similarWeightCount = util.endOfLearning(weightData, weightDataPrev, similarWeightCount)
            # if endOfLearning:
            #     trainingResult = weightData
            #     self.optimizeEpoch = iteration
            #     break
            # ---Automate Testing End of Learning---
        trainingResult = weightData
        #old
        # self.trainingResult = trainingResult
        #new
        self.trainingResult = tr.trainingResult()
        self.trainingResult.pathInputData = pathData
        self.trainingResult.bias = bias
        self.trainingResult.trainingResult = trainingResult
        # ---Write training result in file---
        util.writeToJsonFromObject(self.trainingResult, pathResult, mode)
        # ---Write training result in file---
        print("Done")
        return self.trainingResult
# ---ANN Algorithm---
# if __name__ == '__main__':
# sourceData = '/Users/arifpz/Documents/Code/GithubProjects/AiLabTel_U/simple-artificial-neural-network/sample_data/inputData_sample.json'
# trainingResultDataPath = '/Users/arifpz/Documents/Code/GithubProjects/AiLabTel_U/simple-artificial-neural-network/sample_data/trainingResult_sample.json'
#
# train = training(sourceData, trainingResultDataPath)
# print("Source Data: ",sourceData)
# print("Training Result Path: ",trainingResultDataPath)
| {
"repo_name": "AiLaboratoryTelU/simple-artificial-neural-network",
"path": "simple_ann/module/moduleTraining.py",
"copies": "1",
"size": "4686",
"license": "apache-2.0",
"hash": 1418514790147582700,
"line_mean": 32.9565217391,
"line_max": 159,
"alpha_frac": 0.6135296628,
"autogenerated": false,
"ratio": 4.35906976744186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.547259943024186,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arifpz'
#TODO place where utility function which has dependencies with data model
import os
import simple_ga.dataModel.inputDataModel as id
import simple_ga.utility.utility as util
#TODO refactor method dataTestingResultDumpGenerator, dataTestingDataDumpGenerator, dataTestingResultDumpGenerator, dataInputDumpGenerator and dataTrainingResultDumpGenerator --> make only one method
def dataInputDumpGenerator(currentFolderName, folderNameTarget, fileName):
    """
    Read data input data sample from sample_data folder
    :param currentFolderName: folder name terminating the project root in cwd
    :param folderNameTarget: folder (under the project root) holding the sample
    :param fileName: JSON file name to load
    :return: inputData model object parsed via id.as_inputData
    """
    # NOTE(review): assumes cwd contains "/<currentFolderName>" exactly once;
    # otherwise the two-way unpack raises ValueError.
    pathProject, none = os.getcwd().split("/"+currentFolderName)
    pathInputDataSample = os.path.join(pathProject, folderNameTarget, fileName)
    inputData = util.readFromJsonAsObject(pathInputDataSample, id.as_inputData)
    return inputData
#TODO make dictionary output
def initiationInputVariables(inputData):
    """
    Unpack the genetic-algorithm settings carried by ``inputData``.

    :param inputData: object exposing the GA configuration as attributes
    :return: tuple (nVariable, nGenerationVariable, populationSize,
             generationSize, crossoverRate, mutationRate, smallValue,
             maxVal, minVal, representation)
    """
    fieldNames = ('nVariable', 'nGenerationVariable', 'populationSize',
                  'generationSize', 'crossoverRate', 'mutationRate',
                  'smallValue', 'maxVal', 'minVal', 'representation')
    return tuple(getattr(inputData, name) for name in fieldNames)
"repo_name": "AiLaboratoryTelU/simple-genetic-algorithm",
"path": "simple_ga/utility/utilityDataModel.py",
"copies": "1",
"size": "2231",
"license": "apache-2.0",
"hash": 2626210047720312300,
"line_mean": 38.8571428571,
"line_max": 199,
"alpha_frac": 0.7624383684,
"autogenerated": false,
"ratio": 4.273946360153257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5536384728553256,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arimorcos'
import praw
import sys
import os
def getMostRecentComment(userName, redditObject):
    """
    Return the last comment yielded by the user's comment listing,
    or None when the user has no comments.
    """
    redditor = redditObject.get_redditor(userName)
    latest = None
    for latest in redditor.get_comments():
        pass  # exhaust the listing; 'latest' keeps the final item
    return latest
def loadLastComment(saveFile):
    """Read and return the saved timestamp string from ``saveFile``."""
    with open(saveFile, 'r') as handle:
        return handle.read()
def saveLastComment(saveFile, lastCommentTime):
    """Persist ``lastCommentTime`` (stringified) to ``saveFile``, overwriting it."""
    with open(saveFile, 'w') as handle:
        handle.write(str(lastCommentTime))
def _sendRedditNotification(redditObject, userToNotify, userToCheck, lastComment):
    """Authenticate via OAuth and message userToNotify with the new comment."""
    # SECURITY NOTE(review): OAuth client id/secret and refresh token are
    # hard-coded (carried over from the original); move them to config and
    # rotate the leaked credentials.
    redditObject.set_oauth_app_info(client_id='6z02joDBya9uHA',
                                    client_secret='VW_eorxVV9QBuxjeQmYxdRU0tC8',
                                    redirect_uri='http://127.0.0.1:65010/authorize_callback')
    access_information = redditObject.get_access_information('pOg4o0YTvTcnwsFXEowb10NZM40')
    redditObject.set_access_credentials(**access_information)
    redditObject.send_message(userToNotify, 'New comment found for ' + userToCheck,
                              'The following comment has been found for user: ' + userToCheck + '\n\n' +
                              lastComment.body)
    print('sending message')


def checkComment(userToCheck, userToNotify):
    """
    Check userToCheck's newest comment; when it differs from the timestamp
    recorded in ~/<userToCheck>.txt (or no record exists yet), record the
    new timestamp and notify userToNotify via reddit private message.

    :param userToCheck: redditor whose comments are watched
    :param userToNotify: redditor who receives the notification message
    """
    # initialize reddit object
    redditObject = praw.Reddit(user_agent="findUserComments")
    # get most recent user comment
    lastComment = getMostRecentComment(userToCheck, redditObject)
    # per-user record file in the home directory
    fileName = os.path.join(os.path.expanduser('~'), userToCheck) + '.txt'
    if os.path.isfile(fileName):
        lastCommentDate = loadLastComment(fileName)
        # Bug fix: the original compared the stored string against the float
        # lastComment.created, which is ALWAYS unequal, so a notification
        # was sent on every run. Compare string-to-string instead.
        if lastCommentDate != str(lastComment.created):
            _sendRedditNotification(redditObject, userToNotify, userToCheck, lastComment)
        # save new date (refresh the record either way, as before)
        saveLastComment(fileName, lastComment.created)
    else:
        # first run for this user: record the date, then notify
        saveLastComment(fileName, lastComment.created)
        _sendRedditNotification(redditObject, userToNotify, userToCheck, lastComment)
if __name__ == "__main__":
    # handle arguments: argv[1] = user to watch, argv[2] = user to notify
    userToCheck = sys.argv[1]
    userToNotify = sys.argv[2]
    # Bug fix: checkComment requires both users; the original passed only
    # one argument, raising TypeError on every invocation.
    checkComment(userToCheck, userToNotify)
"repo_name": "arimorcos/blog_analyses",
"path": "reddit/redUserComment.py",
"copies": "1",
"size": "3169",
"license": "mit",
"hash": 819845902646538900,
"line_mean": 29.7766990291,
"line_max": 112,
"alpha_frac": 0.6213316504,
"autogenerated": false,
"ratio": 3.9171817058096416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013414093248245303,
"num_lines": 103
} |
__author__ = 'arimorcos'
from flask import render_template
from initialize import flatpages
from settings import POST_DIR
def renderPostList(postList, pageNum=False, tagName=False):
    """
    Render a (possibly paginated, possibly tag-filtered) list of posts.

    :param postList: posts to display, newest first
    :param pageNum: 1-based page number (str or int), or False for no paging
    :param tagName: camelCase tag used to build pager URLs, or False
    :return: rendered 'posts.html' template
    """
    # Number of posts per page
    nPostsPerPage = 5
    nRecent = 5
    # get recent posts
    allPosts = getPostList()
    recentPosts = allPosts[:nRecent]
    if pageNum:
        # slice to the correct post subset
        startPost = (int(pageNum) - 1) * nPostsPerPage
        # NOTE(review): max(...) here looks like it was meant to be min(...)
        # to clamp the slice end to the list length -- confirm intent.
        stopPost = max(len(postList) - 1, int(pageNum) * nPostsPerPage)
        postSubset = postList[startPost:stopPost]
        # generate booleans for whether there should be a new or old posts button
        if len(postList) > nPostsPerPage and nPostsPerPage*int(pageNum) <= len(postList):
            shouldOlder = True
            if tagName:
                olderPage = '/blog/tag/' + tagName + '/page' + str(int(pageNum)+1)
            else:
                olderPage = '/blog/page' + str(int(pageNum)+1)
        else:
            shouldOlder = False
            olderPage = False
        if int(pageNum) > 1:
            shouldNewer = True
            if tagName:
                newerPage = '/blog/tag/' + tagName + '/page' + str(int(pageNum)-1)
            else:
                newerPage = '/blog/page' + str(int(pageNum)-1)
        else:
            shouldNewer = False
            newerPage = False
    else:
        shouldNewer = False
        shouldOlder = False
        newerPage = False
        olderPage = False
        postSubset = postList
    # Map each tag to a CSS class name like 'tag3' sized by its frequency.
    tagFreq = getTagFrequency(True)
    for tag in tagFreq:
        tagFreq[tag] = 'tag{0}'.format(tagFreq[tag])
    # Attach a camelCase tag list to each post for the template.
    for num, p in enumerate(postSubset):
        postSubset[num].tagList = [convertToCamelCase(tag) for tag in p.meta['tags'].replace(', ',',').split(',')]
    # return the posts rendered by the posts.html template
    return render_template('posts.html', posts=postSubset, olderPage=olderPage,
                           newerPage=newerPage, shouldNewer=shouldNewer, shouldOlder=shouldOlder,
                           recentPosts=recentPosts, tagFreq=tagFreq)
def getPostList():
    """Return all flatpages under POST_DIR, newest first by 'date' metadata."""
    posts = [page for page in flatpages if page.path.startswith(POST_DIR)]
    posts.sort(key=lambda page: page['date'], reverse=True)
    return posts
def findTagMatch(tagName):
    """
    Find posts whose camelCased tag list contains ``tagName``.

    :param tagName: camelCase tag string to match
    :return: list of matching posts, newest first
    """
    matches = []
    for post in getPostList():
        tags = post['tags'].replace(', ', ',').split(',')
        if any(convertToCamelCase(tag) == tagName for tag in tags):
            matches.append(post)
    return matches
def getTagFrequency(normalizeRange):
    """
    Finds the frequency of each tag
    normalizeRange: normalize the range of the tag frequencies
    :return: dict with key as tag and value being number of occurences
    """
    postList = getPostList()
    tagList = {}
    # Count each camelCased tag across all posts.
    for p in postList:
        for tag in p['tags'].replace(', ', ',').split(','):
            tag = convertToCamelCase(tag)
            if tag in tagList:
                tagList[tag] += 1
            else:
                tagList[tag] = 1
    if normalizeRange:
        # NOTE(review): maxFreq * (x / maxFreq) == x, so this rounds each
        # count back to itself -- the "normalization" is currently a no-op.
        maxFreq = float(max(list(tagList[i] for i in tagList)))
        for tag in tagList:
            tagList[tag] = int(round(maxFreq*(float(tagList[tag])/maxFreq)))
    return tagList
def convertToCamelCase(input):
    """
    Convert a space-separated string to camelCase.

    A single word comes back unchanged (its case is NOT altered); for
    multi-word input the first word is kept as-is and every following
    word is title-cased, then all words are joined without spaces.

    :param input: string to be converted
    :return: camelCase version of string
    """
    words = input.split(' ')
    if len(words) == 1:
        # nothing to join -- return the input untouched
        return input
    head, tail = words[0], words[1:]
    return head + ''.join(word.title() for word in tail)
"repo_name": "arimorcos/arimorcos.github.io",
"path": "helperFunctions.py",
"copies": "1",
"size": "4109",
"license": "mit",
"hash": 4152136772194867000,
"line_mean": 28.7826086957,
"line_max": 114,
"alpha_frac": 0.6108542224,
"autogenerated": false,
"ratio": 3.8691148775894537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49799690999894536,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ari Morcos'
from requests import HTTPError
import praw
from redditDB import RedditDB
import datetime
import time
import itertools
import sys
import os
def createDataset(r, subreddits, startDate=(datetime.datetime.now()-datetime.timedelta(days=7)).strftime('%y%m%d%H%M%S'),
                  endDate=datetime.datetime.now().strftime('%y%m%d%H%M%S'), nCommentsPerSubmission=100, dbName='reddit',
                  fineScale=12, nPostsPerFineScale=200):
    """
    Crawl each subreddit for posts in the date range and persist posts and
    their top comments into a RedditDB database.

    :param r: reddit object
    :param subreddits: list of subreddits to grab
    :param startDate: start date in format yymmddHHMMSS
    :param endDate: end date in format yymmddHHMMSS
    :param nCommentsPerSubmission: number of comments to grab per submission. Default is 100.
    :param dbName: base of database name
    :param fineScale: scale of database in hours
    :param nPostsPerFineScale: number of posts per fine scale
    :return:
    """
    # NOTE(review): the date-based default arguments are evaluated once at
    # import time, not per call -- confirm that is intended.
    # initialize database
    dbObj = RedditDB(dbName=dbName)
    # loop through each subreddit
    for sub in subreddits:
        print 'Processing subreddit: ' + sub.title.encode('utf-8')
        # get submissions within the date range
        matchingPosts = getAllPostsWithinRangeFineScale(sub, startDate=startDate, endDate=endDate, fineScale=fineScale,
                                                        nPostsPer=nPostsPerFineScale)
        # loop through each post and get top comments
        for post in matchingPosts:
            print 'Processing post: ' + post.title.encode('utf-8')
            # save post
            dbObj.saveSubmission(post)
            # get comments, retrying up to 10 times on transient HTTP errors
            numTries = 0
            gotComments = False
            while not gotComments and numTries < 10:
                try:
                    comments = getCommentsFromSubmission(post, nCommentsPerSubmission)
                    gotComments = True
                except HTTPError:
                    time.sleep(2)
                    numTries += 1
            # save comment data for comments which have not been deleted
            # print [com.author.name for com in comments if isinstance(com, praw.objects.Comment)]
            [dbObj.saveCommentData(com) for com in comments if isinstance(com, praw.objects.Comment)
             and com.author is not None]
    dbObj.closeConnection()
    print ('\nData collection complete!')
def getSubreddits(r, subredditNames):
    """
    :param r: reddit object
    :param subredditNames: list of subreddit names to retrieve
    :return: generator object of subreddit objects
    """
    for name in subredditNames:
        # Names are lower-cased before lookup, matching reddit convention.
        yield r.get_subreddit(name.lower())
def getRecentSubmissions(subreddit, dateRange):
    """
    Run an empty search on the subreddit to list all submissions within
    the given period.

    :param subreddit: subreddit object
    :param dateRange: praw search ``period`` value
    :return: search result listing
    """
    # Bug fix: the original retried via unbounded recursion on HTTPError,
    # which could recurse forever (and overflow the stack) if the endpoint
    # kept failing. Retry a bounded number of times instead.
    for _ in range(10):
        try:
            return subreddit.search('', period=dateRange, limit=None)
        except HTTPError:
            time.sleep(2)
    # Final attempt: let any error propagate to the caller.
    return subreddit.search('', period=dateRange, limit=None)
def getCommentsFromSubmission(submission, nCommentsPerSubmission):
    """Flatten the submission's comment tree and return the first n comments."""
    all_comments = praw.helpers.flatten_tree(submission.comments)
    return all_comments[0:nCommentsPerSubmission]
def getAllPostsWithinRangeFineScale(subreddit, startDate, endDate, fineScale=12, nPostsPer=1000):
    """
    Grabs posts using fine scale to grab maximum number
    :param fineScale: scale in hours. Default is 12.
    :param subreddit: subreddit object
    :param startDate: start date in format yymmddHHMMSS
    :param endDate: end date in format yymmddHHMMSS
    :param nPostsPer: number of posts per unit
    :return: lazy itertools.chain over all matching posts
    """
    # create datetime object for each date
    startDateObject = datetime.datetime.strptime(startDate, "%y%m%d%H%M%S")
    endDateObject = datetime.datetime.strptime(endDate, "%y%m%d%H%M%S")
    # get posts
    posts = []
    tempStart = startDateObject
    # Walk the overall range in fineScale-hour windows, querying each window
    # separately to dodge the per-search result cap.
    while True:
        # get temporary end date
        tempEnd = tempStart + datetime.timedelta(hours=fineScale)
        # check if tempEnd is after than endDateObject
        if (tempEnd - endDateObject) > datetime.timedelta(0, 0, 0):
            # set tempEnd to be endDateObject
            tempEnd = endDateObject
        # break if start is after end
        if (tempStart - tempEnd) > datetime.timedelta(0, 0, 0):
            break
        # convert to strings
        tempStartStr = tempStart.strftime('%y%m%d%H%M%S')
        tempEndStr = tempEnd.strftime('%y%m%d%H%M%S')
        # get posts within range
        tempPosts = getPostsWithinRange(subreddit, tempStartStr, tempEndStr, nPosts=nPostsPer)
        # combine with posts
        posts = itertools.chain(posts, tempPosts)
        # iterate on start date (1s past the previous window's end)
        tempStart = tempEnd + datetime.timedelta(seconds=1)
    # return
    return posts
def getPostsWithinRange(subreddit, startDate, endDate, nPosts=1000):
    """
    Grab the top posts submitted to a subreddit within a date range.

    :param subreddit: subreddit object
    :param startDate: start date in format yymmddHHMMSS (local time)
    :param endDate: end date in format yymmddHHMMSS (local time)
    :param nPosts: maximum number of posts to return
    :return: generator object of posts
    :raises HTTPError: if the search still fails after all retries
    """
    # Convert the date strings to integer unix timestamps.  int() replaces
    # the original str(ts)[:-2] slice, which assumed mktime's float always
    # rendered with a trailing '.0'.
    startStamp = int(time.mktime(datetime.datetime.strptime(startDate, "%y%m%d%H%M%S").timetuple()))
    endStamp = int(time.mktime(datetime.datetime.strptime(endDate, "%y%m%d%H%M%S").timetuple()))
    # cloudsearch syntax supports range queries on the submission timestamp
    searchTerm = 'timestamp:' + str(startStamp) + '..' + str(endStamp)
    # Retry transient HTTP errors with a bounded loop.  The original
    # retried by recursing with the already-converted timestamps, which
    # would have crashed strptime on the second attempt.
    for attempt in range(5):
        try:
            return subreddit.search(searchTerm, sort='top', syntax='cloudsearch', limit=nPosts)
        except HTTPError:
            if attempt == 4:
                raise
            time.sleep(2)
# Script entry point: build a dataset of submissions/comments from the 200
# most popular subreddits over the given date range.
if __name__ == "__main__":
    # handle arguments (all positional, no validation)
    startDate = sys.argv[1]       # range start, yymmdd... string
    endDate = sys.argv[2]         # range end, yymmdd... string
    dbName = sys.argv[3]          # name of the output database
    fineScale = int(sys.argv[4])  # search-window width in hours
    # initialize reddit object
    r = praw.Reddit(user_agent='get_dataset')
    subreddits = r.get_popular_subreddits(limit=200)
    createDataset(r, subreddits, startDate=startDate, endDate=endDate, dbName=dbName, fineScale=fineScale)
| {
"repo_name": "arimorcos/getRedditDataset",
"path": "redditDataset.py",
"copies": "1",
"size": "6131",
"license": "mit",
"hash": 3926404454779496400,
"line_mean": 31.9623655914,
"line_max": 121,
"alpha_frac": 0.6599249715,
"autogenerated": false,
"ratio": 3.803349875930521,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49632748474305205,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arimorcos'
import datetime
import sys
sys.path.extend(['D:\\Documents\\GitHub\\getRedditDataset', 'D:\\Documents\\GitHub\\reddit_analyses'])
from redditDataset import *
# Script entry point: collect posts/comments for the subreddits that were
# promoted to reddit defaults, either over the whole range or as one fixed
# two-hour window per day.
if __name__ == '__main__':
    shouldOneHour = False
    # handle arguments
    startDate = sys.argv[1]
    endDate = sys.argv[2]
    dbName = sys.argv[3]
    fineScale = int(sys.argv[4])
    # NOTE(review): bool() of any non-empty string is True, so passing
    # 'False' here still enables one-hour mode -- confirm intended usage
    shouldOneHour = bool(sys.argv[5])
    offset = int(sys.argv[6])  # day index to resume from in one-hour mode
    newDefaults = ['art', 'askscience', 'blog', 'creepy', 'dataisbeautiful', 'DIY', 'Documentaries', 'fitness',
                   'food', 'futurology', 'gadgets', 'getmotivated', 'history', 'internetisbeautiful', 'jokes',
                   'lifeprotips', 'listentothis', 'mildlyinteresting', 'nosleep', 'nottheonion', 'oldschoolcool',
                   'personalfinance', 'philosophy', 'photoshopbattles', 'showerthoughts', 'space', 'sports', 'tifu',
                   'twoxchromosomes', 'upliftingnews', 'writingprompts']
    r = praw.Reddit(user_agent='grab_defaults')
    sub = list(getSubreddits(r, newDefaults))
    if shouldOneHour:
        # sample the same two-hour window starting at this hour every day,
        # writing one database per day
        hour = 9
        # get number of days between dates
        startDay = datetime.date(2000 + int(startDate[0:2]), int(startDate[2:4]), int(startDate[4:6]))
        endDay = datetime.date(2000 + int(endDate[0:2]), int(endDate[2:4]), int(endDate[4:6]))
        nDays = (endDay - startDay).days
        # loop through each day and create dataset
        for day in range(offset,nDays):
            # get temp start and stop
            tempStartObj = datetime.datetime(2000 + int(startDate[0:2]), int(startDate[2:4]), int(startDate[4:6]),
                                             hour, int(startDate[8:10]), int(startDate[10:12]))
            tempStartObj = tempStartObj + datetime.timedelta(days=day)
            tempStopObj = tempStartObj + datetime.timedelta(hours=2)
            # convert to strings
            tempStartStr = tempStartObj.strftime('%y%m%d%H%M%S')
            tempStopStr = tempStopObj.strftime('%y%m%d%H%M%S')
            # create dataset (suffix e.g. '_day_003' keeps files sortable)
            createDataset(r, sub, startDate=tempStartStr, endDate=tempStopStr, dbName=dbName + '_day_' + str(day).zfill(3),
                          fineScale=fineScale)
    else:
        createDataset(r, sub, startDate=startDate, endDate=endDate, dbName=dbName, fineScale=fineScale)
| {
"repo_name": "arimorcos/blog_analyses",
"path": "reddit/grabNewDefaults.py",
"copies": "1",
"size": "2345",
"license": "mit",
"hash": -2473690510332775000,
"line_mean": 44.9803921569,
"line_max": 123,
"alpha_frac": 0.6102345416,
"autogenerated": false,
"ratio": 3.35,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44602345416,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ari Morcos'
import os
import datetime
import sqlite3
import re
import shutil
import time
class RedditDB:
    """
    Class for interfacing with a database for reddit data sets

    Wraps a sqlite3 database with two tables, 'comments' and 'submissions',
    created on first use.  All writes are committed immediately.
    """
    def __init__(self, dbName='reddit', dbPath=None):
        # dbName: file name without the '.db' extension
        # dbPath: directory for the database; defaults to ~/Databases
        self.__dbName = dbName
        self.__dbPath = dbPath
        self.__c = None  # initialized in initialize database
        self.__initializeDatabase()
    def __getDatabasePath(self):
        """
        :return: full absolute database path
        """
        if self.__dbPath is None:
            # default location: ~/Databases/<dbName>.db
            userPath = os.path.expanduser('~')
            basePath = os.path.abspath(os.path.join(userPath, 'Databases'))
        else:
            basePath = self.__dbPath
        databasePath = os.path.abspath(os.path.join(basePath, self.__dbName + '.db'))
        # make directory if it doesn't exist
        if not os.path.exists(basePath):
            os.makedirs(basePath)
        return databasePath
    def __initializeDatabase(self):
        """
        Initializes a database connection called 'reddit.db'
        :return: cursor object
        """
        dbPath = self.__getDatabasePath()
        # sqlite3.connect creates the file if it does not exist yet
        self.__dbObj = sqlite3.connect(dbPath)
        self.__c = self.__dbObj.cursor()
        # get list of tables
        tableList = self.__c.execute("Select name from sqlite_master where type = 'table' ")
        # check if comments exist in tableList; if missing, assume the
        # schema has never been created for this file
        commentsPresent = any(['comments' == item[0] for item in [row for row in list(tableList)]])
        if not commentsPresent:
            self.__createTables()
    def __createTables(self):
        """Create the comments and submissions tables (untyped columns)."""
        # create comments table
        self.__c.execute('Create TABLE comments (date, user, body, comScore, postID)')
        # create submissions table
        self.__c.execute('Create TABLE submissions (postID, postTitle, postBody, postScore, postDate, '
                         'subredditName, subredditID)')
    def saveCommentData(self, comment):
        """
        Insert one praw comment into the comments table and commit.

        :param comment: comment object
        :return: void
        """
        # extract relevant fields; dates are stored as 'YYYYMMDDHHMMSS'
        # strings so they sort lexicographically
        commentDate = datetime.datetime.fromtimestamp(comment.created_utc)
        commentDateStr = commentDate.strftime('%Y%m%d%H%M%S')
        # NOTE(review): comment.author is presumably None for deleted
        # accounts, which would make .name raise here -- confirm upstream
        # filtering
        userName = comment.author.name
        body = comment.body
        submissionID = comment._submission.name
        score = comment.score
        # save data
        self.__c.execute('Insert into comments VALUES (?, ?, ?, ?, ?)', [commentDateStr, userName, body, score, submissionID])
        self.__c.connection.commit()
    def saveSubmission(self, post):
        """
        Insert one praw submission into the submissions table and commit.

        :param post: post object
        :return: void
        """
        # extract relevant fields
        submissionID = post.name
        submissionTitle = post.title
        submissionDate = datetime.datetime.fromtimestamp(post.created_utc)
        submissionDateStr = submissionDate.strftime('%Y%m%d%H%M%S')
        subredditID = post.subreddit.name
        subredditName = post.subreddit.display_name
        score = post.score
        # self posts store their text; link posts store the target URL
        if post.is_self:
            body = post.selftext
        else:
            body = post.url
        # save data
        self.__c.execute('Insert into submissions VALUES (?, ?, ?, ?, ?, ?, ?)', [submissionID, submissionTitle, body,
                                                                                  score, submissionDateStr,
                                                                                  subredditName, subredditID])
        self.__c.connection.commit()
    def getSubreddits(self):
        """ Extracts a list of distinct subreddits """
        # execute query; ordered by submission count, most active first
        self.__c.execute('select distinct subredditName '
                         'from submissions '
                         'group by subredditName '
                         'order by count(*) desc')
        # grab results (fetchall returns 1-tuples; unwrap them)
        rawOut = self.__c.fetchall()
        return [item[0] for item in rawOut]
    def getSubredditCommentText(self, subreddit):
        """ Grabs all comment text and concatenates from a given subreddit """
        # execute query: comments joined to submissions via postID
        self.__c.execute("select body "
                         "from comments "
                         "where postID in "
                         "    (select postID "
                         "     from submissions "
                         "     where subredditName = ?)", [subreddit])
        # get comments (unwrap the 1-tuples)
        rawComments = self.__c.fetchall()
        return [item[0] for item in rawComments]
    def closeConnection(self):
        """Close the underlying sqlite connection; the object is unusable afterwards."""
        self.__dbObj.close()
def mergeDBs(path, dbName='mergedDB'):
    """
    Merges multiple databases into one large database
    :param path: path to folder containing databases. Will merge all of these databases
    :param dbName: Name of the merged database. Default is mergedDB.
    """
    # get list of database objects in path
    allFiles = os.listdir(path)
    # get db files
    dbFiles = [dbFile for dbFile in allFiles if re.match(r'.*\.db', dbFile) is not None]
    # get path of first file and new database object; the first .db file
    # is copied wholesale to seed the merged database
    source = os.path.abspath(os.path.join(path, dbFiles[0]))
    destination = os.path.abspath(os.path.join(path, dbName + '.db'))
    # check if destination file exists; ask before clobbering it
    if os.path.isfile(destination):
        userInput = raw_input('Destination file already exists. Continue (y/n): ')
        if userInput.lower() == 'n':
            print('Ending merge.')
            return
        elif userInput.lower() != 'y':
            print 'Cannot process input. Ending merge.'
            return
    # copy file
    shutil.copyfile(source, destination)
    # create sql object
    dbObj = sqlite3.connect(destination)
    c = dbObj.cursor()
    # loop through each database, attach, and merge
    for dbFile in dbFiles[1:]:
        # create query
        # NOTE(review): the attach path is spliced into the SQL by string
        # concatenation; a quote character in the path would break the
        # script -- confirm paths are controlled
        sqlQuery = "attach '" + os.path.abspath(os.path.join(path, dbFile)) + """' as toMerge;
        INSERT into comments select * from toMerge.comments;
        INSERT into submissions select * from toMerge.submissions;
        detach toMerge;"""
        # execute and commit
        c.executescript(sqlQuery)
        dbObj.commit()
    print 'Merge complete!'
| {
"repo_name": "arimorcos/getRedditDataset",
"path": "redditDB.py",
"copies": "1",
"size": "6243",
"license": "mit",
"hash": -1921626256419292000,
"line_mean": 31.3471502591,
"line_max": 126,
"alpha_frac": 0.5731218965,
"autogenerated": false,
"ratio": 4.399577167019028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5472699063519028,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arimorcos'
import re
import urllib2
import enchant
from textstat.textstat import textstat
def countWords(commentList):
    """
    Count the total number of whitespace-separated words across all
    strings in commentList.

    :param commentList: list of text
    :return: word count
    """
    return sum(len(text.split()) for text in commentList)
def countMisspellings(commentList):
    """
    Count the number of misspelled words across all comments, using the
    en-US enchant dictionary.

    :param commentList: list of comment strings
    :return: the count of misspelled words
    """
    dictionary = enchant.Dict('en-US')
    total = 0
    for comment in commentList:
        for word in comment.split():
            if not dictionary.check(word):
                total += 1
    return total
def getReadabilityStats(text):
    """Compute readability metrics for a blob of text; currently only the
    Flesch-Kincaid grade level, keyed as 'fleschGrade'."""
    return {'fleschGrade': textstat.flesch_kincaid_grade(text)}
def getCelebList():
    """
    Grabs celebrity list from people.com

    :return: list of celebrities (two-word names scraped from the page)
    """
    # fetch the page and pull every "First Last" name that follows an
    # 8-digit id attribute
    html = urllib2.urlopen('http://www.people.com/people/celebrities/').read()
    return re.findall(r"(?<=id\=\"\d{8}\"\>)\w+\s\w+", html)
def findCelebrityMentions(commentList, celebList):
    """
    Count case-insensitive mentions of each celebrity in a list of comments.

    :param commentList: list of comment strings
    :param celebList: list of celebrity names
    :return: dict mapping each celebrity name to its mention count
        (exact spelling only, substring matches included)
    """
    # join all comments and compare in lower case for case-insensitivity
    allComments = " ".join(commentList).lower()
    # re.escape guards against regex metacharacters in names (e.g. the '.'
    # in "Mr. T"), which the original treated as wildcards
    return {celeb: len(re.findall(re.escape(celeb.lower()), allComments))
            for celeb in celebList}
| {
"repo_name": "arimorcos/blog_analyses",
"path": "reddit/celebReddit.py",
"copies": "1",
"size": "1860",
"license": "mit",
"hash": 2472788959114090500,
"line_mean": 21.4096385542,
"line_max": 88,
"alpha_frac": 0.6731182796,
"autogenerated": false,
"ratio": 3.4766355140186915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.958883596500085,
"avg_score": 0.012183565723568431,
"num_lines": 83
} |
__author__ = 'ariyatan'
from verb_tenses import verb_form
def verb_input():
    """Prompt the user for a verb and return the raw response."""
    return raw_input('Enter a verb> \n')
def tense_input():
    """Prompt the user for a tense name and return the raw response."""
    return raw_input('Enter the required tense > \n')
def person_input():
    """Prompt the user for a grammatical person and return the raw response."""
    return raw_input('Enter the person > \n')
def tense_choice(tense, case):
    """Map a tense name or its acronym to the matching verb_form method
    and apply it to case; unrecognised tenses fall back to the infinitive."""
    handlers = {
        'present simple': verb_form.present_simple,
        'ps': verb_form.present_simple,
        'gerund': verb_form.gerund,
        'g': verb_form.gerund,
        'participle': verb_form.ppl,
        'ppl': verb_form.ppl,
        'present perfect': verb_form.present_perfect,
        'prp': verb_form.present_perfect,
        'past simple': verb_form.past_simple,
        'pas': verb_form.past_simple,
        'future simple': verb_form.future_simple,
        'fs': verb_form.future_simple,
        'present continuous': verb_form.present_continuous,
        'pc': verb_form.present_continuous,
        'past perfect': verb_form.past_perfect,
        'pap': verb_form.past_perfect,
        'future continuous': verb_form.future_continuous,
        'fc': verb_form.future_continuous,
        'past continuous': verb_form.past_continuous,
        'pac': verb_form.past_continuous,
    }
    return handlers.get(tense, verb_form.inf)(case)
# Interactive REPL for the verb-form generator: print usage help, then loop
# forever asking for verb/tense/person, reusing the previous answer for any
# field left blank.
print '''Welcome to the test interface for verb form generator!
The interface will ask you to provide verb, and fill tense and person, to which it would bend.
It's very simple and has no spell check, so be careful not to make mistakes.
'verb' is an open category, so you can print any verb you like.
'tense' can be present simple, present perfect, present continuous, past continuous, future continuous, past simple, past perfect, and also gerund and participle.
'person' can be can be 'I','you','she' (we try to be correct), 'we' or 'they'.
You need to fill in at least verb and tense for the first time.
If later some input is empty, the interface will use your previous choices. \nThe idea is to give you possibility to test different forms for the same verb or the same person without extra typing
You can use acronyms for a faster input of tenses:
g - gerund
ppl - participle
ps - present simple
prp - present perfect
pap - past perfect
pas - past simple
fs - future simple
pc - present continuous
fc - future continuous
pac - past continuous
'''
#main
# first round: all three answers are collected unconditionally
verb = verb_input()
tense = tense_input()
person = person_input()
case = verb_form(verb, tense, person)
print tense_choice(tense, case)
while True:
    # empty answers keep the value from the previous round
    v = verb_input()
    if v != '':
        verb = v
    t = tense_input()
    if t != '':
        tense = t
    p = person_input()
    if p != '':
        person = p
    # an all-empty tense falls back to the infinitive
    if tense == '':
        tense = 'inf'
    else:
        pass
    case = verb_form(verb, tense, person)
    print tense_choice(tense, case)
    print "\nLet's test some more"
| {
"repo_name": "DmitryKey/MTengine",
"path": "grammarer/verb_start.py",
"copies": "1",
"size": "2846",
"license": "mit",
"hash": -7369583508813122000,
"line_mean": 26.9019607843,
"line_max": 195,
"alpha_frac": 0.658116655,
"autogenerated": false,
"ratio": 3.5092478421701605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46673644971701606,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ariyatan'
import csv
# Word lists loaded at import time from CSV files in the working directory
# (one word per line; read line-by-line rather than via csv.reader).
#list of adjectives that have synthetic forms of degree of comparison
synth_form_list = []
with open('adj_comp_ex.csv', 'rb') as dc:
    for line in dc:
        synth_form_list.append(line[:-1])
#I use [:-1] to get rid of '\n' symbol at the end of each row
# NOTE(review): [:-1] truncates the last character of the final line if the
# file has no trailing newline -- confirm the data files end with '\n'
#list of adjectives that double last consonant in synthetic form
double_cons_list = []
with open('adj_double_consonant.csv', 'rb') as lst:
    for line in lst:
        double_cons_list.append(line[:-1])
#I use [:-1] to get rid of '\n' symbol at the end of each row
#stem ending in 'y' don't change if it's part of a diphtong and follows a vowel
no_change_list = ['shy', 'gray', 'sly', 'spry', 'wry']
#Synthetic forms are by adjectives with 1-syllable stem, or with 2-syllable stem, if it ends in 'er' (like 'clever'), 'y' (like 'dirty'), 'le' (like 'simple') or 'ow' (like 'narrow')
#Adjectives of 2 and more syllables usually have analytic forms of comparison
class adj_form(object):
    """
    Degree-of-comparison generator for English adjectives.

    Synthetic forms (-er/-est) are used for stems listed in the
    module-level synth_form_list; everything else takes analytic forms
    ('more ...' / 'the most ...').  good/bad/little are special-cased.
    """
    def __init__(self, stem, degree = '0'):
        # stem: adjective base form; degree is kept for interface
        # compatibility (not read by the methods below)
        self.stem = stem
        self.degree = degree
    def add_er(self):
        """Build the synthetic comparative (-er) form of the stem."""
        if self.stem[-1] == 'y':
            # vowel+y diphthongs keep the 'y' (e.g. 'gray' -> 'grayer')
            if self.stem in no_change_list:
                return self.stem + 'er'
            else:
                return self.stem[:-1] + 'ier'
        if self.stem[-1] == 'e':
            return self.stem + 'r'
        if self.stem in double_cons_list:
            # e.g. 'big' -> 'bigger'
            return self.stem + self.stem[-1] + 'er'
        else:
            return self.stem + 'er'
    def add_est(self):
        """Build the synthetic superlative (-est) form of the stem."""
        if self.stem[-1] == 'y':
            if self.stem in no_change_list:
                # bug fix: this branch previously returned the comparative
                # suffix ('-er'), giving e.g. 'shy' -> 'shyer' here
                return self.stem + 'est'
            else:
                return self.stem[:-1] + 'iest'
        if self.stem[-1] == 'e':
            return self.stem + 'st'
        if self.stem in double_cons_list:
            return self.stem + self.stem[-1] + 'est'
        else:
            return self.stem + 'est'
    def comparative(self):
        """Return the comparative degree (irregular, synthetic or analytic)."""
        if self.stem == 'good':
            return 'better'
        if self.stem == 'bad':
            return 'worse'
        if self.stem == 'little':
            return 'lesser'
        if self.stem in synth_form_list:
            return self.add_er()
        else:
            return 'more' + ' ' + self.stem
    def superlative(self):
        """Return the superlative degree (irregular, synthetic or analytic)."""
        if self.stem == 'good':
            return 'best'
        if self.stem == 'bad':
            return 'worst'
        if self.stem == 'little':
            return 'least'
        if self.stem in synth_form_list:
            return self.add_est()
        else:
            return 'the most' + ' ' + self.stem
| {
"repo_name": "DmitryKey/MTengine",
"path": "grammarer/adj_comparison.py",
"copies": "1",
"size": "2616",
"license": "mit",
"hash": -2629879584691891000,
"line_mean": 32.5384615385,
"line_max": 182,
"alpha_frac": 0.5382262997,
"autogenerated": false,
"ratio": 3.397402597402597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4435628897102597,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ariyatan'
#the class takes a verb ('verb'), required tense ('tense') and person('person').
#In this case 'tense' means not only common tenses, but also verb forms such as participle or adverb or gerund.
#'verb' is infinitive taken from vocabulary during previous stages
#'tense' is received from parser, and set to 'infinitive', if nothing is received.
#'person' is presumably received from parser, and set to 'you', if nothing is received.
#'person' can be 'I','you','she' (we try to be correct), 'we' or 'they'.
import csv
# Verbs whose final consonant doubles before a suffix (run -> running),
# loaded one per line from a CSV in the working directory at import time.
double_consonant_list = []
with open('verb_double_consonant.csv', 'rb') as dc:
    for line in dc:
        double_consonant_list.append(line[:-1])
#I use [:-1] to get rid of '\n' symbol at the end of each row
class verb_form(object):
    """
    Conjugate an English verb.

    'verb' is the infinitive; 'tense' names a tense or non-finite form
    (defaults to 'inf'); 'person' is one of 'I', 'you', 'she', 'we',
    'they' (defaults to 'you').  Irregular past/participle forms are
    looked up in verb_exceptions.csv at call time.
    """
    def __init__(self, verb, tense = 'inf', person = 'you'):
        self.verb = verb      # infinitive stem
        self.tense = tense    # requested tense/form name
        self.person = person  # grammatical person
    def inf(self):
        """Infinitive: the stem unchanged."""
        return self.verb
    #nhave(self) is conjugation of the verb 'to have' to use it later as auxiliary verb
    def nhave(self):
        """Present-tense auxiliary 'to have'."""
        if self.person == 'she':
            return 'has'
        else:
            return 'have'
    #nbe(self) is conjugation of the verb 'to be' to use it later as auxiliary verb(present)
    def nbe(self):
        """Present-tense auxiliary 'to be'."""
        if self.person == 'I':
            return 'am'
        if self.person == 'she':
            return 'is'
        else:
            return 'are'
    #fbe(self) is conjugation of the verb 'to be' to use it later as auxiliary verb(future)
    def fbe(self):
        """Future auxiliary: traditional shall/will split."""
        if self.person in ['I', 'we']:
            return 'shall'
        else:
            return 'will'
    #pbe(self) is conjugation of the verb 'to be' to use it later as auxiliary verb(past)
    def pbe(self):
        """Past-tense auxiliary 'to be'."""
        if self.person == 'she':
            return 'was'
        else:
            return 'were'
    def gerund(self):
        """-ing form of the verb.

        Two fixes over the original:
        * stems ending in 'ie' become '-ying' (die -> dying); the old check
          compared the single character verb[-2] to 'ie' and never matched;
        * stems ending in 'c' take '-king' (panic -> panicking); the old
          code appended '-ked', the past-tense suffix.
        """
        if self.verb == 'be':
            return 'being'
        elif self.verb[-1] == 'e':
            # these verbs keep the final 'e' before '-ing'
            if self.verb in ['toe', 'singe', 'dye', 'see', 'agree', 'free', 'flee']:
                return self.verb + 'ing'
            if self.verb[-2:] == 'ie':
                return self.verb[:-2] + 'ying'
            else:
                return self.verb[:-1] + 'ing'
        elif self.verb[-1] == 'c':
            return self.verb + 'king'
        elif self.verb in double_consonant_list:
            # double the final consonant (run -> running)
            return self.verb + self.verb[-1] + 'ing'
        else:
            return self.verb + 'ing'
    #ppl is passive participle
    def ppl(self):
        """Passive (past) participle; irregulars come from column 3 of
        verb_exceptions.csv, regulars take '-ed' with spelling rules.

        Fix: the vowel+y test now compares verb[-2:] to the two-character
        endings; the old code compared the single character verb[-2], so
        it never matched and produced e.g. 'play' -> 'plaied'.
        """
        reader = csv.reader(open('verb_exceptions.csv'))
        verb_exceptions = {}
        for row in reader:
            key = row[0]
            verb_exceptions[key] = row[2]
        if self.verb in verb_exceptions.keys():
            return verb_exceptions[self.verb]
        elif self.verb == 'be':
            return 'been'
        elif self.verb[-1] == 'e':
            return self.verb + 'd'
        elif self.verb[-1] == 'y':
            if self.verb[-2:] in ['ay', 'ey', 'oy', 'uy']:
                return self.verb + 'ed'
            else:
                return self.verb[:-1] + 'ied'
        elif self.verb[-1] == 'c':
            return self.verb + 'ked'
        elif self.verb in double_consonant_list:
            return self.verb + self.verb[-1] + 'ed'
        else:
            return self.verb + 'ed'
    def present_simple(self):
        """Simple present; be/have/do are handled specially.

        NOTE(review): the 'do'/'be' branches use substring containment
        ('do' in self.verb), so verbs like 'download' or 'begin' also take
        them -- confirm that is intended.
        """
        if 'do' in self.verb:
            if self.person in ['I', 'you', 'we', 'they']:
                return self.verb
            if self.person == 'she':
                return 'does'
        elif 'be' in self.verb:
            if self.person == 'I':
                return 'am'
            if self.person == 'she':
                return 'is'
            else:
                return 'are'
        elif self.verb == 'have':
            return self.nhave()
        if 'she' in self.person:
            # third person singular takes '-s'
            return self.verb + "s"
        else:
            return self.verb
    def present_continuous(self):
        """Present continuous: [be-form, gerund]."""
        return [self.nbe(), self.gerund()]
    def present_perfect(self):
        """Present perfect: [have/has, participle]."""
        reader = csv.reader(open('verb_exceptions.csv'))
        verb_exceptions = {}
        for row in reader:
            key = row[0]
            verb_exceptions[key] = row[2]
        if self.verb in verb_exceptions.keys():
            return [self.nhave(), verb_exceptions[self.verb]]
        else:
            return [self.nhave(), self.ppl()]
    def past_simple(self):
        """Simple past; irregulars come from column 2 of verb_exceptions.csv."""
        reader = csv.reader(open('verb_exceptions.csv'))
        verb_exceptions = {}
        for row in reader:
            key = row[0]
            verb_exceptions[key] = row[1]
        if self.verb in verb_exceptions.keys():
            return verb_exceptions[self.verb]
        elif self.verb == 'be':
            if self.person == 'she':
                return 'was'
            else:
                return 'were'
        else:
            # regular verbs share the participle's '-ed' spelling
            return self.ppl()
    def future_simple(self):
        """Future simple: [shall/will, infinitive]."""
        return [self.fbe(), self.verb]
    def past_perfect(self):
        """Past perfect: ['had', participle]."""
        return ['had', self.ppl()]
    def past_continuous(self):
        """Past continuous: [was/were, gerund]."""
        return [self.pbe(), self.gerund()]
    def future_continuous(self):
        """Future continuous: [shall/will, 'be <gerund>']."""
        return [self.fbe(), 'be' + ' ' + self.gerund()]
| {
"repo_name": "DmitryKey/MTengine",
"path": "grammarer/verb_tenses.py",
"copies": "1",
"size": "5218",
"license": "mit",
"hash": 1437317416717728300,
"line_mean": 31.0122699387,
"line_max": 112,
"alpha_frac": 0.5201226524,
"autogenerated": false,
"ratio": 3.661754385964912,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9666627562772097,
"avg_score": 0.003049895118563138,
"num_lines": 163
} |
__author__ = 'Arkan'
import argparse
import sys
import textwrap
import time
import urllib.request
import libftb.ftb
download_last_mark = 0
def main():
    """CLI entry point: parse sys.argv and dispatch to a sub-command."""
    __parse_argv(sys.argv)
def __parse_argv(argv):
    """Build the argument parser (list/details/fetch sub-commands), parse
    argv[1:], and run the handler bound to the selected sub-command."""
    root = argparse.ArgumentParser()
    commands = root.add_subparsers(help='sub-command help')

    cmd_list = commands.add_parser('list', help='List modpacks.')
    cmd_list.set_defaults(func=__list)

    cmd_details = commands.add_parser('details', help='Get modpack details')
    cmd_details.add_argument(action='store', dest='pack')
    cmd_details.set_defaults(func=__details)

    cmd_fetch = commands.add_parser('fetch', help='Fetch a modpack zip.')
    cmd_fetch.add_argument('-s', '--server', action='store_true', dest='server')
    cmd_fetch.add_argument(action='store', dest='pack')
    cmd_fetch.set_defaults(func=__fetch)

    parsed = root.parse_args(argv[1:])
    try:
        parsed.func(parsed)
    except AttributeError:
        # no sub-command given: argparse leaves 'func' unset
        root.exit(-1, "Invalid input; run with --help for help.\n")
def __list(args):
    """Print the name, id and version of every available pack."""
    print('Available packs:')
    for pack in libftb.ftb.get_packs().values():
        print("\t{} ({}) {}".format(pack['name'], pack['id'], pack['version']))
def __details(args):
    """Print version, Minecraft version, author, wrapped description and
    the mod list (three per line) for the named pack; reports when the
    pack name is unknown."""
    packs = libftb.ftb.get_packs()
    try:
        pack = packs[args.pack]
        print("Details for {} ({}):".format(pack['name'], pack['id']))
        print("\t Version: {}".format(pack['version']))
        print("\t Minecraft: {}".format(pack['mc_version']))
        print("\t Author: {}".format(pack['author']))
        print()
        # description: wrap at 100 columns, label only the first line
        for idx, line in enumerate(textwrap.wrap(pack['description'], 100)):
            if idx == 0:
                print("\tDescription: {}".format(line))
            else:
                print("\t {}".format(line))
        print()
        # mod list: three comma-separated names per line
        mods = ""
        for n, mod in enumerate(pack['mods'], start=1):
            if n % 3 == 0:
                mods += "{},\n".format(mod)
            else:
                mods += "{}, ".format(mod)
        mods = mods[:len(mods) - 2]  # drop the trailing separator
        for idx, line in enumerate(mods.splitlines()):
            if idx == 0:
                print("\t Mods: {}".format(line))
            else:
                print("\t {}".format(line))
    except KeyError:
        print("No pack found for name {}".format(args.pack))
def __fetch(args):
    """Download the named pack's client (or server, with -s) archive into
    the current directory, printing progress and timing."""
    packs = libftb.ftb.get_packs()
    try:
        p = packs[args.pack]
        server = args.server
        # the local file name mirrors the CDN file name for the chosen flavor
        if server:
            url = libftb.ftb.get_pack_url(p, server=server)
            fname = p['server_url']
        else:
            url = libftb.ftb.get_pack_url(p)
            fname = p['url']
        try:
            print("Downloading {}...".format(url), end='')
            start = time.time()
            # reporthook prints a dot at every 5% of progress
            urllib.request.urlretrieve(url, fname, reporthook=__download_print_dots)
            end = time.time()
            print(" Done in {:.2f} seconds.".format(end - start))
            print("Saved archive as {}".format(fname))
        except Exception as ex:
            # best-effort: report the failure instead of crashing the CLI
            print(" Failed to download pack: {}".format(ex))
    except KeyError:
        print(" No pack found for name {}".format(args.pack))
def __download_print_dots(block, block_size, total_bytes):
    """urlretrieve reporthook: print a dot at every 5% of download progress.

    NOTE(review): imports the 'ftb' module to mutate its download_last_mark
    global; when this file runs as a script, __main__ and the imported
    'ftb' are distinct module objects, so the state lives on the imported
    copy -- confirm this is intended.
    """
    import ftb
    # fraction downloaded so far, as a percentage rounded to 2 decimals
    per = float(block*block_size) / total_bytes
    per = round(per*100, 2)
    # fires only when the rounded percentage lands exactly on an
    # unreported multiple of 5
    if (per % 5) == 0 and per > ftb.download_last_mark:
        ftb.download_last_mark = per
        print('.', end='', flush=True)
# run the CLI when executed as a script
if __name__ == "__main__":
    main() | {
"repo_name": "Emberwalker/LibFTB",
"path": "ftb.py",
"copies": "1",
"size": "3527",
"license": "mit",
"hash": 1687198733762776800,
"line_mean": 26.3488372093,
"line_max": 84,
"alpha_frac": 0.5245250921,
"autogenerated": false,
"ratio": 3.6286008230452675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9646032087837915,
"avg_score": 0.0014187654614705111,
"num_lines": 129
} |
__author__ = 'Arkan'
import urllib.request
import os.path
import time
import xml.etree.ElementTree as ET
import libftb.internal.parser as parser
CDN_ROOT = "http://ftb.cursecdn.com/FTB2"
def get_packs():
    """Return the modpack directory as a dict, refreshing the local
    modpacks.xml cache first if it is stale."""
    return parser.packs_xml_to_dict(__get_or_create_cache())
def get_pack_url(pack_dict, version=None, server=False):
    """
    Build the CDN download URL for a pack.

    :type version str
    :type server bool
    """
    pack_id = pack_dict['id']
    # fall back to the pack's own version when none (or '') is given;
    # the CDN path uses underscores instead of dots
    wanted = (version or pack_dict['version']).replace('.', '_')
    print("Getting URL for ID {} ({}) (server: {})".format(pack_id, wanted, server))
    file_name = pack_dict['server_url'] if server else pack_dict['url']
    return __get_pack_url(pack_id, wanted, file_name)
def __get_or_create_cache():
    """Return the parsed root element of modpacks.xml, downloading a fresh
    copy if the local cache is missing or older than one hour (3600 s)."""
    if not (os.path.isfile('modpacks.xml') and (time.time() - os.path.getmtime('modpacks.xml')) < 3600):
        print("Updating local copy of modpacks.xml...")
        urllib.request.urlretrieve(__get_static_url('modpacks.xml'), 'modpacks.xml')
    return ET.parse('modpacks.xml').getroot()
def __get_static_url(file):
    """URL of a static file on the FTB CDN."""
    return CDN_ROOT + "/static/" + file
def __get_pack_url(pack, version, file):
    # CDN layout: <root>/modpacks/<pack id>/<version>/<file name>
    return "{}/modpacks/{}/{}/{}".format(CDN_ROOT, pack, version, file) | {
"repo_name": "Emberwalker/LibFTB",
"path": "libftb/ftb.py",
"copies": "1",
"size": "1284",
"license": "mit",
"hash": 1498710901729356800,
"line_mean": 25.2244897959,
"line_max": 104,
"alpha_frac": 0.6292834891,
"autogenerated": false,
"ratio": 3.139364303178484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9263851779071834,
"avg_score": 0.0009592026413297995,
"num_lines": 49
} |
__author__ = 'arkilic'
import time
import socket
import sys
from thread import start_new_thread
import broker.config as cfg
from Queue import Queue
def remote_client_thread(conn2, queue):
    """
    Per-client worker thread: pop one payload from the shared queue (if
    any is waiting), ship it over the client's socket, then close the
    connection either way.
    """
    if not queue.empty():
        payload = queue.get(block=True)
        conn2.sendall(payload)
        print('Data Sent from broker server %d' % len(payload))
    conn2.close()
# Broker server main loop: accept a collection client on the receive socket,
# read its whole payload, queue it, and hand a send-socket connection plus
# the queue to a worker thread that forwards the data to an analysis client.
HOST = cfg.HOST
PORT = cfg.PORT
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
send_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Sockets created'
#Bind socket to local host and port
try:
    s.bind((HOST, PORT))
except socket.error as msg:
    print 'Binding the socket failed'
    sys.exit()
send_socket.bind((cfg.SEND_HOST, cfg.SEND_PORT))
# try:
#     send_socket.bind((cfg.SEND_HOST, cfg.SEND_PORT))
# except socket.error as msg:
#     print 'Binding the socket failed'
#     sys.exit()
#
print 'Socket bind complete'
#Start listening on socket
s.listen(10)
send_socket.listen(10)
print 'Sockets now listening'
queue = Queue()
while True:
    print('top of the server loop')
    #accept connection and start_threads for each incoming request from collection clients
    # passing a send_server connection instance that will ship the requested data to
    #analysis clients listening
    conn, address = s.accept()
    conn2, address2 = send_socket.accept()
    # drain the receive connection until the peer stops sending
    accum_data = []
    data = conn.recv(4096)
    while len(data):
        accum_data.append(data)
        data = conn.recv(4096)
    data = ''.join(accum_data)
    print 'Connected with ' + address[0] + ':' + str(address[1])
    if data:
        queue.put(data)
        #place the data in the queue that is shared among all client threads
    start_new_thread(remote_client_thread, (conn2, queue, )) #start new client thread as data is received
# NOTE(review): unreachable -- the while True loop above never exits, so
# these sockets are only released when the process dies
send_socket.close()
s.close()
| {
"repo_name": "NSLS-II/brokerStreamServer",
"path": "broker/server/broker_server.py",
"copies": "1",
"size": "2035",
"license": "bsd-3-clause",
"hash": 6594190205256063000,
"line_mean": 26.5,
"line_max": 109,
"alpha_frac": 0.685012285,
"autogenerated": false,
"ratio": 3.5701754385964914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9741727483229699,
"avg_score": 0.0026920480733584103,
"num_lines": 74
} |
__author__ = 'arkilic'
import tornado.web, tornado.ioloop
import motor
class NewMessageHandler(tornado.web.RequestHandler):
    # Handles /compose: GET renders the form, POST stores the message
    # asynchronously via Motor and redirects back to the listing.
    def get(self):
        """Show a 'compose message' form."""
        self.write('''
<form method="post">
<input type="text" name="msg">
<input type="submit">
</form>''')

    # Method exits before the HTTP request completes, thus "asynchronous"
    @tornado.web.asynchronous
    def post(self):
        """Insert a message."""
        msg = self.get_argument('msg')
        # Async insert; callback is executed when insert completes
        self.settings['db'].messages.insert(
            {'msg': msg},
            callback=self._on_response)

    def _on_response(self, result, error):
        # Insert callback: fail with HTTP 500 on error, else bounce the
        # browser back to the message list.
        if error:
            raise tornado.web.HTTPError(500, error)
        else:
            self.redirect('/')
class MessagesHandler(tornado.web.RequestHandler):
    # Handles /: streams every stored message, newest first, as an HTML list.
    @tornado.web.asynchronous
    def get(self):
        """Display all messages."""
        self.write('<a href="/compose">Compose a message</a><br>')
        self.write('<ul>')
        db = self.settings['db']
        # each() invokes _got_message once per document, then once more
        # with message=None when the cursor is exhausted
        db.messages.find().sort([('_id', -1)]).each(self._got_message)

    def _got_message(self, message, error):
        # Per-document callback for the Motor cursor above.
        if error:
            raise tornado.web.HTTPError(500, error)
        elif message:
            self.write('<li>%s</li>' % message['msg'])
        else:
            # Iteration complete
            self.write('</ul>')
            self.finish()
# Module-level wiring: Motor client on the default localhost MongoDB,
# using the 'test' database; the db handle is shared with handlers via
# Application settings.
db = motor.MotorClient().test

application = tornado.web.Application(
    [
        (r'/compose', NewMessageHandler),
        (r'/', MessagesHandler)
    ],
    db=db
)

print('Listening on http://localhost:8888')
application.listen(8888)
# start the event loop (blocks forever)
tornado.ioloop.IOLoop.instance().start()
| {
"repo_name": "mrkraimer/metadataservice",
"path": "tut_test.py",
"copies": "1",
"size": "1786",
"license": "bsd-3-clause",
"hash": 5936569321055356000,
"line_mean": 27.3492063492,
"line_max": 73,
"alpha_frac": 0.5789473684,
"autogenerated": false,
"ratio": 4.0225225225225225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5101469890922522,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arkilic'
import csv
import numpy as np
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import SGDClassifier
import random
import pprint
import sys, logging, struct
logging.basicConfig(level=logging.DEBUG)
import time
import pydoop.pipes as pp
from pydoop.utils import jc_configure, jc_configure_int
import pydoop.hdfs as hdfs
# Informational banner explaining the design trade-offs of this demo,
# printed when the script starts (with pauses so it is readable).
print "USING HDFS... working version"
time.sleep(1)
print("This includes links to Hadoop mapreduce routines via pydoop Mapper and Reader calls.\
However, as also explained on the paper, these calls are exteremely tedious to parse through\
since they read the data file line by line. SGD via scikit uses python C-types where calculations\
are vectorized and vectorized calls are a lot more performance savvy than hadoop's built-in mapreduce\
routines. Therefore, for python, due to the expense of calls via boost wrapped hadoop pipes, this implementation\
is not satisfactory and/or appropriate. The example in the demo displays submission of a simple task via pydoop\
for proof of concept.As this code will reveal, the results obtained are very similar to the results of mahout+hadoop version!\
As an alternative to this approach, one could easily use pyMPI or IPython notebook parallel utilizing HDFS as a neat distributed\
file system approach sending data to computation nodes cheaply(compared to existing methods which use wget etc.). Overall, python\
proved to be a good prototyping tool for such application that could make good use of the distributed file system. However, as a development\
tool, due to incompatibility issues between Java and Python, it is not a desireable tool for development via Hadoop. This might change in the\ future if pydoop reaches the maturity and simplicity required to perform such vectorized calculations efficiently")
time.sleep(3)
class Reader(pp.RecordReader):
    """Pydoop record reader that serves lines of an HDFS-resident CSV file.

    Each record is a (key, value) pair where the key is the big-endian
    packed byte offset of the line within the input split and the value
    is the raw CSV line.

    NOTE(review): ``self.bytes_read`` and ``self.isplit`` are read in
    ``next()`` but never initialized in ``__init__`` — presumably the
    pydoop base class or a missing ``jc_configure`` call was expected to
    set them; confirm against the pydoop RecordReader API.
    """
    def __init__(self, context):
        super(Reader, self).__init__()
        self.logger = logging.getLogger("Reader")  # formatted logger obtained
        # NOTE(review): input path is hard-coded rather than taken from
        # the task context — confirm this is intentional.
        self.file = hdfs.open('HD-2004-2014-d.csv')
        self.logger.debug("readline chunk size = %r" % self.file.chunk_size)
    def close(self):
        # Release both the file handle and its underlying HDFS connection.
        self.logger.debug("closing open handles")
        self.file.close()
        self.file.fs.close()
    def next(self):
        # Stop once we have consumed this task's share of the input split.
        if self.bytes_read > self.isplit.length:  # end of input split
            return (False, "", "")
        # Key = absolute byte offset of the record, packed as int64.
        key = struct.pack(">q", self.isplit.offset+self.bytes_read)
        record = self.file.readline()
        if record == "":  # end of file
            return (False, "", "")
        self.bytes_read += len(record)
        return (True, key, record)
class Mapper(pp.Mapper):
    """Word-count style pydoop mapper: emits (word, "1") per input token.

    NOTE(review): ``map()`` references the module-level file handle ``f``
    (opened later at module scope) and ``self.inputWords`` which is never
    assigned anywhere in this file — both would raise at runtime if this
    mapper were actually executed; the module-level ``runTask`` call is
    commented out, so this class appears to be proof-of-concept only.
    """
    def __init__(self, context):
        super(Mapper, self).__init__(context)
        self.logger = logging.getLogger("Mapper")
        context.setStatus("initializing")
    def map(self, context):
        k = context.getInputKey()
        # NOTE(review): result is unused and ``f`` is a module global —
        # confirm whether this line was meant to parse the record instead.
        tmp_data = csv.reader(f)
        words = context.getInputValue().split()
        for w in words:
            context.emit(w, "1")
        context.incrementCounter(self.inputWords, len(words))
    def close(self):
        self.logger.info("all done")
# ---------------------------------------------------------------------------
# Segment 1: train/test an SGD regressor on 30 years of HD daily OHLC data
# read from HDFS, then score how often the predicted close-minus-open sign
# matches an up-day in the held-out tail of the series.
# ---------------------------------------------------------------------------
print "Prediction on HD 30 year data:"
f = hdfs.open('/HD-1984-2014-d.csv')
tmp_data = csv.reader(f)
my_data = list()
# Materialize the CSV rows (each row is a list of string fields).
for item in tmp_data:
    tmp_item = list()
    for i in item:
        tmp_item.append(i)
    my_data.append(tmp_item)
data = my_data[1:]  # drop the CSV header row
X = list()
training_indices = list()
# First 90% of rows (chronological) are the training range.
for i in xrange(int(len(data)*0.9)):
    training_indices.append(i)
test_indices = list()
# Remaining rows (excluding index 0) form the test range.
for i in xrange(int(len(data))):
    if i in training_indices:
        pass
    else:
        if i == 0:
            pass
        else:
            test_indices.append(i)
# Features: columns 1..4 (open, high, low, close) as floats.
# NOTE(review): the model is fit on ALL rows (X, y), not just the
# training indices built above — confirm whether that was intended.
for s_data in data:
    X.append(map(float, s_data[1:5]))
y = list()   # target 1: closing price (column 4)
y2 = list()  # target 2: opening price (column 1)
for s_data in data:
    y.append(float(s_data[4]))
    y2.append(float(s_data[1]))
pprint.pprint('Training the supervised learning model... Fit on training data')
print('=========================================')
try:
    clf = SGDRegressor(loss="huber")
    pprint.pprint(clf.fit(X, y))
except:
    raise
try:
    clf2 = SGDRegressor(loss="huber")
    pprint.pprint(clf2.fit(X, y2))
except:
    raise
print('=========================================')
print 'Model testing itself! Confidence score on the training data used to construct:', clf.score(X, y)
pprint.pprint('Ready to predict')
print('=========================================')
pprint.pprint('Testing with test data...')
test_data = list()
test_diff = list()     # actual close-minus-open on each test day
predict_diff = list()  # predicted close-minus-open on each test day
for index in test_indices:
    tmp = data[index][1:5]
    my_tmp = list()
    for item in tmp:
        my_tmp.append(float(item))
    test_data.append(my_tmp)
    test_diff.append(float(data[index][4]) - float(data[index][1]))
# #
prediction_results_close = clf.predict(test_data)
prediction_results_open = clf2.predict(test_data)
for i in xrange(len(prediction_results_close)):
    p_diff = prediction_results_close[i] - prediction_results_open[i]
    predict_diff.append(p_diff)
print test_diff
print predict_diff
test_inc =0
# Count actual up-days in the test range.
for diff in test_diff:
    if diff > 0:
        test_inc += 1
p_inc =0
total_diff = 0
s = 0
ind1 = 0
correct_guess_index_1 = list()
tm_stmp1 = list()
for diff in predict_diff:
    total_diff += diff
    s += 1
    if diff > -0.147:  # normalization of the diff value for a data set (avoid floating errors)
        p_inc += 1
        correct_guess_index_1.append(s)
# Collect the timestamps (column 0) of the days flagged as "up".
for indx in correct_guess_index_1:
    tm_stmp1.append(data[indx][0])
myfile1 = open('1984_2014_correct_ts.csv', 'wb')
wr = csv.writer(myfile1, quoting=csv.QUOTE_ALL)
wr.writerow(tm_stmp1)
pprint.pprint(total_diff/float(s))
print "=========================================================================================\n"
# NOTE(review): this "accuracy" is predicted-up-count / actual-up-count,
# not a per-day hit rate, and can exceed 100% — confirm the metric.
print "The accuracy of the stock price prediction with 30 years of data ..: ", (p_inc/float(test_inc))*100
print "=========================================================================================\n"
# ---------------------------------------------------------------------------
# Segment 2: same pipeline as segment 1 but on 10 years of data, read from
# the local filesystem (not HDFS) and using only the first 10% of rows as
# the nominal "training" range.
# ---------------------------------------------------------------------------
print "Prediction on HD 10 year data:"
f = open('HD-2004-2014-d.csv')
tmp_data = csv.reader(f)
my_data = list()
for item in tmp_data:
    tmp_item = list()
    for i in item:
        tmp_item.append(i)
    my_data.append(tmp_item)
data = my_data[1:]  # drop the CSV header row
X = list()
training_indices = list()
for i in xrange(int(len(data)*0.1)):
    training_indices.append(i)
test_indices = list()
for i in xrange(int(len(data))):
    if i in training_indices:
        pass
    else:
        if i == 0:
            pass
        else:
            test_indices.append(i)
# NOTE(review): as in segment 1, the fit below uses ALL rows, not just
# ``training_indices`` — confirm intent.
for s_data in data:
    X.append(map(float, s_data[1:5]))
y = list()   # target 1: closing price
y2 = list()  # target 2: opening price
for s_data in data:
    y.append(float(s_data[4]))
    y2.append(float(s_data[1]))
pprint.pprint('Training the supervised learning model... Fit on training data')
print('=========================================')
try:
    clf = SGDRegressor(loss="huber")
    pprint.pprint(clf.fit(X, y))
except:
    raise
try:
    clf2 = SGDRegressor(loss="huber")
    pprint.pprint(clf2.fit(X, y2))
except:
    raise
print('=========================================')
print 'Model testing itself! Confidence score on the training data used to construct:', clf.score(X, y)
pprint.pprint('Ready to predict')
print('=========================================')
pprint.pprint('Testing with test data...')
test_data = list()
test_diff = list()
predict_diff = list()
for index in test_indices:
    tmp = data[index][1:5]
    my_tmp = list()
    for item in tmp:
        my_tmp.append(float(item))
    test_data.append(my_tmp)
    test_diff.append(float(data[index][4]) - float(data[index][1]))
# #
prediction_results_close = clf.predict(test_data)
prediction_results_open = clf2.predict(test_data)
for i in xrange(len(prediction_results_close)):
    p_diff = prediction_results_close[i] - prediction_results_open[i]
    predict_diff.append(p_diff)
print test_diff
print predict_diff
p = 0
for entry in test_diff:
    if entry > 0:
        p += 1
k=0
ind = 0
correct_guess_index = list()
tm_stmp = list()
for entry in predict_diff:
    ind += 1
    if entry>-13:  # normalization of the diff value for a data set (avoid floating errors)
        k += 1  # calculated via variance
        correct_guess_index.append(ind)
for indx in correct_guess_index:
    tm_stmp.append(data[indx][0])
myfile = open('2004_2014_correct_ts.csv', 'wb')
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(tm_stmp)
# NOTE(review): this ratio is actual-up-count / predicted-up-count, i.e.
# the INVERSE of the ratio printed in segment 1 — one of the two is
# presumably wrong; confirm which orientation was intended.
print "The accuracy of the stock price prediction with 10 years of data ..: ", (p/float(k))*100
print "=========================================================================================\n"
print " SUMMARY..: \n"
print "=========================================================================================\n"
print "The accuracy of the stock price prediction with 30 years of data ..: %", (p_inc/float(test_inc))*100
print "=========================================================================================\n"
print "The accuracy of the stock price prediction with 10 years of data ..: %", (p/float(k))*100
print "=========================================================================================\n"
print "This is consistent with results obtained using mahout!"
#if __name__ == "__main__":
# pp.runTask(pp.Factory(
# Mapper, Reducer,
# record_reader_class=Reader,
# record_writer_class=Writer,
# partitioner_class=Partitioner,
# combiner_class=Reducer
# ))
| {
"repo_name": "Sapphirine/Stock-price-Movement-Prediction",
"path": "pydoop/predict_new_mapred.py",
"copies": "1",
"size": "9356",
"license": "apache-2.0",
"hash": -1009239998960549100,
"line_mean": 29.1806451613,
"line_max": 258,
"alpha_frac": 0.6223813596,
"autogenerated": false,
"ratio": 3.4409709452004416,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9499608465979039,
"avg_score": 0.012748767764280466,
"num_lines": 310
} |
__author__ = 'Armando'
import webapp2
import json
import datetime
from entities.Usuario import Usuario
from time import mktime
class ObtenerUsuario(webapp2.RequestHandler):
    """GET /obtener — look up stored users by name and emit them as JSON.

    Query parameters:
        nombre: the user name used to build the datastore ancestor key.
    """
    def get(self):
        in_nombre = self.request.get('nombre')
        print 'in_nombre=%r' % in_nombre  # debug trace to the app log
        usuarios = Usuario.consultar_usuario(Usuario.usuario_key(in_nombre))
        for u in usuarios:
            if u.nombre:
                self.response.headers['Content-Type'] = 'application/json'
                obj = {
                    'nombre': u.nombre,
                    'correo_electronico': u.correo_electronico,
                    # NOTE(review): reads ``u.date`` although the entity's
                    # other fields use Spanish names — confirm the Usuario
                    # model really defines ``date`` (not ``fecha``).
                    'fecha': u.date
                }
                # NOTE(review): multiple matches produce concatenated JSON
                # documents in one response — confirm a JSON array was not
                # intended instead.
                self.response.out.write(json.dumps(obj, cls=MyEncoder))
class RegistrarUsuario(webapp2.RequestHandler):
    """GET /registrar — create a Usuario entity and report success as JSON.

    Query parameters:
        nombre: user name (also used as the ancestor key).
        correo_electronico: user e-mail address.

    Response: ``{"exito": 1}`` when the datastore put returned a key,
    ``{"exito": 0}`` otherwise.
    """
    def get(self):
        in_nombre = self.request.get('nombre')
        in_correo_electronico = self.request.get('correo_electronico')
        print 'in_nombre=%r,in_correo_electronico=%r' % (in_nombre, in_correo_electronico)  # debug trace
        usuario = Usuario(parent=Usuario.usuario_key(in_nombre))
        usuario.nombre = in_nombre
        usuario.correo_electronico = in_correo_electronico
        if usuario.put():
            respuesta = {'exito': 1}
        else:
            respuesta = {'exito': 0}
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(respuesta, cls=MyEncoder))
class MyEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetimes as integer Unix timestamps.

    Naive datetimes are interpreted in local time by ``mktime``; every
    other unsupported type falls through to the base-class behavior
    (which raises TypeError).
    """
    def default(self, o):
        if not isinstance(o, datetime.datetime):
            # Defer to the stock encoder for anything we don't handle.
            return json.JSONEncoder.default(self, o)
        return int(mktime(o.timetuple()))
# WSGI entry point: URL routing table mapping paths to request handlers.
app = webapp2.WSGIApplication([
    ('/registrar', RegistrarUsuario),
    ('/obtener', ObtenerUsuario)
],debug=True)
"repo_name": "apiconz/you-wrote-here",
"path": "index.py",
"copies": "1",
"size": "1753",
"license": "apache-2.0",
"hash": 7675751481243878000,
"line_mean": 28.7288135593,
"line_max": 90,
"alpha_frac": 0.6177980605,
"autogenerated": false,
"ratio": 3.36468330134357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.448248136184357,
"avg_score": null,
"num_lines": null
} |
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from uplift.validation.check import check_array
def _unique_multiclass(y):
    """Return the unique labels of a (multi)class target.

    Array-likes get numpy's sorted-unique fast path; any other iterable
    falls back to an (unordered) set of its elements.
    """
    if not hasattr(y, '__array__'):
        return set(y)
    return np.unique(np.asarray(y))
def _unique_indicator(y):
    """For a label-indicator matrix the "labels" are its column indices."""
    n_columns = check_array(y, ['csr', 'csc', 'coo']).shape[1]
    return np.arange(n_columns)
# Dispatch table: target-type string (as returned by type_of_target)
# -> helper that extracts the unique labels for that representation.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    *ys : array-likes,

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    if ys_types == set(["binary", "multiclass"]):
        # binary is a special case of multiclass: collapse the pair.
        ys_types = set(["multiclass"])
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format: all indicator matrices
    # must agree on the number of columns (= number of labels).
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if len(set(isinstance(label, str) for label in ys_labels)) > 1:
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
    """True only for float arrays whose every value is a whole number."""
    if y.dtype.kind != 'f':
        return False
    return np.all(y.astype(int) == y)
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ```False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel([[1], [0, 2], []])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # Must be a 2-D structure with at least two columns to qualify.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        # dok/lil have no .data in the CSR sense; normalize first.
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        # All stored values must collapse to a single integral value
        # (the implicit zeros supply the second "class").
        return (len(y.data) == 0 or np.unique(y.data).size == 1 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(np.unique(y.data))))
    else:
        labels = np.unique(y)
        # At most two distinct integral values -> binary indicator.
        return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                    _is_integral_float(labels))
def check_classification_targets(y):
    """Ensure that target y is of a non-regression type.

    Only the following target types (as defined in type_of_target) are
    allowed: 'binary', 'multiclass', 'multiclass-multioutput',
    'multilabel-indicator', 'multilabel-sequences'.

    Parameters
    ----------
    y : array-like
    """
    allowed = ('binary', 'multiclass', 'multiclass-multioutput',
               'multilabel-indicator', 'multilabel-sequences')
    y_type = type_of_target(y)
    if y_type not in allowed:
        raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
    """Determine the type of data indicated by target `y`

    Parameters
    ----------
    y : array-like

    Returns
    -------
    target_type : string
        One of:

        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.

    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # Accept sequences, sparse matrices and anything exposing __array__,
    # but never bare strings.
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, str))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    # Indicator matrices are recognized before any ndarray coercion.
    if is_multilabel(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # Known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], str)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead.')
    except IndexError:
        pass
    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], str)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]
    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        return 'continuous' + suffix
    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic

    Estimators that implement the ``partial_fit`` API need to be provided
    with the list of possible classes at the first call to partial_fit.

    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.

    This function returns True if it detects that this was the first call
    to ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute
    is also set on ``clf``.
    """
    previous = getattr(clf, 'classes_', None)
    if previous is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")
    if classes is not None:
        if previous is not None:
            # Re-supplied classes must match what we saw last time.
            if not np.array_equal(clf.classes_, unique_labels(classes)):
                raise ValueError(
                    "`classes=%r` is not the same as on last call "
                    "to partial_fit, was: %r" % (classes, clf.classes_))
        else:
            # This is the first call to partial_fit
            clf.classes_ = unique_labels(classes)
            return True
    # classes is None and clf.classes_ has already previously been set:
    # nothing to do
    return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data

    Parameters
    ----------
    y : array like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.

    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.

    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.

    n_classes : list of integers of size n_outputs
        Number of classes in each column

    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []
    n_samples, n_outputs = y.shape
    if issparse(y):
        # CSC gives cheap per-column (per-output) slicing.
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)  # stored-value count per column
        for k in range(n_outputs):
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = np.bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If an there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        # Dense path: one weighted bincount per output column.
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = np.bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())
    return (classes, n_classes, class_prior)
| {
"repo_name": "psarka/uplift",
"path": "uplift/validation/multiclass.py",
"copies": "1",
"size": "12707",
"license": "bsd-3-clause",
"hash": 3350516679443262000,
"line_mean": 32.6164021164,
"line_max": 79,
"alpha_frac": 0.5705516644,
"autogenerated": false,
"ratio": 3.6821211243117937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47526727887117937,
"avg_score": null,
"num_lines": null
} |
"""
Multi-class / multi-label utility function
==========================================
"""
from collections.abc import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from .validation import check_array, _assert_all_finite
def _unique_multiclass(y):
    """Return the unique labels of a (multi)class target.

    Array-likes use numpy's sorted-unique fast path; other iterables
    fall back to an (unordered) set of their elements.
    """
    if not hasattr(y, '__array__'):
        return set(y)
    return np.unique(np.asarray(y))
def _unique_indicator(y):
    """For a label-indicator matrix the "labels" are its column indices."""
    n_columns = check_array(y, accept_sparse=['csr', 'csc', 'coo']).shape[1]
    return np.arange(n_columns)
# Dispatch table: target-type string (as returned by type_of_target)
# -> helper that extracts the unique labels for that representation.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels.

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    *ys : array-likes

    Returns
    -------
    out : ndarray of shape (n_unique_labels,)
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    if ys_types == {"binary", "multiclass"}:
        # binary is a special case of multiclass: collapse the pair.
        ys_types = {"multiclass"}
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format: all indicator matrices
    # must agree on the number of columns (= number of labels).
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y,
                                accept_sparse=['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, str) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
    """True only for float arrays whose every value is a whole number."""
    if y.dtype.kind != 'f':
        return False
    return np.all(y.astype(int) == y)
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    Parameters
    ----------
    y : ndarray of shape (n_samples,)
        Target values.

    Returns
    -------
    out : bool
        Return ``True``, if ``y`` is in a multilabel format, else ```False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel([[1], [0, 2], []])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__') or isinstance(y, Sequence):
        # DeprecationWarning will be replaced by ValueError, see NEP 34
        # https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
        with warnings.catch_warnings():
            warnings.simplefilter('error', np.VisibleDeprecationWarning)
            try:
                y = np.asarray(y)
            except np.VisibleDeprecationWarning:
                # dtype=object should be provided explicitly for ragged arrays,
                # see NEP 34
                y = np.array(y, dtype=object)
    # Must be a 2-D structure with at least two columns to qualify.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        # dok/lil have no .data in the CSR sense; normalize first.
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        # All stored values must collapse to a single integral value
        # (the implicit zeros supply the second "class").
        return (len(y.data) == 0 or np.unique(y.data).size == 1 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(np.unique(y.data))))
    else:
        labels = np.unique(y)
        # At most two distinct integral values -> binary indicator.
        return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                    _is_integral_float(labels))
def check_classification_targets(y):
    """Ensure that target y is of a non-regression type.

    Only the following target types (as defined in type_of_target) are
    allowed: 'binary', 'multiclass', 'multiclass-multioutput',
    'multilabel-indicator', 'multilabel-sequences'.

    Parameters
    ----------
    y : array-like
    """
    allowed = ('binary', 'multiclass', 'multiclass-multioutput',
               'multilabel-indicator', 'multilabel-sequences')
    y_type = type_of_target(y)
    if y_type not in allowed:
        raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
    """Determine the type of data indicated by the target.

    Note that this type is the most specific type that can be inferred.
    For example:

        * ``binary`` is more specific but compatible with ``multiclass``.
        * ``multiclass`` of integers is more specific but compatible with
          ``continuous``.
        * ``multilabel-indicator`` is more specific but compatible with
          ``multiclass-multioutput``.

    Parameters
    ----------
    y : array-like

    Returns
    -------
    target_type : str
        One of:

        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.

    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multilabel-indicator'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # Accept sequences, sparse matrices and anything exposing __array__,
    # but never bare strings.
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, str))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
    if sparse_pandas:
        raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
    # Indicator matrices are recognized before any ndarray coercion.
    if is_multilabel(y):
        return 'multilabel-indicator'
    # DeprecationWarning will be replaced by ValueError, see NEP 34
    # https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
    with warnings.catch_warnings():
        warnings.simplefilter('error', np.VisibleDeprecationWarning)
        try:
            y = np.asarray(y)
        except np.VisibleDeprecationWarning:
            # dtype=object should be provided explicitly for ragged arrays,
            # see NEP 34
            y = np.asarray(y, dtype=object)
    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], str)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead - the MultiLabelBinarizer'
                             ' transformer can convert to this format.')
    except IndexError:
        pass
    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], str)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]
    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        _assert_all_finite(y)
        return 'continuous' + suffix
    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic.

    Estimators that implement the ``partial_fit`` API need to be provided
    with the list of possible classes at the first call to partial_fit.

    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.

    This function returns True if it detects that this was the first call
    to ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute
    is also set on ``clf``.
    """
    previous = getattr(clf, 'classes_', None)
    if previous is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")
    if classes is not None:
        if previous is not None:
            # Re-supplied classes must match what we saw last time.
            if not np.array_equal(clf.classes_, unique_labels(classes)):
                raise ValueError(
                    "`classes=%r` is not the same as on last call "
                    "to partial_fit, was: %r" % (classes, clf.classes_))
        else:
            # This is the first call to partial_fit
            clf.classes_ = unique_labels(classes)
            return True
    # classes is None and clf.classes_ has already previously been set:
    # nothing to do
    return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data.

    Parameters
    ----------
    y : {array-like, sparse matrix} of size (n_samples, n_outputs)
        The labels for each example.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    classes : list of size n_outputs of ndarray of size (n_classes,)
        List of classes for each column.

    n_classes : list of int of size n_outputs
        Number of classes in each column.

    class_prior : list of size n_outputs of ndarray of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []

    n_samples, n_outputs = y.shape
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)

    if issparse(y):
        # CSC gives cheap per-column (per-output) slicing.
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)  # stored-value count per column

        for k in range(n_outputs):
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = sample_weight[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]

            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = np.bincount(y_k, weights=nz_samp_weight)

            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum

            # If an there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)

            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        # Dense path: one weighted bincount per output column.
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = np.bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())

    return (classes, n_classes, class_prior)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking OvR decision function from OvO.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like of shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like of shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``.
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
# Monotonically transform the sum_of_confidences to (-1/3, 1/3)
# and add it with votes. The monotonic transformation is
# f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2
# to ensure that we won't reach the limits and change vote order.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
transformed_confidences = (sum_of_confidences /
(3 * (np.abs(sum_of_confidences) + 1)))
return votes + transformed_confidences
| {
"repo_name": "anntzer/scikit-learn",
"path": "sklearn/utils/multiclass.py",
"copies": "11",
"size": "16256",
"license": "bsd-3-clause",
"hash": 2315962123390929400,
"line_mean": 34.0344827586,
"line_max": 79,
"alpha_frac": 0.5796628937,
"autogenerated": false,
"ratio": 3.7586127167630057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9838275610463006,
"avg_score": null,
"num_lines": null
} |
"""
Multi-class / multi-label utility function
==========================================
"""
from collections.abc import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from .validation import check_array, _assert_all_finite
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
    # For a label-indicator matrix the "labels" are simply the column
    # indices 0..n_columns-1; check_array validates y (accepting the
    # listed sparse formats) before the shape is read.
    return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
# Dispatch table mapping a type_of_target() string to the helper that
# extracts the unique labels for that target format.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels.

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    *ys : array-likes

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    # "binary" is a degenerate case of "multiclass", so mixing the two
    # is allowed and collapses to the more general type.
    if ys_types == {"binary", "multiclass"}:
        ys_types = {"multiclass"}
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, str) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ```False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel([[1], [0, 2], []])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # Must be 2-D with more than one column to qualify as an indicator.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        # NOTE: `or` binds looser than `and` here -- the expression is
        # "all-zero matrix" OR ("one distinct stored value" AND that
        # value is boolean/integer/integral-float).
        return (len(y.data) == 0 or np.unique(y.data).size == 1 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(np.unique(y.data))))
    else:
        # Dense: at most two distinct values, all of integral kind.
        labels = np.unique(y)
        return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                    _is_integral_float(labels))
def check_classification_targets(y):
    """Ensure that target y is of a non-regression type.

    Only the following target types (as defined in type_of_target) are
    allowed: 'binary', 'multiclass', 'multiclass-multioutput',
    'multilabel-indicator', 'multilabel-sequences'.

    Parameters
    ----------
    y : array-like
    """
    allowed = ('binary', 'multiclass', 'multiclass-multioutput',
               'multilabel-indicator', 'multilabel-sequences')
    target_kind = type_of_target(y)
    if target_kind in allowed:
        return
    raise ValueError("Unknown label type: %r" % target_kind)
def type_of_target(y):
    """Determine the type of data indicated by the target.

    Note that this type is the most specific type that can be inferred.
    For example:

        * ``binary`` is more specific but compatible with ``multiclass``.
        * ``multiclass`` of integers is more specific but compatible with
          ``continuous``.
        * ``multilabel-indicator`` is more specific but compatible with
          ``multiclass-multioutput``.

    Parameters
    ----------
    y : array-like

    Returns
    -------
    target_type : string
        One of:

        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.

    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # Accept sequences, sparse matrices and anything array-like, but
    # reject bare strings (which are also Sequences).
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, str))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
    if sparse_pandas:
        raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
    if is_multilabel(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # Known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], str)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead - the MultiLabelBinarizer'
                             ' transformer can convert to this format.')
    except IndexError:
        # Empty input: fall through to the dimensional checks below.
        pass
    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], str)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]
    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        _assert_all_finite(y)
        return 'continuous' + suffix
    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data.

    Parameters
    ----------
    y : array like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.

    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.

    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.

    n_classes : list of integers of size n_outputs
        Number of classes in each column

    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []
    n_samples, n_outputs = y.shape
    if issparse(y):
        # CSC gives cheap per-column access via indptr/indices slices.
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)
        for k in range(n_outputs):
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = np.bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        # Dense path: column-wise unique + weighted counts.
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = np.bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())
    return (classes, n_classes, class_prior)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking OvR decision function from OvO.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
# Monotonically transform the sum_of_confidences to (-1/3, 1/3)
# and add it with votes. The monotonic transformation is
# f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2
# to ensure that we won't reach the limits and change vote order.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
transformed_confidences = (sum_of_confidences /
(3 * (np.abs(sum_of_confidences) + 1)))
return votes + transformed_confidences
| {
"repo_name": "chrsrds/scikit-learn",
"path": "sklearn/utils/multiclass.py",
"copies": "1",
"size": "15256",
"license": "bsd-3-clause",
"hash": -237182039651572700,
"line_mean": 33.4379232506,
"line_max": 79,
"alpha_frac": 0.5774777137,
"autogenerated": false,
"ratio": 3.7155382367267413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4793015950426741,
"avg_score": null,
"num_lines": null
} |
"""
Multi-class / multi-label utility function
==========================================
"""
from collections import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_sequence_of_sequence(y):
if hasattr(y, '__array__'):
y = np.asarray(y)
return set(chain.from_iterable(y))
def _unique_indicator(y):
    # For a label-indicator matrix the "labels" are the column indices
    # 0..n_columns-1; check_array validates y (accepting the listed
    # sparse formats) before the shape is read.
    return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
# Dispatch table mapping a type_of_target() string to the helper that
# extracts the unique labels for that target format.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-sequences': _unique_sequence_of_sequence,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels.

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    ys : array-likes,

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    # "binary" is a degenerate case of "multiclass": mixing them is fine
    # and collapses to the more general type.
    if ys_types == set(["binary", "multiclass"]):
        ys_types = set(["multiclass"])
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1] for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %r" % ys)
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_label_indicator_matrix(y):
    """ Check if ``y`` is in the label indicator matrix format (multilabel).

    Parameters
    ----------
    y : numpy array of shape [n_samples] or sequence of sequences
        Target values. In the multilabel case the nested sequences can
        have variable lengths.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a label indicator matrix format,
        else ``False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_label_indicator_matrix
    >>> is_label_indicator_matrix([0, 1, 0, 1])
    False
    >>> is_label_indicator_matrix([[1], [0, 2], []])
    False
    >>> is_label_indicator_matrix(np.array([[1, 0], [0, 0]]))
    True
    >>> is_label_indicator_matrix(np.array([[1], [0], [0]]))
    False
    >>> is_label_indicator_matrix(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # Must be 2-D with more than one column to qualify as an indicator.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        # NOTE: `or` binds looser than `and` -- this reads as "no stored
        # data" OR ("all stored values equal" AND integral-kind dtype).
        return (len(y.data) == 0 or np.ptp(y.data) == 0 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(np.unique(y.data))))
    else:
        # Dense: at most two distinct values, all of integral kind.
        labels = np.unique(y)
        return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                    _is_integral_float(labels))
def is_sequence_of_sequences(y):
    """ Check if ``y`` is in the sequence of sequences format (multilabel).

    This format is DEPRECATED.

    Parameters
    ----------
    y : sequence or array.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is a sequence of sequences else ``False``.
    """
    # the explicit check for ndarray is for forward compatibility; future
    # versions of Numpy might want to register ndarray as a Sequence
    try:
        if hasattr(y, '__array__'):
            y = np.asarray(y)
        # Sequence-of-sequences: the first element is itself a Sequence
        # but neither an array-like nor a string.
        out = (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
               and not isinstance(y[0], string_types))
    # IndexError: empty input; TypeError: unindexable input.
    except (IndexError, TypeError):
        return False
    if out:
        warnings.warn('Direct support for sequence of sequences multilabel '
                      'representation will be unavailable from version 0.17. '
                      'Use sklearn.preprocessing.MultiLabelBinarizer to '
                      'convert to a label indicator representation.',
                      DeprecationWarning)
    return out
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    Parameters
    ----------
    y : numpy array of shape [n_samples] or sequence of sequences
        Target values. In the multilabel case the nested sequences can
        have variable lengths.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ```False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    # Either of the two supported multilabel representations counts.
    return is_label_indicator_matrix(y) or is_sequence_of_sequences(y)
def type_of_target(y):
    """Determine the type of data indicated by target `y`

    Parameters
    ----------
    y : array-like

    Returns
    -------
    target_type : string
        One of:

        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-sequences': `y` is a sequence of sequences, a 1d
          array-like of objects that are sequences of labels.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, or an array of non-sequence objects.

    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # Accept sequences, sparse matrices and anything array-like, but
    # reject bare strings (which are also Sequences).
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, string_types))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    # Multilabel formats are detected before the ndarray conversion.
    if is_sequence_of_sequences(y):
        return 'multilabel-sequences'
    elif is_label_indicator_matrix(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'
    elif y.ndim == 2 and y.shape[1] > 1:
        suffix = '-multioutput'
    else:
        # column vector or 1d
        suffix = ''
    # check float and contains non-integer float values:
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        return 'continuous' + suffix
    if len(np.unique(y)) <= 2:
        assert not suffix, "2d binary array-like should be multilabel"
        return 'binary'
    else:
        return 'multiclass' + suffix
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic

    Estimators that implement the ``partial_fit`` API need to be provided with
    the list of possible classes at the first call to partial_fit.

    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.

    This function returns True if it detects that this was the first call to
    ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
    set on ``clf``.
    """
    if getattr(clf, 'classes_', None) is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")
    elif classes is not None:
        if getattr(clf, 'classes_', None) is not None:
            # Later call with explicit classes: they must match exactly.
            if not np.all(clf.classes_ == unique_labels(classes)):
                raise ValueError(
                    "`classes=%r` is not the same as on last call "
                    "to partial_fit, was: %r" % (classes, clf.classes_))
        else:
            # This is the first call to partial_fit
            clf.classes_ = unique_labels(classes)
            return True
    # classes is None and clf.classes_ has already previously been set:
    # nothing to do
    return False
| {
"repo_name": "RPGOne/Skynet",
"path": "scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/utils/multiclass.py",
"copies": "3",
"size": "11203",
"license": "bsd-3-clause",
"hash": -2981303035437182500,
"line_mean": 31.100286533,
"line_max": 86,
"alpha_frac": 0.5893956976,
"autogenerated": false,
"ratio": 3.8079537729435757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5897349470543576,
"avg_score": null,
"num_lines": null
} |
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_sequence_of_sequence(y):
if hasattr(y, '__array__'):
y = np.asarray(y)
return set(chain.from_iterable(y))
def _unique_indicator(y):
    # Indicator-matrix "labels" are the column indices 0..n_columns-1;
    # check_array validates y (accepting the listed sparse formats).
    return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
# Dispatch table mapping a type_of_target() string to the helper that
# extracts the unique labels for that target format.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-sequences': _unique_sequence_of_sequence,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels.

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    *ys : array-likes,

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    # "binary" is a degenerate case of "multiclass": mixing them is fine
    # and collapses to the more general type.
    if ys_types == set(["binary", "multiclass"]):
        ys_types = set(["multiclass"])
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1] for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %r" % ys)
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_label_indicator_matrix(y):
    """ Check if ``y`` is in the label indicator matrix format (multilabel).

    Parameters
    ----------
    y : numpy array of shape [n_samples] or sequence of sequences
        Target values. In the multilabel case the nested sequences can
        have variable lengths.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a label indicator matrix format,
        else ``False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_label_indicator_matrix
    >>> is_label_indicator_matrix([0, 1, 0, 1])
    False
    >>> is_label_indicator_matrix([[1], [0, 2], []])
    False
    >>> is_label_indicator_matrix(np.array([[1, 0], [0, 0]]))
    True
    >>> is_label_indicator_matrix(np.array([[1], [0], [0]]))
    False
    >>> is_label_indicator_matrix(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # Must be 2-D with more than one column to qualify as an indicator.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        # NOTE: `or` binds looser than `and` -- this reads as "no stored
        # data" OR ("all stored values equal" AND integral-kind dtype).
        return (len(y.data) == 0 or np.ptp(y.data) == 0 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(np.unique(y.data))))
    else:
        # Dense: at most two distinct values, all of integral kind.
        labels = np.unique(y)
        return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                    _is_integral_float(labels))
def is_sequence_of_sequences(y):
    """ Check if ``y`` is in the sequence of sequences format (multilabel).

    This format is DEPRECATED.

    Parameters
    ----------
    y : sequence or array.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is a sequence of sequences else ``False``.
    """
    # the explicit check for ndarray is for forward compatibility; future
    # versions of Numpy might want to register ndarray as a Sequence
    try:
        if hasattr(y, '__array__'):
            y = np.asarray(y)
        # Sequence-of-sequences: the first element is itself a Sequence
        # but neither an array-like nor a string.
        out = (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
               and not isinstance(y[0], string_types))
    # IndexError: empty input; TypeError: unindexable input.
    except (IndexError, TypeError):
        return False
    if out:
        warnings.warn('Direct support for sequence of sequences multilabel '
                      'representation will be unavailable from version 0.17. '
                      'Use sklearn.preprocessing.MultiLabelBinarizer to '
                      'convert to a label indicator representation.',
                      DeprecationWarning)
    return out
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    Parameters
    ----------
    y : numpy array of shape [n_samples] or sequence of sequences
        Target values. In the multilabel case the nested sequences can
        have variable lengths.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ```False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    # Either of the two supported multilabel representations counts.
    return is_label_indicator_matrix(y) or is_sequence_of_sequences(y)
def type_of_target(y):
    """Determine the type of data indicated by target `y`
    Parameters
    ----------
    y : array-like
    Returns
    -------
    target_type : string
        One of:
        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-sequences': `y` is a sequence of sequences, a 1d
          array-like of objects that are sequences of labels.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, or an array of non-sequence objects.
    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, string_types))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    # Multilabel formats are checked first: they are the most specific.
    if is_sequence_of_sequences(y):
        return 'multilabel-sequences'
    elif is_label_indicator_matrix(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # 3-d (or deeper) arrays, and object arrays of non-strings, are not
    # recognized target types.
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'
    elif y.ndim == 2 and y.shape[1] > 1:
        suffix = '-multioutput'
    else:
        # column vector or 1d
        suffix = ''
    # check float and contains non-integer float values:
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        return 'continuous' + suffix
    if len(np.unique(y)) <= 2:
        # A 2-d array with <= 2 values would have been caught by the
        # label-indicator check above.
        assert not suffix, "2d binary array-like should be multilabel"
        return 'binary'
    else:
        return 'multiclass' + suffix
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic
    Estimators that implement the ``partial_fit`` API need to be provided with
    the list of possible classes at the first call to partial_fit.
    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.
    This function returns True if it detects that this was the first call to
    ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
    set on ``clf``.
    """
    if getattr(clf, 'classes_', None) is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")
    elif classes is not None:
        if getattr(clf, 'classes_', None) is not None:
            # Classes were already fixed on an earlier call: the new value
            # must match exactly (after canonical ordering).
            if not np.all(clf.classes_ == unique_labels(classes)):
                raise ValueError(
                    "`classes=%r` is not the same as on last call "
                    "to partial_fit, was: %r" % (classes, clf.classes_))
        else:
            # This is the first call to partial_fit
            clf.classes_ = unique_labels(classes)
            return True
    # classes is None and clf.classes_ has already previously been set:
    # nothing to do
    return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data
    Parameters
    ----------
    y : array like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.
    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.
    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column
    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []
    n_samples, n_outputs = y.shape
    if issparse(y):
        # Process column by column in CSC format; implicit zeros are
        # counted through the number of non-stored entries per column.
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)
        for k in range(n_outputs):
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())
    return (classes, n_classes, class_prior)
| {
"repo_name": "hsuantien/scikit-learn",
"path": "sklearn/utils/multiclass.py",
"copies": "92",
"size": "13986",
"license": "bsd-3-clause",
"hash": 5448907726861517000,
"line_mean": 31.8309859155,
"line_max": 86,
"alpha_frac": 0.5832975833,
"autogenerated": false,
"ratio": 3.7995110024449876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
    # For a label indicator matrix the "labels" are just the column indices.
    return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
# Dispatch table mapping a type_of_target() string to the helper that
# extracts the unique labels for that target representation.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels
    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels
    At the moment, we also don't allow "multiclass-multioutput" input type.
    Parameters
    ----------
    *ys : array-likes,
    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.
    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    # "binary" is a special case of "multiclass": mixing the two is allowed.
    if ys_types == set(["binary", "multiclass"]):
        ys_types = set(["multiclass"])
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.
    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.
    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ```False``.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # Multilabel data must be a 2-d structure with more than one column.
    if not (hasattr(y, 'shape') and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        # DOK and LIL do not expose a flat .data vector of stored values.
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        if len(y.data) == 0:
            # An all-zero matrix is a valid (empty) indicator matrix.
            return True
        stored = np.unique(y.data)
        if stored.size != 1:
            return False
        return bool(y.dtype.kind in 'biu'  # bool, int, uint
                    or _is_integral_float(stored))
    labels = np.unique(y)
    if len(labels) >= 3:
        return False
    return bool(y.dtype.kind in 'biu'  # bool, int, uint
                or _is_integral_float(labels))
def check_classification_targets(y):
    """Ensure that target y is of a non-regression type.
    Only the following target types (as defined in type_of_target) are allowed:
        'binary', 'multiclass', 'multiclass-multioutput',
        'multilabel-indicator', 'multilabel-sequences'
    Parameters
    ----------
    y : array-like
    """
    y_type = type_of_target(y)
    # Continuous (regression-style) and 'unknown' targets are rejected.
    if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
                      'multilabel-indicator', 'multilabel-sequences']:
        raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
    """Determine the type of data indicated by the target.
    Note that this type is the most specific type that can be inferred.
    For example:
        * ``binary`` is more specific but compatible with ``multiclass``.
        * ``multiclass`` of integers is more specific but compatible with
          ``continuous``.
        * ``multilabel-indicator`` is more specific but compatible with
          ``multiclass-multioutput``.
    Parameters
    ----------
    y : array-like
    Returns
    -------
    target_type : string
        One of:
        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.
    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, string_types))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    # pandas SparseSeries has an __array__ attribute but cannot be handled.
    sparseseries = (y.__class__.__name__ == 'SparseSeries')
    if sparseseries:
        raise ValueError("y cannot be class 'SparseSeries'.")
    # Indicator matrices are the most specific format: check them first.
    if is_multilabel(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # Known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], string_types)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead.')
    except IndexError:
        pass
    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]
    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        return 'continuous' + suffix
    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic
    Estimators that implement the ``partial_fit`` API need to be provided with
    the list of possible classes at the first call to partial_fit.
    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.
    This function returns True if it detects that this was the first call to
    ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
    set on ``clf``.
    """
    if getattr(clf, 'classes_', None) is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")
    elif classes is not None:
        if getattr(clf, 'classes_', None) is not None:
            # Classes were already fixed on an earlier call: the new value
            # must match exactly (after canonical ordering).
            if not np.array_equal(clf.classes_, unique_labels(classes)):
                raise ValueError(
                    "`classes=%r` is not the same as on last call "
                    "to partial_fit, was: %r" % (classes, clf.classes_))
        else:
            # This is the first call to partial_fit
            clf.classes_ = unique_labels(classes)
            return True
    # classes is None and clf.classes_ has already previously been set:
    # nothing to do
    return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data
    Parameters
    ----------
    y : array like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.
    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.
    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column
    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []
    n_samples, n_outputs = y.shape
    if issparse(y):
        # Process column by column in CSC format; implicit zeros are
        # counted through the number of non-stored entries per column.
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)
        for k in range(n_outputs):
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = np.bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = np.bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())
    return (classes, n_classes, class_prior)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking ovr decision function.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if max_confidences == min_confidences:
return votes
# Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = (0.5 - eps) / max_abs_confidence
return votes + sum_of_confidences * scale
| {
"repo_name": "ryfeus/lambda-packs",
"path": "LightGBM_sklearn_scipy_numpy/source/sklearn/utils/multiclass.py",
"copies": "8",
"size": "15200",
"license": "mit",
"hash": -944408530042938500,
"line_mean": 32.9285714286,
"line_max": 79,
"alpha_frac": 0.5803289474,
"autogenerated": false,
"ratio": 3.6964980544747084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8276827001874708,
"avg_score": null,
"num_lines": null
} |
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from ..utils.fixes import _Sequence as Sequence
from .validation import check_array
def _unique_multiclass(y):
    # Array-likes get NumPy's sorted unique values; any other iterable is
    # reduced to a plain set of labels.
    if hasattr(y, '__array__'):
        return np.unique(np.asarray(y))
    else:
        return set(y)
def _unique_indicator(y):
    # For a label indicator matrix the "labels" are just the column indices.
    return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
# Dispatch table mapping a type_of_target() string to the helper that
# extracts the unique labels for that target representation.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels
    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels
    At the moment, we also don't allow "multiclass-multioutput" input type.
    Parameters
    ----------
    *ys : array-likes
    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.
    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    # "binary" is a special case of "multiclass": mixing the two is allowed.
    if ys_types == set(["binary", "multiclass"]):
        ys_types = set(["multiclass"])
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
    # True iff y has a floating dtype but every value is a whole number.
    return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.
    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.
    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ```False``.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel([[1], [0, 2], []])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # Multilabel data must be a 2-d structure with more than one column.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        # DOK and LIL do not expose a flat .data vector of stored values.
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        # NOTE: `or` binds looser than `and` here -- an empty .data
        # (all-zero matrix) is accepted outright; otherwise there must be
        # exactly one stored value of an integral kind.
        return (len(y.data) == 0 or np.unique(y.data).size == 1 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(np.unique(y.data))))
    else:
        labels = np.unique(y)
        return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                    _is_integral_float(labels))
def check_classification_targets(y):
    """Raise ``ValueError`` unless ``y`` is a classification-style target.

    Accepted ``type_of_target`` results: 'binary', 'multiclass',
    'multiclass-multioutput', 'multilabel-indicator',
    'multilabel-sequences'.

    Parameters
    ----------
    y : array-like
    """
    allowed = ('binary', 'multiclass', 'multiclass-multioutput',
               'multilabel-indicator', 'multilabel-sequences')
    target_kind = type_of_target(y)
    if target_kind not in allowed:
        raise ValueError("Unknown label type: %r" % target_kind)
def type_of_target(y):
    """Determine the type of data indicated by the target.
    Note that this type is the most specific type that can be inferred.
    For example:
        * ``binary`` is more specific but compatible with ``multiclass``.
        * ``multiclass`` of integers is more specific but compatible with
          ``continuous``.
        * ``multilabel-indicator`` is more specific but compatible with
          ``multiclass-multioutput``.
    Parameters
    ----------
    y : array-like
    Returns
    -------
    target_type : string
        One of:
        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.
    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, string_types))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    # pandas SparseSeries has an __array__ attribute but cannot be handled.
    sparseseries = (y.__class__.__name__ == 'SparseSeries')
    if sparseseries:
        raise ValueError("y cannot be class 'SparseSeries'.")
    # Indicator matrices are the most specific format: check them first.
    if is_multilabel(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # Known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], string_types)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead.')
    except IndexError:
        pass
    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]
    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        return 'continuous' + suffix
    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic
    Estimators that implement the ``partial_fit`` API need to be provided with
    the list of possible classes at the first call to partial_fit.
    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.
    This function returns True if it detects that this was the first call to
    ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
    set on ``clf``.
    """
    if getattr(clf, 'classes_', None) is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")
    elif classes is not None:
        if getattr(clf, 'classes_', None) is not None:
            # Classes were already fixed on an earlier call: the new value
            # must match exactly (after canonical ordering).
            if not np.array_equal(clf.classes_, unique_labels(classes)):
                raise ValueError(
                    "`classes=%r` is not the same as on last call "
                    "to partial_fit, was: %r" % (classes, clf.classes_))
        else:
            # This is the first call to partial_fit
            clf.classes_ = unique_labels(classes)
            return True
    # classes is None and clf.classes_ has already previously been set:
    # nothing to do
    return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data
    Parameters
    ----------
    y : array like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.
    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.
    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column
    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []
    n_samples, n_outputs = y.shape
    if issparse(y):
        # Process column by column in CSC format; implicit zeros are
        # counted through the number of non-stored entries per column.
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)
        for k in range(n_outputs):
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = np.bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = np.bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())
    return (classes, n_classes, class_prior)
def _ovr_decision_function(predictions, confidences, n_classes):
    """Compute a continuous, tie-breaking OvR decision function from OvO.
    It is important to include a continuous value, not only votes,
    to make computing AUC or calibration meaningful.
    Parameters
    ----------
    predictions : array-like, shape (n_samples, n_classifiers)
        Predicted classes for each binary classifier.
    confidences : array-like, shape (n_samples, n_classifiers)
        Decision functions or predicted probabilities for positive class
        for each binary classifier.
    n_classes : int
        Number of classes. n_classifiers must be
        ``n_classes * (n_classes - 1 ) / 2``
    """
    n_samples = predictions.shape[0]
    votes = np.zeros((n_samples, n_classes))
    sum_of_confidences = np.zeros((n_samples, n_classes))
    # Column k of the OvO outputs corresponds to the class pair (i, j).
    k = 0
    for i in range(n_classes):
        for j in range(i + 1, n_classes):
            sum_of_confidences[:, i] -= confidences[:, k]
            sum_of_confidences[:, j] += confidences[:, k]
            votes[predictions[:, k] == 0, i] += 1
            votes[predictions[:, k] == 1, j] += 1
            k += 1
    max_confidences = sum_of_confidences.max()
    min_confidences = sum_of_confidences.min()
    if max_confidences == min_confidences:
        # No spread in confidence: votes alone decide.
        return votes
    # Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
    # The motivation is to use confidence levels as a way to break ties in
    # the votes without switching any decision made based on a difference
    # of 1 vote.
    eps = np.finfo(sum_of_confidences.dtype).eps
    max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
    scale = (0.5 - eps) / max_abs_confidence
    return votes + sum_of_confidences * scale
| {
"repo_name": "vortex-ape/scikit-learn",
"path": "sklearn/utils/multiclass.py",
"copies": "9",
"size": "15222",
"license": "bsd-3-clause",
"hash": -3505567174184493000,
"line_mean": 33.0536912752,
"line_max": 79,
"alpha_frac": 0.5805413218,
"autogenerated": false,
"ratio": 3.692867540029112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 447
} |
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
import numpy as np
from scipy.sparse import dok_matrix
from scipy.sparse import issparse
from scipy.sparse import lil_matrix
from scipy.sparse.base import spmatrix
from .validation import check_array
from ..externals.six import string_types
from ..utils.fixes import array_equal
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
    """Labels of a label-indicator matrix are simply its column indices."""
    n_labels = check_array(y, ['csr', 'csc', 'coo']).shape[1]
    return np.arange(n_labels)
# Dispatch table mapping the target-type string (as returned by
# type_of_target) to the helper that extracts that format's unique labels.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels.

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else
          (because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    *ys : array-likes,

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    if ys_types == set(["binary", "multiclass"]):
        # Binary is a degenerate case of multiclass, so this mix is allowed.
        ys_types = set(["multiclass"])
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format: every indicator matrix
    # must declare the same number of label columns.
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels via the format-specific extractor.
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel([[1], [0, 2], []])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # A multilabel target must be 2d with more than one label column.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        if isinstance(y, (dok_matrix, lil_matrix)):
            # dok/lil have no contiguous .data buffer; convert to CSR first.
            y = y.tocsr()
        # NOTE: `or` binds looser than `and`, so an all-zero sparse matrix
        # (empty .data) is accepted unconditionally; otherwise the stored
        # values must be a single integral value.
        return (len(y.data) == 0 or np.unique(y.data).size == 1 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(np.unique(y.data))))
    else:
        labels = np.unique(y)
        # At most two distinct integral values (e.g. {0, 1}).
        return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                    _is_integral_float(labels))
def check_classification_targets(y):
    """Ensure that target y is of a non-regression type.

    Only the following target types (as defined in type_of_target) are
    allowed: 'binary', 'multiclass', 'multiclass-multioutput',
    'multilabel-indicator', 'multilabel-sequences'.

    Parameters
    ----------
    y : array-like

    Raises
    ------
    ValueError
        If ``y`` is of a continuous/regression or unknown type.
    """
    allowed = ('binary', 'multiclass', 'multiclass-multioutput',
               'multilabel-indicator', 'multilabel-sequences')
    target_kind = type_of_target(y)
    if target_kind not in allowed:
        raise ValueError("Unknown label type: %r" % target_kind)
def type_of_target(y):
    """Determine the type of data indicated by target `y`

    Parameters
    ----------
    y : array-like

    Returns
    -------
    target_type : string
        One of:

        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.

    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # Accept sequences, scipy sparse matrices, and anything exposing
    # __array__, but reject bare strings (they are Sequences too).
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, string_types))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    if is_multilabel(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # Known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], string_types)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead.')
    except IndexError:
        # Empty target: fall through to the generic checks below.
        pass
    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]
    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        return 'continuous' + suffix
    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic.

    Estimators that implement the ``partial_fit`` API need to be provided with
    the list of possible classes at the first call to partial_fit.

    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.

    This function returns True if it detects that this was the first call to
    ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
    set on ``clf``.
    """
    if getattr(clf, 'classes_', None) is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")
    elif classes is not None:
        if getattr(clf, 'classes_', None) is not None:
            # Subsequent call: the provided classes must match the ones
            # recorded on the first call exactly.
            if not array_equal(clf.classes_, unique_labels(classes)):
                raise ValueError(
                    "`classes=%r` is not the same as on last call "
                    "to partial_fit, was: %r" % (classes, clf.classes_))
        else:
            # This is the first call to partial_fit
            clf.classes_ = unique_labels(classes)
            return True
    # classes is None and clf.classes_ has already previously been set:
    # nothing to do
    return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data.

    Parameters
    ----------
    y : array like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.
    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.

    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column
    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []
    n_samples, n_outputs = y.shape
    if issparse(y):
        y = y.tocsc()
        # Number of stored entries per column (may include explicit zeros).
        y_nnz = np.diff(y.indptr)
        for k in range(n_outputs):
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        # Dense path: one np.unique + weighted bincount per output column.
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())
    return (classes, n_classes, class_prior)
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/scikit-learn-master/sklearn/utils/multiclass.py",
"copies": "1",
"size": "12982",
"license": "mit",
"hash": 2469879870440017000,
"line_mean": 32.5452196382,
"line_max": 79,
"alpha_frac": 0.5711754737,
"autogenerated": false,
"ratio": 3.7049086757990866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4776084149499087,
"avg_score": null,
"num_lines": null
} |
"""
Multi-class / multi-label utility function
==========================================
"""
from collections import Sequence
from itertools import chain
import numpy as np
from ..externals.six import string_types
def _unique_multiclass(y):
if isinstance(y, np.ndarray):
return np.unique(y)
else:
return set(y)
def _unique_sequence_of_sequence(y):
return set(chain.from_iterable(y))
def _unique_indicator(y):
return np.arange(y.shape[1])
# Dispatch table: target-type string (see type_of_target) -> extractor of
# that format's unique labels.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-sequences': _unique_sequence_of_sequence,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels.

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else
          (because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    ys : array-likes,

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    >>> unique_labels(np.array([[0.0, 1.0], [1.0, 1.0]]), np.zeros((2, 2)))
    array([0, 1])
    >>> unique_labels([(1, 2), (3,)], [(1, 2), tuple()])
    array([1, 2, 3])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    if ys_types == set(["binary", "multiclass"]):
        # Binary is a degenerate case of multiclass, so the mix is allowed.
        ys_types = set(["multiclass"])
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format: all indicator matrices
    # must have the same number of label columns.
    if (label_type == "multilabel-indicator" and
            len(set(y.shape[1] for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels via the format-specific extractor.
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type")
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_label_indicator_matrix(y):
    """ Check if ``y`` is in the label indicator matrix format (multilabel).

    Parameters
    ----------
    y : numpy array of shape [n_samples] or sequence of sequences
        Target values. In the multilabel case the nested sequences can
        have variable lengths.

    Returns
    -------
    out : bool,
        ``True`` when ``y`` is a 2d array with more than one column holding
        at most two distinct integral (or boolean) values, else ``False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_label_indicator_matrix
    >>> is_label_indicator_matrix([0, 1, 0, 1])
    False
    >>> is_label_indicator_matrix(np.array([[1, 0], [0, 0]]))
    True
    >>> is_label_indicator_matrix(np.array([[1], [0], [0]]))
    False
    """
    if not hasattr(y, "shape"):
        return False
    if y.ndim != 2 or y.shape[1] <= 1:
        return False
    labels = np.unique(y)
    if len(labels) > 2:
        return False
    return y.dtype.kind in 'biu' or _is_integral_float(labels)
def is_sequence_of_sequences(y):
    """ Check if ``y`` is in the sequence of sequences format (multilabel).

    Parameters
    ----------
    y : sequence or array.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is a sequence of sequences else ``False``.
    """
    # ndarray is excluded explicitly for forward compatibility: a future
    # numpy version might register ndarray as a Sequence.
    try:
        first = y[0]
    except IndexError:
        return False
    return (not isinstance(first, np.ndarray) and isinstance(first, Sequence)
            and not isinstance(first, string_types))
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    A target is multilabel when it is either a label indicator matrix or a
    sequence of sequences.

    Parameters
    ----------
    y : numpy array of shape [n_samples] or sequence of sequences
        Target values. In the multilabel case the nested sequences can
        have variable lengths.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
    """
    if is_label_indicator_matrix(y):
        return True
    return is_sequence_of_sequences(y)
def type_of_target(y):
    """Determine the type of data indicated by target `y`

    Parameters
    ----------
    y : array-like

    Returns
    -------
    target_type : string
        One of:

        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-sequences': `y` is a sequence of sequences, a 1d
          array-like of objects that are sequences of labels.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, or an array of non-sequence objects.

    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target([['a', 'b'], ['c'], []])
    'multilabel-sequences'
    >>> type_of_target([[]])
    'multilabel-sequences'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # XXX: is there a way to duck-type this condition?
    valid = (isinstance(y, (np.ndarray, Sequence))
             and not isinstance(y, string_types))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    # Multilabel formats are recognized before converting to ndarray.
    if is_sequence_of_sequences(y):
        return 'multilabel-sequences'
    elif is_label_indicator_matrix(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'
    elif y.ndim == 2 and y.shape[1] > 1:
        suffix = '-multioutput'
    else:
        # column vector or 1d
        suffix = ''
    # check float and contains non-integer float values:
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        return 'continuous' + suffix
    if len(np.unique(y)) <= 2:
        assert not suffix, "2d binary array-like should be multilabel"
        return 'binary'
    else:
        return 'multiclass' + suffix
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic.

    Estimators that implement the ``partial_fit`` API need to be provided with
    the list of possible classes at the first call to partial_fit.

    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.

    This function returns True if it detects that this was the first call to
    ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
    set on ``clf``.
    """
    if getattr(clf, 'classes_', None) is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")
    elif classes is not None:
        if getattr(clf, 'classes_', None) is not None:
            # Subsequent call: the provided classes must match the recorded
            # ones exactly.
            if not np.all(clf.classes_ == unique_labels(classes)):
                raise ValueError(
                    "`classes=%r` is not the same as on last call "
                    "to partial_fit, was: %r" % (classes, clf.classes_))
        else:
            # This is the first call to partial_fit
            clf.classes_ = unique_labels(classes)
            return True
    # classes is None and clf.classes_ has already previously been set:
    # nothing to do
    return False
| {
"repo_name": "Sklearn-HMM/scikit-learn-HMM",
"path": "sklean-hmm/utils/multiclass.py",
"copies": "3",
"size": "10950",
"license": "bsd-3-clause",
"hash": 7673745331732883000,
"line_mean": 30.6473988439,
"line_max": 79,
"alpha_frac": 0.5852054795,
"autogenerated": false,
"ratio": 3.70809346427362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 346
} |
"""
Multi-class / multi-label utility function
==========================================
"""
from collections import Sequence
import numpy as np
from ..externals.six import string_types
def unique_labels(*lists_of_labels):
    """Extract an ordered array of unique labels.

    Parameters
    ----------
    lists_of_labels : list of labels,
        The supported "list of labels" are:
            - a list / tuple / numpy array of int
            - a list of lists / tuples of int;
            - a binary indicator matrix (2D numpy array)

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    >>> unique_labels(np.array([[0.0, 1.0], [1.0, 1.0]]), np.zeros((2, 2)))
    array([0, 1])
    >>> unique_labels([(1, 2), (3,)], [(1, 2), tuple()])
    array([1, 2, 3])
    """
    def _unique_labels(y):
        # Per-input extraction: indicator matrices contribute their column
        # indices, sequences of sequences the union of their members, and
        # plain 1d targets their np.unique.
        classes = None
        if is_multilabel(y):
            if is_label_indicator_matrix(y):
                classes = np.arange(y.shape[1])
            else:
                classes = np.array(sorted(set.union(*map(set, y))))
        else:
            classes = np.unique(y)
        return classes
    if not lists_of_labels:
        raise ValueError('No list of labels has been passed.')
    # Stack the per-input label arrays and deduplicate across inputs.
    return np.unique(np.hstack(_unique_labels(y) for y in lists_of_labels))
def is_label_indicator_matrix(y):
    """ Check if ``y`` is in the label indicator matrix format (multilabel).

    Parameters
    ----------
    y : numpy array of shape [n_samples] or sequence of sequences
        Target values. In the multilabel case the nested sequences can
        have variable lengths.

    Returns
    -------
    out : bool,
        ``True`` when ``y`` is a 2d array with more than one row and more
        than one column holding at most two distinct values, else ``False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_label_indicator_matrix
    >>> is_label_indicator_matrix([0, 1, 0, 1])
    False
    >>> is_label_indicator_matrix(np.array([[1, 0], [0, 0]]))
    True
    >>> is_label_indicator_matrix(np.array([[1, 0, 0]]))
    False
    """
    if not hasattr(y, "shape"):
        return False
    if len(y.shape) != 2:
        return False
    n_samples, n_labels = y.shape
    if n_labels <= 1 or n_samples <= 1:
        return False
    return np.size(np.unique(y)) <= 2
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    Parameters
    ----------
    y : numpy array of shape [n_samples] or sequence of sequences
        Target values. In the multilabel case the nested sequences can
        have variable lengths.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
    """
    # A sequence of (non-ndarray, non-string) sequences is multilabel, as is
    # a label indicator matrix.  ndarray is excluded explicitly for forward
    # compatibility: future numpy versions might register ndarray as a
    # Sequence.
    first = y[0]
    if (not isinstance(first, np.ndarray) and isinstance(first, Sequence)
            and not isinstance(first, string_types)):
        return True
    return is_label_indicator_matrix(y)
| {
"repo_name": "kmike/scikit-learn",
"path": "sklearn/utils/multiclass.py",
"copies": "3",
"size": "3824",
"license": "bsd-3-clause",
"hash": -6581158473212420000,
"line_mean": 27.5373134328,
"line_max": 79,
"alpha_frac": 0.5575313808,
"autogenerated": false,
"ratio": 3.6246445497630333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5682175930563034,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arnaud'
#!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""Test harness for the logging module. Tests BufferingSMTPHandler, an alternative implementation
of SMTPHandler.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
from google.appengine.api import mail
import logging, logging.handlers
from configuration import configuration_dict
class BufferingSMTPHandler(logging.handlers.BufferingHandler):
    """Buffering log handler that emails accumulated records in one batch.

    Records are buffered up to *capacity*; on flush, all formatted records
    are joined into a single message and sent through the App Engine
    ``mail`` API to the configured admin address.
    """

    # Sender and recipient both come from the application configuration.
    FROM = TO = configuration_dict['admin_email']

    def __init__(self, capacity):
        logging.handlers.BufferingHandler.__init__(self, capacity)
        self.fromaddr = BufferingSMTPHandler.FROM
        self.toaddrs = BufferingSMTPHandler.TO
        self.setFormatter(logging.Formatter("%(asctime)s %(levelname)-5s %(message)s"))

    def flush(self):
        """Format every buffered record and send them as a single email.

        The buffer is always cleared afterwards, even when sending failed.
        """
        if len(self.buffer) > 0:
            try:
                lines = []
                for record in self.buffer:
                    s = self.format(record)
                    print(s)
                    lines.append(s)
                # Join once instead of quadratic string concatenation.
                msg = "".join(line + "\r\n" for line in lines)
                mail.send_mail(self.fromaddr, self.toaddrs, "Error in VideoNot.es", msg)
            # `except Exception:` replaces the Py2-only `except Exception, e`
            # syntax (the bound exception was unused anyway).
            except Exception:
                self.handleError(None)  # no particular record
        self.buffer = []
"repo_name": "UniShared/videonotes",
"path": "BufferedSmtpHandler.py",
"copies": "3",
"size": "2304",
"license": "mit",
"hash": 9164536774876668000,
"line_mean": 42.4905660377,
"line_max": 97,
"alpha_frac": 0.7057291667,
"autogenerated": false,
"ratio": 4.189090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6394820075790909,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Arnaud Wery'
import oauth2 as oauth
import json
from datetime import datetime, date
from urllib import quote_plus
from httplib2 import ServerNotFoundError, MalformedHeader
import time
import socket
# Load the oAuth credentials/settings shipped alongside this module.
# NOTE(review): the file handle is never closed -- acceptable for a
# short-lived script, but worth confirming.
json_data = open('oAuth')
oAuth = json.load(json_data)
# Service endpoint and oAuth consumer credentials used by railtimeFetcher.
BASE_URL = oAuth['BASE_URL']
CONSUMER_KEY = oAuth['CONSUMER_KEY']
CONSUMER_SECRET = oAuth['CONSUMER_SECRET']
HEADERS = oAuth['HEADERS']
class railtimeFetcher():
    """Small client for the Railtime API using oAuth-signed HTTP GETs.

    Responses are parsed as JSON by default; pass return_json_object=False
    to get the raw response body instead.
    """

    def __init__(self, key=CONSUMER_KEY, secret=CONSUMER_SECRET, headers=HEADERS, return_json_object=True):
        # 5-second timeout per HTTP request; retries are handled in fetch().
        self.consumer = oauth.Consumer(key=key, secret=secret)
        self.client = oauth.Client(self.consumer, timeout=5)
        self.headers = headers
        self.return_json_object = return_json_object

    def fetch(self, url):
        """GET BASE_URL+url, retrying up to 10 times on errors/timeouts.

        Returns the parsed JSON (or raw body) on HTTP 200, or None when
        every attempt failed.
        """
        tries = 0
        while tries < 10:
            tries += 1
            try:
                resp, content = self.client.request(BASE_URL+url, headers=self.headers)  # default : GET
                if resp.status == 200:  # OK
                    if self.return_json_object:
                        return json.loads(content)
                    else:
                        return content
                else:
                    # Non-200: back off briefly and retry.
                    print 'Response status: %s, wait 2 sec. before retrying' % resp.status
                    time.sleep(2)
            except (ServerNotFoundError, MalformedHeader):
                print 'Site is down or Malformed header, wait 2 sec. before retrying'
                time.sleep(2)  # 2 sec
            except socket.timeout:
                print "Timeout, wait 2 sec. before retrying"
                time.sleep(2)
        print 'Exceeded number tries'
        return None

    def trainSchedule(self, trainNumber, trainDate=date.today()):
        """Fetch the schedule of one train for a (non-future) date.

        NOTE(review): the default trainDate is evaluated once at import
        time (mutable-default style pitfall) -- confirm this is intended.
        """
        if trainDate > date.today():
            print 'Impossible to predict the futur (train date: ' + self.formatDate(trainDate) + ')'
            return
        url = "RetrieveTrainSchedule?trainNumber="+str(trainNumber)+"&requestedDate=" + self.formatDate(trainDate) + "+00%3A00%3A00&dateType=1&language=3"
        return self.fetch(url)

    def routes(self, departureStation, arrivalStation, trainDatetime=datetime.now(), countBefore=0, countAfter=3):
        """Fetch routes between two stations around a (non-future) datetime."""
        if trainDatetime.date() > date.today():
            print 'Impossible to predict the futur (train date: ' + self.formatDate(trainDatetime) + ')'
            return
        url = "RetrieveRoutes?departureStationId="+str(departureStation)+"&arrivalStationId="+str(arrivalStation)+"&dateTime="+self.formatDate(trainDatetime)+"&searchType=1&minTransferTime=0&resultCountBefore="+str(countBefore)+"&resultCountAfter="+str(countAfter)
        return self.fetch(url)

    def stationList(self):
        """Fetch the full list of stations."""
        url = "RetrieveStationList"
        return self.fetch(url)

    def formatDate(self, d):
        """Format a date as ISO, or a datetime as a URL-quoted timestamp.

        Returns None for any other type (implicit fall-through).
        """
        if type(d) is date:
            return d.isoformat()
        elif type(d) is datetime:
            return quote_plus(d.strftime("%Y-%m-%d %H:%M:%S"))

    def printJSON(self, myJson):
        """Pretty-print a parsed JSON object to stdout."""
        print json.dumps(myJson, sort_keys=True, indent=4, separators=(',', ': '))
| {
"repo_name": "JensNevens/Bachelorproject",
"path": "ipynb/railfetcher.py",
"copies": "2",
"size": "3158",
"license": "mit",
"hash": 6975041219039824000,
"line_mean": 38.475,
"line_max": 268,
"alpha_frac": 0.6073464218,
"autogenerated": false,
"ratio": 3.9425717852684143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.028640780683814416,
"num_lines": 80
} |
__author__ = 'Arnav'
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import csv
#############Read the years from txt file################
# Each line of tracks_per_year.txt looks like "<year><SEP><...>"; collect
# the year field (as a string) for every track.
year_list = []
with open('../Resources/FullSet/AdditionalFiles/tracks_per_year.txt', 'rU', encoding='utf8') as f:
    for row in csv.reader(f):
        year = row[0].split('<SEP>')[0]
        year_list.append(year)

count = 0
ignore = 0
previous_year = None  # year of the run currently being counted

#############Ignore the years not in proper format and count################
# Writes "year|count" per consecutive run of identical years.  Fixes from
# the original: the previous year is updated whenever the year changes (the
# original kept a stale value, mislabeling single-occurrence years), the
# final run is flushed after the loop (the original dropped it), only
# malformed years are skipped (the bare `except` also masked other errors),
# and the output file is closed deterministically.
with open('year_vs_count.txt', 'a') as f_out:
    for current_year in year_list:
        try:
            year_value = int(current_year)
        except ValueError:
            ignore += 1
            continue
        if previous_year is not None and year_value == int(previous_year):
            count += 1
        else:
            if previous_year is not None:
                f_out.write(str(previous_year) + "|" + str(count) + "\n")
            count = 1
            previous_year = current_year
    # Flush the last run of years.
    if previous_year is not None:
        f_out.write(str(previous_year) + "|" + str(count) + "\n")

print(str(ignore) + " Years not in correct format, ignored")
| {
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/Visualizations & Insights/visualize_songs_per_year.py",
"copies": "1",
"size": "1305",
"license": "mit",
"hash": 1562590168239993000,
"line_mean": 24.5882352941,
"line_max": 90,
"alpha_frac": 0.5647509579,
"autogenerated": false,
"ratio": 3.4432717678100264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.937718420556263,
"avg_score": 0.026167704029479412,
"num_lines": 51
} |
__author__ = 'Arnav & Nishant'
import pandas
import numpy
import time
start_time=time.time()
import pandas
import numpy
import time
# Column names for the metadata table (per-track genre and year labels).
col_meta = ['track_id','song_id','wc_genre', 'wc_year']
# Input feature columns, in on-disk order.
# NOTE(review): 'PicthesMean' is a typo for 'PitchesMean', but these strings
# must match the column headers used by the rest of the pipeline -- do not
# rename them here in isolation.
col_data_in = [
    'track_id', 'song_id',
    'AvgBarDuration','Loudness', 'Tempo','ArtistFamiliarity','ArtistHotttnesss','SongHotttnesss',
    'Mode[0]','Mode[1]','Year',
    #Key features
    'Key[0]','Key[1]','Key[2]','Key[3]','Key[4]','Key[5]',
    'Key[6]','Key[7]','Key[8]','Key[9]','Key[10]','Key[11]',
    #Picthes Mean
    'PicthesMean[0]','PicthesMean[1]','PicthesMean[2]','PicthesMean[3]','PicthesMean[4]','PicthesMean[5]',
    'PicthesMean[6]','PicthesMean[7]','PicthesMean[8]','PicthesMean[9]','PicthesMean[10]','PicthesMean[11]',
    #Pitches Variance
    'PitchesVar[0]','PitchesVar[1]','PitchesVar[2]','PitchesVar[3]','PitchesVar[4]','PitchesVar[5]',
    'PitchesVar[6]','PitchesVar[7]','PitchesVar[8]','PitchesVar[9]','PitchesVar[10]','PitchesVar[11]',
    #Timbre Mean
    'TimbreMean[0]','TimbreMean[1]','TimbreMean[2]','TimbreMean[3]','TimbreMean[4]','TimbreMean[5]',
    'TimbreMean[6]','TimbreMean[7]','TimbreMean[8]','TimbreMean[9]','TimbreMean[10]','TimbreMean[11]',
    #Timbre Variance
    'TimbreVar[0]','TimbreVar[1]','TimbreVar[2]','TimbreVar[3]','TimbreVar[4]','TimbreVar[5]',
    'TimbreVar[6]','TimbreVar[7]','TimbreVar[8]','TimbreVar[9]','TimbreVar[10]','TimbreVar[11]',
    #Time Signature
    'TimeSig[0]', 'TimeSig[1]', 'TimeSig[2]', 'TimeSig[3]', 'TimeSig[4]', 'TimeSig[5]']
col_data_out = [ 'wc_genre', 'track_id',
'AvgBarDuration','Loudness', 'Tempo','ArtistFamiliarity','ArtistHotttnesss','SongHotttnesss',
'Mode[0]','Mode[1]','wc_year',
'Key[0]','Key[1]','Key[2]','Key[3]','Key[4]','Key[5]',
'Key[6]','Key[7]','Key[8]','Key[9]','Key[10]','Key[11]',
'PicthesMean[0]','PicthesMean[1]','PicthesMean[2]','PicthesMean[3]','PicthesMean[4]','PicthesMean[5]',
'PicthesMean[6]','PicthesMean[7]','PicthesMean[8]','PicthesMean[9]','PicthesMean[10]','PicthesMean[11]',
'PitchesVar[0]','PitchesVar[1]','PitchesVar[2]','PitchesVar[3]','PitchesVar[4]','PitchesVar[5]',
'PitchesVar[6]','PitchesVar[7]','PitchesVar[8]','PitchesVar[9]','PitchesVar[10]','PitchesVar[11]',
'TimbreMean[0]','TimbreMean[1]','TimbreMean[2]','TimbreMean[3]','TimbreMean[4]','TimbreMean[5]',
'TimbreMean[6]','TimbreMean[7]','TimbreMean[8]','TimbreMean[9]','TimbreMean[10]','TimbreMean[11]',
'TimbreVar[0]','TimbreVar[1]','TimbreVar[2]','TimbreVar[3]','TimbreVar[4]','TimbreVar[5]',
'TimbreVar[6]','TimbreVar[7]','TimbreVar[8]','TimbreVar[9]','TimbreVar[10]','TimbreVar[11]',
'TimeSig[0]', 'TimeSig[1]', 'TimeSig[2]', 'TimeSig[3]', 'TimeSig[4]', 'TimeSig[5]']
start_time=time.time()
df_meta = pandas.read_csv('MSD_Data_Genre_WikiCorrected_DupRemoved.bin', header=None, delimiter = "|", names=col_meta)
df_data = pandas.read_csv('feature_matrix_full.bin', header=0, delimiter = "|", names=col_data_in)
df_merged = pandas.merge(df_data, df_meta, how='left', on=['track_id', 'track_id', 'song_id', 'song_id'])
#handling missing data
for index, row in df_merged.iterrows():
if (row['wc_genre'] == '') or (row['wc_genre'] is None):
df_merged.set_value(index, 'wc_genre', 'UNCAT')
if (not numpy.isfinite(row['wc_year'])) or (row['wc_year'] == '') or (row['wc_year'] is None):
df_merged.set_value(index, 'wc_year', 0)
elapsed_time = time.time() - start_time
print "elapsed time : ", elapsed_time
df_merged.to_csv('MSD_Final_Dataset_For_EE660.bin', sep="|", index=False, header=None, columns=col_data_out)
elapsed_time = time.time() - start_time
print "elapsed time : ", elapsed_time | {
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/Data Generation & Manipulation/MSD_merge_feature_file_with_wikicorrected_genredata.py",
"copies": "1",
"size": "3914",
"license": "mit",
"hash": -1746562432058049800,
"line_mean": 44.523255814,
"line_max": 121,
"alpha_frac": 0.5935104752,
"autogenerated": false,
"ratio": 2.460087994971716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8427489221272952,
"avg_score": 0.025221849779752767,
"num_lines": 86
} |
__author__ = 'Arnav'
# Plots mean/variance/std of the 12 pitch classes for one sample song per
# genre, read from Million Song Subset HDF5 files.
import numpy as np
import h5py
import combine_feat as combine
import matplotlib
import matplotlib.pyplot as plt
# one hand-picked HDF5 track file per genre (hard-coded local paths)
f_hiphop = h5py.File("G:\project660\Resources\MillionSongSubset\data\A\A\A\TRAAAAW128F429D538.h5", 'r')
f_classic = h5py.File("G:\project660\Resources\MillionSongSubset\data\B\H\H\TRBHHHC128F428428E.h5", 'r')
f_country = h5py.File("G:\project660\Resources\MillionSongSubset\data\B\C\E\TRBCEST128F9327FBF.h5", 'r')
f_metal = h5py.File("G:\project660\Resources\MillionSongSubset\data\B\C\R\TRBCRDW128F422D8BA.h5", 'r')
f_pop = h5py.File("G:\project660\Resources\MillionSongSubset\data\B\C\W\TRBCWNH128F93103EE.h5", 'r')
f = [f_hiphop, f_classic, f_country, f_metal, f_pop]
labs = ["Hip-hop", "Classical", "Country", "Metal", "Pop"]
"""
feat_pop = combine.combine_feature(np.array(f_pop['/analysis/segments_pitches']))
full_feat, axarr = plt.subplots(3, sharex=True, sharey=True)
axarr[0].set_title('Mean, Var and Std of 12 pitches')
#axarr.legend(loc='best')
axarr[0].plot(feat_pop[:12], label="Pop")
axarr[1].plot(feat_pop[12:24], label="Pop")
axarr[2].plot(feat_pop[24:], label="Pop")
full_feat.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in full_feat.axes[:-1]], visible=False)
#print(feat_pop)
plt.show()
"""
# Set up subplot figures
full_feat, axarr = plt.subplots(3, sharex=True)
#coloring the background to R/G/B for Mean/Var-and-Std plots
axarr[0].axvspan(0, 12, facecolor='r', alpha=0.2)
axarr[1].axvspan(0, 12, facecolor='g', alpha=0.2)
axarr[2].axvspan(0, 12, facecolor='b', alpha=0.2)
#adding annotation and axes labels to plots
axarr[0].annotate('Means', xy=(11, 0.1), xycoords='data', color='r')
axarr[1].annotate('Variances', xy=(11, 0.01), xycoords='data', color='g')
axarr[2].annotate('Standard Deviations', xy=(11, 0.04), xycoords='data', color='b')
axarr[0].set_title('Mean, Var and Std of 12 pitches for all segments')
# Combine features and plot them for each genre
# feat layout (from combine_feature): [0:12]=means, [12:24]=variances,
# [24:]=stds -- presumably one entry per pitch class; TODO confirm against
# combine_feat.combine_feature.
counter=1 # this is how we initialize array for first iteration
for file, genre in zip(f, labs):
    if counter == 1:
        # only segments whose detection confidence exceeds 0.5 are used
        is_confident = np.array(file['/analysis/segments_confidence'])>0.5
        feat = combine.combine_feature(np.array(file['/analysis/segments_pitches']), is_confident)
        axarr[0].plot(feat[:12], label=genre)
        axarr[1].plot(feat[12:24], label=genre)
        axarr[2].plot(feat[24:], label=genre)
        axarr[0].legend(loc='upper right', fontsize='xx-small')
        counter += 1 # after feat is calculated for 1st file, we'll never enter this loop
    else:
        is_confident = np.array(file['/analysis/segments_confidence'])>0.5
        temp = combine.combine_feature(np.array(file['/analysis/segments_pitches']), is_confident)
        axarr[0].plot(temp[:12], label=genre)
        axarr[1].plot(temp[12:24], label=genre)
        axarr[2].plot(temp[24:], label=genre)
        axarr[0].legend(loc='upper right', fontsize='xx-small')
        # stack each genre's feature row under the first one
        feat = np.vstack((feat, temp))
# Fine-tune figure; make subplots close to each other and hide x ticks for all but bottom plot.
full_feat.subplots_adjust(hspace=0.05)
#plt.setp([a.get_xticklabels() for a in full_feat.axes[:-1]], visible=False)
# Set Plot Labels and print figure
plt.xlabel('12 pitches value')
plt.ylabel('Normalized scores for each pitch', y=1.5)
plt.show() | {
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/Visualizations & Insights/visualize_segment_pitches.py",
"copies": "1",
"size": "3306",
"license": "mit",
"hash": -7723350524344235000,
"line_mean": 44.301369863,
"line_max": 104,
"alpha_frac": 0.6941923775,
"autogenerated": false,
"ratio": 2.7435684647302905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8890236749512006,
"avg_score": 0.009504818543656859,
"num_lines": 73
} |
__author__ = 'Arnav'
"""
This module provides functionality for reducing the dimensions of a data set by
decomposing a multivariate dataset in a set of successive orthogonal components
that explain a maximum amount of the variance.
"""
def using_PCA(feature_mat, reduced_dim):
    """
    Project the feature matrix onto its top principal components.

    Uses singular value decomposition to keep only the reduced_dim most
    significant directions of variance.
    :param feature_mat: numpy array of float/int features with no missing values
    :param reduced_dim: required number of dimensions
    :return: feature matrix with reduced_dim number of dimensions
    """
    from sklearn.decomposition import PCA
    pca_model = PCA(n_components=reduced_dim)
    pca_model.fit(feature_mat)
    return pca_model.transform(feature_mat)
def using_LDA(feature_mat, true_labels, reduced_dim):
    """
    Project the features onto the most discriminative directions found by
    Linear Discriminant Analysis.

    :param feature_mat: numpy array of float/int features with no missing values
    :param true_labels: numpy array of true labels
    :param reduced_dim: required number of dimensions
    :return: reduced feature matrix with reduced_dim number of dimensions
    """
    from sklearn.lda import LDA
    import numpy
    flat_labels = numpy.ravel(true_labels[:])
    lda_model = LDA(n_components=reduced_dim).fit(feature_mat, flat_labels)
    return lda_model.transform(feature_mat)
| {
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/Machine_Learning_Algos/10k_Tests/reduce_dimensions.py",
"copies": "1",
"size": "1436",
"license": "mit",
"hash": 7199656445020852000,
"line_mean": 43.875,
"line_max": 124,
"alpha_frac": 0.7527855153,
"autogenerated": false,
"ratio": 4.211143695014663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002610017219963866,
"num_lines": 32
} |
__author__ = 'Arnav'
# !/usr/bin/env python
'''
Using : Python 2.7+ (backward compatibility exists for Python 3.x if separate environment created)
Required files : hdf5_getters.py
Required packages : numpy, pandas, matplotlib, sklearn
# Uses LDA for classification
'''
import pandas
import matplotlib.pyplot as plt
import numpy as np
# main function
if __name__ == '__main__':
    # column layout of the cleaned merged dataset: genre label first,
    # then 69 numeric feature columns
    col_input = ['genre', 'AvgBarDuration','Loudness', 'Tempo','ArtistFamiliarity','ArtistHotttnesss','SongHotttnesss',
                 'Mode[0]','Mode[1]','Year',
                 'Key[0]','Key[1]','Key[2]','Key[3]','Key[4]','Key[5]',
                 'Key[6]','Key[7]','Key[8]','Key[9]','Key[10]','Key[11]',
                 'PicthesMean[0]','PicthesMean[1]','PicthesMean[2]','PicthesMean[3]','PicthesMean[4]','PicthesMean[5]',
                 'PicthesMean[6]','PicthesMean[7]','PicthesMean[8]','PicthesMean[9]','PicthesMean[10]','PicthesMean[11]',
                 'PitchesVar[0]','PitchesVar[1]','PitchesVar[2]','PitchesVar[3]','PitchesVar[4]','PitchesVar[5]',
                 'PitchesVar[6]','PitchesVar[7]','PitchesVar[8]','PitchesVar[9]','PitchesVar[10]','PitchesVar[11]',
                 'TimbreMean[0]','TimbreMean[1]','TimbreMean[2]','TimbreMean[3]','TimbreMean[4]','TimbreMean[5]',
                 'TimbreMean[6]','TimbreMean[7]','TimbreMean[8]','TimbreMean[9]','TimbreMean[10]','TimbreMean[11]',
                 'TimbreVar[0]','TimbreVar[1]','TimbreVar[2]','TimbreVar[3]','TimbreVar[4]','TimbreVar[5]',
                 'TimbreVar[6]','TimbreVar[7]','TimbreVar[8]','TimbreVar[9]','TimbreVar[10]','TimbreVar[11]']
    df_input = pandas.read_csv('pandas_merged_output_cleaned_None.csv',
                               header=None, delimiter="|", names=col_input)
    # drop rows with missing values and rows with unknown (0) release year
    df_input = df_input.dropna()
    df_input = df_input[df_input['Year'] != 0]
    #df_input = df_input[df_input['Year'] != 0][df_input['Year'] < 1992]
    #df_input = df_input[df_input['Year'] != 0][df_input['Year'] >= 1992]
    # column 0 is the genre target; columns 1..69 are the features
    df_input_target = df_input[list(range(0, 1))].as_matrix()
    df_input_data = df_input[list(range(1, 70))].as_matrix()
    # splitting the data into training and testing sets
    from sklearn.cross_validation import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(df_input_data, df_input_target.tolist())
    # Start LDA Classification
    from sklearn.lda import LDA
    clf = LDA(solver='svd', shrinkage=None, n_components=None).fit(X_train, np.ravel(y_train[:]))
    predicted = clf.predict(X_test)
    # y_test is a list of one-element lists, hence the flattening below
    matches = (predicted == [item for sublist in y_test for item in sublist])
    print "Accuracy : ", (matches.sum() / float(len(matches)))
| {
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/Machine_Learning_Algos/10k_Tests/ml_classification_lda.py",
"copies": "1",
"size": "2670",
"license": "mit",
"hash": 7879826802542534000,
"line_mean": 45.8421052632,
"line_max": 121,
"alpha_frac": 0.6104868914,
"autogenerated": false,
"ratio": 2.7554179566563466,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3865904848056346,
"avg_score": null,
"num_lines": null
} |
__author__ = 'a'
#Parse XML directly from the file path
import xml.etree.ElementTree as xml
import os
import shutil
htmlRendrering = ""
def openScreenToPage():
    """Append the standard page preamble (html/head/css link/body/centered div) to the shared buffer."""
    global htmlRendrering
    preamble = (
        "<html>\n<head>\n"
        '<link href="{{ STATIC_URL }}home.css" rel="stylesheet" type="text/css" media="screen" />'
        '</head>\n<body>\n'
        '<br/><br/><div align="center">'
    )
    htmlRendrering += preamble
def endScreenEndPage():
    """Close the container div, body and html tags opened by openScreenToPage."""
    global htmlRendrering
    htmlRendrering = htmlRendrering + "</div></body>\n</html>\n"
def findvalues(e):
    """Return the text of every child under e's <values> element.

    Falls back to [""] both when there is no <values> element and when it
    is present but empty, so callers that read the first entry (``l[0]``)
    never hit an IndexError.  Previously a present-but-empty <values>
    produced [] and crashed those callers.
    """
    values = e.find('values')
    if values is None:
        return [""]
    texts = [value.text for value in values]
    return texts if texts else [""]
def newScreen():
    """Reset the shared HTML buffer before rendering the next screen."""
    global htmlRendrering
    htmlRendrering = ""
def componentType(e):
    """Return the component's 'type' attribute, or its upper-cased <identifier> text when no attributes exist."""
    return e.attrib['type'] if len(e.attrib) != 0 else e.find('identifier').text.upper()
def button(e):
    """Append an <input type="button"> whose name, id and value are all the component identifier.

    The original assigned ``l = findvalues(e)`` without ever using it and
    called ``e.find('identifier')`` three times; the dead code is removed
    and the lookup hoisted.
    """
    global htmlRendrering
    ident = e.find('identifier').text
    htmlRendrering += ' <input type=\"button' + '\" name=\"' + ident + '\" id =\"' + ident + '\" value=\"' + ident + '\"/><br>\n'
def link(e):
    """Append an anchor whose href and visible text are both the component identifier.

    Removes the unused ``l = findvalues(e)`` dead code of the original.
    """
    global htmlRendrering
    ident = e.find('identifier').text
    htmlRendrering += ' <a href=\"' + ident + '\">' + ident + '</a><br/>\n'
def staticText(e):
    """Append a labelled <p> element filled with the component's first configured value."""
    global htmlRendrering
    vals = findvalues(e)
    ident = e.find('identifier').text
    htmlRendrering += ident + ': <p' + ' name=\"' + ident + '\" id =\"' + ident + '\">' + vals[0] + '</p><br/>\n'
def dynamicText(e):
    """Append a labelled <div><span> pair holding the component's first configured value."""
    global htmlRendrering
    vals = findvalues(e)
    ident = e.find('identifier').text
    htmlRendrering += ident + ': <div><span' + ' name=\"' + ident + '\" id =\"' + ident + '\">' + vals[0] + '</span></div><br/>\n'
def radioButton(e):
    """Append one radio <input> per configured value; all share the identifier as name/id."""
    global htmlRendrering
    ident = e.find('identifier').text
    for option in findvalues(e):
        htmlRendrering += ident + ': <input type=\"radio\"' + ' name=\"' + ident + '\" id =\"' + ident + '\" value=\"' + option + '\">' + option + '<br/>\n'
def checkBox(e):
    """Append one checkbox <input> per configured value; all share the identifier as name/id."""
    global htmlRendrering
    ident = e.find('identifier').text
    for option in findvalues(e):
        htmlRendrering += ident + ': <input type=\"checkbox\"' + ' name=\"' + ident + '\" id =\"' + ident + '\" value=\"' + option + '\">' + option + '<br/>\n'
def comboBox(e):
    """Append a <select> with one <option> per grandchild component's identifier."""
    global htmlRendrering
    htmlRendrering += e.find('identifier').text + ': <select name=\"' + e.find('identifier').text + '\" id=\"' + e.find('identifier').text +'\">\n'
    # NOTE(review): this relies on Element truthiness -- a present but empty
    # <children> element is falsy and treated like a missing one (and newer
    # Python versions warn about this test). Confirm that is intended.
    if not e.find('children'):
        pass
    else:
        # iterate all children-of-children; each grandchild becomes an option
        for child in e:
            for component1 in child:
                htmlRendrering += '<option value=\"' + component1.find('identifier').text + '\">' + component1.find('identifier').text + '</option>\n'
    htmlRendrering += '</select><br/>\n'
def tableTag(e):
    """Append a <table>: header cells from child <component>s, data rows from <values>."""
    global htmlRendrering
    htmlRendrering += e.find('identifier').text + ': <table border=5><br/>\n'
    if e.find('children') is not None:
        for child in e:
            if child.find('component') is not None:
                # NOTE(review): '<tf>' is not an HTML tag -- presumably a typo
                # for '<tr>' (or '<thead>'); kept as-is, TODO confirm.
                htmlRendrering += "<tf>\n"
                for component1 in child:
                    htmlRendrering += "<th>"
                    generateRendering(component1)
                    htmlRendrering += "</th>\n"
                htmlRendrering += "</tf>\n"
            else:
                pass
    if e.find('values') is not None:
        for tr in e.find('values'):
            # NOTE(review): 'child' here leaks from the header loop above
            # (NameError if <children> was absent); the guard probably meant
            # to test 'tr' -- TODO confirm intent.
            if child.find('tr') is not None:
                htmlRendrering += "<tr>\n"
                for td1 in tr:
                    htmlRendrering += "<td>"
                    htmlRendrering += td1.text
                    htmlRendrering += "</td>\n"
                htmlRendrering += "</tr>\n"
    htmlRendrering += '</table><br/>\n'
def listBox(e):
    """Append a multi-select list box with one <option> per configured value."""
    global htmlRendrering
    htmlRendrering += e.find('identifier').text + ': <select multiple=\"multiple\"><br/>\n'
    # an empty value list simply produces no options
    for option in findvalues(e):
        htmlRendrering += '<option value=\"' + option + '\">' + option + '</option>\n'
    htmlRendrering += '</select><br/>\n'
def editText(e):
    """Append a text <input> pre-filled with the component's first configured value."""
    global htmlRendrering
    vals = findvalues(e)
    ident = e.find('identifier').text
    htmlRendrering += ident + ': <input type=\"text' + '\" name=\"' + ident + '\" id =\"' + ident + '\" value=\"' + vals[0] + '\"/><br/>\n'
def passwordText(e):
    """Append a password <input> pre-filled with the component's first configured value."""
    global htmlRendrering
    vals = findvalues(e)
    ident = e.find('identifier').text
    htmlRendrering += ident + ': <input type=\"password' + '\" name=\"' + ident + '\" id =\"' + ident + '\" value=\"' + vals[0] + '\"/><br/>\n'
def imageTag(e):
    """Append an <img> tag pointing at the bundled static python.jpg.

    Only the alt text comes from the component identifier; the src is
    hard-coded.  The original's unused ``l = findvalues(e)`` is removed.
    """
    global htmlRendrering
    htmlRendrering += '<img src=\"' + "{{ STATIC_URL }}python.jpg" + '\" alt =\"the image alternative is ' + e.find('identifier').text + '\"/><br/>\n'
def simpleTag(e):
    """Append a labelled <div> containing the first value plus all nested components, rendered recursively."""
    global htmlRendrering
    l = []
    l = findvalues(e)
    # NOTE(review): l[0] raises IndexError when <values> exists but is empty
    # (findvalues then returns []) -- confirm inputs always carry a value.
    htmlRendrering += e.find('identifier').text + ': <div name=\"' + e.find(
        'identifier').text + '\" id =\"' + e.find('identifier').text + '\" >' + l[0] + '<br/>'
    # recurse into every grandchild component inside this container
    for child in e:
        for component1 in child:
            generateRendering(component1)
    htmlRendrering += '</div><br/>'
def checkBoxes(e):
    """Render a checkbox group: delegate to checkBox when there are no children, otherwise recurse into them."""
    global htmlRendrering
    l = []
    l = findvalues(e)
    # NOTE(review): Element truthiness -- an empty <children> element is
    # falsy, so it falls into the single-checkbox branch; confirm intended.
    if not e.find('children'):
        checkBox(e)
    else:
        htmlRendrering += e.find('identifier').text + ': <br/>'
        for child in e:
            for component1 in child:
                generateRendering(component1)
        htmlRendrering += '<br/>'
def radioButtons(e):
    """Render a radio-button group: delegate to radioButton when there are no children, otherwise recurse into them."""
    global htmlRendrering
    l = []
    l = findvalues(e)
    # NOTE(review): Element truthiness -- an empty <children> element is
    # falsy, so it falls into the single-radio branch; confirm intended.
    if not e.find('children'):
        radioButton(e)
    else:
        htmlRendrering += e.find('identifier').text + ': <br/>'
        for child in e:
            for component1 in child:
                generateRendering(component1)
        htmlRendrering += '<br/>'
def ConfigureMysiteUrls():
    """(Re)write mysite/mysite/urls.py so the project routes /webApp/ to the generated app.

    Uses a with-statement so the file is closed even if the write fails
    (the original leaked the handle on error).
    """
    urlcontent = 'from django.conf.urls import patterns, include, url\n'
    urlcontent += "urlpatterns = patterns('',url(r'^webApp/', include('webApp.urls')),)\n"
    with open('mysite/mysite/urls.py', 'w') as urlsFile:
        urlsFile.write(urlcontent)
def listTag(e):
    """Append an ordered list whose items are the recursively rendered child components."""
    global htmlRendrering
    l = []
    l = findvalues(e)
    # NOTE(review): l[0] raises IndexError when <values> exists but is empty
    # (findvalues then returns []) -- confirm inputs always carry a value.
    htmlRendrering += e.find('identifier').text + ': <ol name=\"' + e.find(
        'identifier').text + '\" id =\"' + e.find('identifier').text + '\" >' + l[0] + '<br/>\n'
    # each grandchild component becomes one <li>
    for child in e:
        for component1 in child:
            htmlRendrering += "<li>"
            generateRendering(component1)
            htmlRendrering += "</li>\n"
    htmlRendrering += '</ol><br/>\n'
def buildingDjangoFramework():
    """Create a fresh Django project skeleton ('mysite' with app 'webApp') under the CWD.

    Removes any previous 'mysite' tree, shells out to django-admin.py and
    manage.py, then lays out static/ and templates/rendering/ directories and
    copies the bundled css/image assets into static/.  Restores the original
    working directory at the end (via the relative chdir back up).

    The original used bare ``except:`` clauses; these are narrowed to
    OSError/IOError so filesystem "already exists"/"missing source" errors
    stay best-effort while programming errors and Ctrl-C propagate.
    """
    def _best_effort(action):
        # optional filesystem step: tolerate missing/pre-existing paths only
        try:
            action()
        except (OSError, IOError):
            pass

    _best_effort(lambda: shutil.rmtree('mysite'))
    os.system("django-admin.py startproject mysite")
    os.chdir("mysite")
    os.system("python manage.py startapp webApp")
    os.chdir("webApp")
    _best_effort(lambda: os.makedirs("static"))
    _best_effort(lambda: shutil.copyfile('../../home.css', 'static/home.css'))
    _best_effort(lambda: shutil.copyfile('../../python.jpg', 'static/python.jpg'))
    _best_effort(lambda: os.makedirs("templates"))
    os.chdir("templates")
    _best_effort(lambda: os.makedirs("rendering"))
    os.chdir("../../../")
def generateRendering(component):
    """Dispatch a parsed <component> element to the renderer matching its type.

    Replaces the original 17-level nested if/else ladder with a lookup
    table.  Unknown component types are silently ignored, exactly as the
    ladder's missing final else did.
    """
    renderers = {
        'BUTTON': button,
        'LINK': link,
        'STATIC_TEXT': staticText,
        'RADIO_BUTTON': radioButton,
        'CHECK_BOX': checkBox,
        'COMBO_BOX': comboBox,
        'LIST_BOX': listBox,
        'EDIT_BOX': editText,
        'TEXT_AREA': textArea,
        'PASSWORD': passwordText,
        'DYNAMIC_TEXT': dynamicText,
        'SIMPLE': simpleTag,
        'LIST': listTag,
        'IMAGE': imageTag,
        'CHECK_BOXES': checkBoxes,
        'RADIO_BUTTONS': radioButtons,
        'TABLE': tableTag,
    }
    renderer = renderers.get(componentType(component))
    if renderer is not None:
        renderer(component)
def textArea(e):
    """Append a <textArea> pre-filled with the component's first configured value."""
    global htmlRendrering
    ident = e.find('identifier').text
    htmlRendrering += '<textArea name=\"' + ident + '\" id =\"' + ident + '\">\n'
    htmlRendrering += findvalues(e)[0] + '\n</textarea>\n'
def mainFunction():
    """Drive the whole generation: scaffold the Django project, render one HTML
    template per <screen> in tar.xml, and write matching views/urls/index files."""
    buildingDjangoFramework()
    # views.py starts with the index view; one more view is appended per screen
    viewsFile = open('mysite/webApp/views.py', 'w')
    viewsContent = 'from django.http import HttpResponse\n'
    viewsContent += 'from django.shortcuts import render\n'
    viewsContent +='def index(request):\n'
    viewsContent += "\treturn render(request, 'index.html')\n\n"
    # urls.py accumulates one route per screen; closed with ')' at the end
    urlsFile = open('mysite/webApp/urls.py' , 'w')
    urlcontent = 'from django.conf.urls import patterns, url\n'
    urlcontent += 'from webApp import views\n'
    urlcontent += "urlpatterns = patterns('',url(r'^$', views.index, name='index'),\n"
    # index.html collects one link per generated screen
    index = '<html>\n<head>\n'
    index += '<link href="{{ STATIC_URL }}home.css" rel="stylesheet" type="text/css" media="screen" />'
    index += '</head>\n<body>\n'
    index += '<br/>\n<br/>\n<div align="center">\n'
    global htmlRendrering
    tree = xml.parse("tar.xml")
    #Get the root node
    rootElement = tree.getroot()
    #print rootElement
    #Get a list of children elements with tag == "Books"
    screen = rootElement.findall("screen")
    i = 0
    if screen != None:
        # NOTE: the loop variable shadows the list it iterates ('screen')
        for screen in screen:
            linkName = screen.find('name').text
            fileName = "html"
            # djangoPurpose = view/url name ("html0", ...); fileName = template name
            djangoPurpose=fileName + str(i)
            viewsContent +='def ' + djangoPurpose + '(request):\n'
            fileName = fileName + str(i) + '.html'
            viewsContent += "\treturn render(request, 'rendering/"+fileName+"')\n\n"
            urlcontent += "url(r'rendering/" + djangoPurpose + "' , views." + djangoPurpose + " , name ='"+djangoPurpose+"'),\n"
            i += 1
            # render this screen's components into the shared global buffer
            openScreenToPage()
            for child in screen:
                for component in child:
                    generateRendering(component)
            endScreenEndPage()
            file = open('mysite/webApp/templates/rendering/' + fileName, 'w')
            index += '<a href=\"rendering/'+ djangoPurpose + '\">' + linkName + "</a><br/>\n"
            file.write(htmlRendrering)
            file.close()
            print htmlRendrering
            # reset the buffer for the next screen
            newScreen()
    index += '</div></body>\n</html>\n'
    fileIndex = open('mysite/webApp/templates/index.html' , 'w')
    fileIndex.write(index)
    fileIndex.close()
    viewsFile.write(viewsContent)
    viewsFile.close()
    urlcontent += ")"
    urlsFile.write(urlcontent)
    urlsFile.close()
    ConfigureMysiteUrls()
| {
"repo_name": "perfidia/screensketch",
"path": "src/screensketch/screenspec/rendering/reading.py",
"copies": "1",
"size": "14075",
"license": "mit",
"hash": -2870505999747858000,
"line_mean": 30.430875576,
"line_max": 151,
"alpha_frac": 0.4758792185,
"autogenerated": false,
"ratio": 3.9547625737566734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9885518841829146,
"avg_score": 0.009024590085505482,
"num_lines": 434
} |
__author__ = 'a'
import os
from pydocgen.model import ListStyleProperty, AlignmentProperty, FontEffectProperty, Image, Style, Table
import datetime
now = datetime.datetime.now()
from pydocgen.builders.common import Builder
class DitaMapBuilder(Builder):
    """Builder that emits a DITA map (.ditamap) document.

    Extends the shared Builder base class; generate_document serializes the
    document's title/author metadata plus one topic reference into XML.
    """
    def __init__(self):
        super(DitaMapBuilder, self).__init__()
        self.extension = "ditamap"

    def generate_document(self, document, ditaTitle):
        """Return the complete DITA map XML string referencing ditaTitle."""
        # fall back to hard-coded defaults when the document lacks metadata
        title = 'Default Title'
        if 'title' in document.properties:
            title = document.properties['title']
        author = 'Tarek Alkhaeir ,Tomasz Bajaczyk '
        if 'author' in document.properties:
            author = document.properties['author']
        pieces = [
            '<?xml version="1.0" encoding="utf-8"?>\n',
            '<!DOCTYPE map PUBLIC "-//OASIS//DTD DITA Map//EN" "http://docs.oasis-open.org/dita/v1.1/CD01/dtd/map.dtd">',
            '<map>\n',
            '\t<title>' + title + '</title>\n',
            '\t<topicmeta>\n',
            '\t<author> ' + author + ' </author>\n',
            '\t<critdates>\n',
            # 'now' is captured at module import time, not per call
            '\t<created date=\" ' + str(now) + '\" />\n',
            '\t</critdates>\n',
            '\t</topicmeta>\n',
            ' <topicref href=\"' + ditaTitle + '\"/>\n',
            '</map>',
        ]
        return ''.join(pieces)
| {
"repo_name": "perfidia/pydocgen",
"path": "src/pydocgen/builders/ditamap.py",
"copies": "1",
"size": "1547",
"license": "mit",
"hash": -2343160361916312600,
"line_mean": 33.1590909091,
"line_max": 126,
"alpha_frac": 0.5733678087,
"autogenerated": false,
"ratio": 3.6658767772511847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47392445859511845,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arobres, jfernandez'
# -*- coding: utf-8 -*-
from lettuce import step, world
from commons.rest_utils import RestUtils
from commons.product_steps import ProductSteps
from commons.provisioning_steps import ProvisioningSteps
from commons.constants import *
from commons.utils import response_body_to_dict, generate_product_instance_id
from nose.tools import assert_equals, assert_true
api_utils = RestUtils()
product_steps = ProductSteps()
provisioning_steps = ProvisioningSteps()
@step(u'a created product but not installed')
def a_created_product_but_not_installed(step):
    """Copy the scenario's product/VM row into the shared world and register the product."""
    row = step.hashes[0]
    world.product_name = row['product_name']
    world.product_version = row['version']
    world.vm_ip = row['ip']
    world.vm_hostname = row['hostname']
    world.vm_fqn = row['fqn']
    world.cm_tool = row['cm_tool']
    product_steps.a_created_product_with_name_and_release(step, world.product_name, world.product_version)
@step(u'a created and installed product')
def a_created_and_installed_product(step):
    """Create the product described by the step table, then install it in the VM."""
    a_created_product_but_not_installed(step)
    provisioning_steps.i_install_the_product_in_the_vm(step)
@step(u'I get all product instances')
def i_get_all_product_instances(step):
    """Fetch the tenant's product-instance list and keep the raw response on world."""
    world.response = api_utils.retrieve_product_instance_list(headers=world.headers, vdc_id=world.tenant_id)
@step(u'the product instance is in the returned list')
def the_product_instance_is_returned_in_the_list(step):
    """Assert the instance list contains the expected instance and that every
    attribute of it matches the data previously stored in world.

    Removes the original's no-op ``response_body = response_body`` statement
    and replaces the manual search loop with ``next``.
    """
    instance_id = generate_product_instance_id(world.vm_fqn, world.product_name, world.product_version)
    assert_true(world.response.ok, 'RESPONSE: {}'.format(world.response))
    response_body = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER],
                                          xml_root_element_name=PRODUCT_INSTANCE_LIST)
    assert_true(len(response_body) != 0)
    # locate the entry whose name contains the generated instance id
    product_instance = next((item for item in response_body if instance_id in item[PRODUCT_INSTANCE_NAME]),
                            None)
    assert_true(product_instance is not None)
    assert_equals(product_instance[PRODUCT_INSTANCE_NAME], instance_id)
    assert_true(product_instance[PRODUCT_INSTANCE_STATUS] != "")
    assert_equals(product_instance[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_IP], world.vm_ip)
    assert_equals(product_instance[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_FQN], world.vm_fqn)
    assert_equals(product_instance[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_HOSTNAME], world.vm_hostname)
    assert_equals(product_instance[PRODUCT][VERSION], world.product_version)
    assert_equals(product_instance[PRODUCT][PRODUCT_NAME], world.product_name)
@step(u'I get the product instance details')
def i_get_the_product_instance_details(step):
    """Build the canonical instance id from world's VM/product data and fetch that single instance."""
    world.instance_id = generate_product_instance_id(world.vm_fqn, world.product_name, world.product_version)
    world.response = api_utils.retrieve_product_instance(headers=world.headers, vdc_id=world.tenant_id,
                                                         product_instance_id=world.instance_id)
@step(u'the product instance is returned')
def the_product_instance_is_returned(step):
    """Assert the single-instance response matches all the data stored in world."""
    assert_true(world.response.ok, 'RESPONSE: {}'.format(world.response))
    response_body = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER],
                                          xml_root_element_name=PRODUCT_INSTANCE)
    assert_equals(response_body[PRODUCT_INSTANCE_NAME], world.instance_id)
    assert_true(response_body[PRODUCT_INSTANCE_STATUS] != "")
    assert_equals(response_body[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_IP], world.vm_ip)
    assert_equals(response_body[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_FQN], world.vm_fqn)
    assert_equals(response_body[PRODUCT_INSTANCE_VM][PRODUCT_INSTANCE_VM_HOSTNAME], world.vm_hostname)
    assert_equals(response_body[PRODUCT][VERSION], world.product_version)
    assert_equals(response_body[PRODUCT][PRODUCT_NAME], world.product_name)
@step(u'the HTTP response code is (.*)')
def the_http_response_code_is_group1(step, http_status_code):
    """Compare the captured response's status code with the expected one from the step text."""
    expected_code = int(http_status_code)
    assert_equals(world.response.status_code, expected_code)
| {
"repo_name": "Fiware/cloud.SDC",
"path": "test/acceptance/component/get_product_instances/features/get_product_instances.py",
"copies": "2",
"size": "4233",
"license": "apache-2.0",
"hash": -3755589022821014500,
"line_mean": 45.0108695652,
"line_max": 109,
"alpha_frac": 0.7205291755,
"autogenerated": false,
"ratio": 3.486820428336079,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5207349603836079,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arobres, jfernandez'
from json import JSONEncoder
import requests
from configuration import PUPPET_MASTER_PROTOCOL, PUPPET_WRAPPER_IP, PUPPET_WRAPPER_PORT, CONFIG_KEYSTONE_URL
PUPPET_WRAPPER_SERVER = '{}://{}:{}/puppetwrapper'.format(PUPPET_MASTER_PROTOCOL, PUPPET_WRAPPER_IP,
PUPPET_WRAPPER_PORT)
INSTALL_PATTERN = '{url_root}/v2/node/{nodeName}/install'
UNINSTALL_PATTERN = '{url_root}/v2/node/{nodeName}/uninstall'
GENERATE_PATTERN = '{url_root}/v2/node/{nodeName}/generate'
DELETE_NODE_PATTERN = '{url_root}/v2/node/{node_name}'
DELETE_MODULE_PATTERN = '{url_root}/delete/module/{software_name}'
DOWNLOAD_PATTERN = '{url_root}/download/{repository}/{software_name}'
class RestUtils(object):
    """Thin HTTP client around the PuppetWrapper REST API.

    Each public method maps to a single endpoint and delegates to
    _call_api, which fills the URL pattern and issues the request.

    Changes from the original: Python-2-only ``except Exception, e`` syntax
    replaced by ``except Exception as e`` and the Python-2 print statement by
    a single-argument ``print(...)`` call (identical output on Python 2),
    so the module also parses on Python 3; commented-out debug prints removed.
    """

    def __init__(self):
        """ Initialization method """
        self.api_url = PUPPET_WRAPPER_SERVER
        self.encoder = JSONEncoder()

    def _call_api(self, pattern, method, body=None, headers=None, payload=None, **kwargs):
        """
        Launch HTTP request to API with given arguments
        :param pattern: string pattern of API url with keyword arguments (format string syntax)
        :param method: HTTP method to execute (string)
        :param body: JSON/XML body content
        :param headers: HTTP header request (dict)
        :param payload: Query parameters for the URL
        :param **kwargs: URL parameters (without url_root) to fill the pattern
        :returns: REST API response, or None when the request itself crashed
        """
        kwargs['url_root'] = self.api_url
        url = pattern.format(**kwargs)
        try:
            return requests.request(method=method, url=url, data=body, headers=headers, params=payload, verify=False)
        except Exception as e:
            # best-effort: log the failure and signal it with None instead of raising
            print("Request {} to {} crashed: {}".format(method, url, str(e)))
            return None

    @staticmethod
    def get_keystone_token(body, headers=None):
        """POST the given credentials payload to Keystone and return the raw response."""
        return requests.request(method='post', url=CONFIG_KEYSTONE_URL, data=body, headers=headers, verify=False)

    def install(self, node_name, body, headers=None, method='post'):
        """
        POST /puppetwrapper/v2/node/{nodeName}/install
        {
            "attributes": [{
                    "value": "valor",
                    "key": "clave",
                    "id": 23119,
                    "description": null
                }],
            "version": "0.1",
            "group": "alberts",
            "softwareName": "testPuppet"
        }
        """
        return self._call_api(pattern=INSTALL_PATTERN, method=method, body=body, headers=headers, nodeName=node_name)

    def uninstall(self, node_name, body, headers=None, method='post'):
        """
        POST /puppetwrapper/v2/node/{nodeName}/uninstall
        {
            "attributes": [{
                    "value": "valor",
                    "key": "clave",
                    "id": 23119,
                    "description": null
                }],
            "version": "0.1",
            "group": "alberts",
            "softwareName": "testPuppet"
        }
        """
        return self._call_api(pattern=UNINSTALL_PATTERN, method=method, body=body, headers=headers, nodeName=node_name)

    def generate(self, node_name, headers=None, method='get'):
        """
        GET /puppetwrapper/v2/node/{nodeName}/generate
        """
        return self._call_api(pattern=GENERATE_PATTERN, method=method, headers=headers, nodeName=node_name)

    def delete_node(self, node_name, headers=None):
        """
        DELETE /puppetwrapper/v2/node/{nodeName}
        """
        return self._call_api(pattern=DELETE_NODE_PATTERN, method='delete', headers=headers, node_name=node_name)

    def delete_module(self, software_name, headers=None):
        """DELETE /puppetwrapper/delete/module/{software_name}"""
        return self._call_api(pattern=DELETE_MODULE_PATTERN, method='delete', headers=headers,
                              software_name=software_name)

    def download_module(self, software_name=None, repository=None, url=None, headers=None):
        """POST /puppetwrapper/download/{repository}/{software_name}; optional 'url' goes in the body."""
        api_body = {}
        if url is not None:
            api_body['url'] = url
        return self._call_api(pattern=DOWNLOAD_PATTERN, method='post', headers=headers, software_name=software_name,
                              repository=repository, body=api_body)
| {
"repo_name": "telefonicaid/fiware-puppetwrapper",
"path": "acceptance_tests/commons/rest_utils.py",
"copies": "1",
"size": "4597",
"license": "apache-2.0",
"hash": -1189331993192054800,
"line_mean": 36.3739837398,
"line_max": 119,
"alpha_frac": 0.5908201001,
"autogenerated": false,
"ratio": 3.8565436241610738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4947363724261074,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arobres'
# -*- coding: utf-8 -*-
from commons.rest_utils import RestUtils
from commons.constants import URL
import commons.assertions as Assertions
import commons.fabric_utils as Fabutils
from lettuce import step, world, before
from nose.tools import assert_true
# Shared REST client used by every step in this module.
api_utils = RestUtils()


@step(u'Given a module "([^"]*)"')
def given_a_module_group1(step, software_name):
    """Store the module name in the lettuce world for later steps."""
    world.software_name = software_name


@step(u'When I download the module from the "([^"]*)" repository')
def when_i_download_the_module_from_the_group1_repository(step, repository):
    """Request the module download; the URL comes from the scenario table."""
    url = step.hashes[0][URL]
    world.response = api_utils.download_module(software_name=world.software_name, repository=repository, url=url)


@step(u'Then the module is downloaded')
def then_the_module_is_downloaded(step):
    """Check the API answered OK and the module file exists on the node."""
    Assertions.assert_response_ok(world.response)
    assert_true(Fabutils.execute_assert_download(world.software_name))


@step(u'Then I obtain an "([^"]*)"')
def then_i_obtain_an_group1(step, expected_status_code):
    """Check the error status code returned by the previous request."""
    world.software_to_generate = []
    Assertions.assert_error_code(response=world.response, error_code=expected_status_code)
| {
"repo_name": "telefonicaid/fiware-puppetwrapper",
"path": "acceptance_tests/component/download_module/features/steps.py",
"copies": "1",
"size": "1153",
"license": "apache-2.0",
"hash": 1930313487351474200,
"line_mean": 26.4523809524,
"line_max": 113,
"alpha_frac": 0.7328707719,
"autogenerated": false,
"ratio": 3.2942857142857145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9402780183707908,
"avg_score": 0.024875260495561245,
"num_lines": 42
} |
__author__ = 'arobres'
# -*- coding: utf-8 -*-
from nose.tools import assert_equals, assert_true, assert_in
from constants import INSTALL_GROUP_NAME, INSTALL_NODE_NAME, INSTALL_MANIFEST_GENERATED, OP_SOFTWARE_LIST
# Reusable assertion-failure message templates.
HTTP_CODE_NOT_OK = u'Invalid HTTP status code. Status Code obtained is: {}\n RESPONSE OBTAINED IS: {}'
INCORRECT_HTTP_STATUS_CODE = u'Invalid HTTP status code. Status Code obtained is: {}\n Status Code expected is: {}\n'
INCORRECT_PARAMETER_SIMPLE = u'Incorrect value for the parameter {}'
INCORRECT_PARAMETER = u'Incorrect value for the parameter {}\n. Expected value is: {} \n Obtained value is: {}'
# JSON key of the error-description field in API responses.
DESCRIPTION = 'description'
def assert_install_response(response, node_name, group_name, software_to_generate_list=None):
    """Assert an install/uninstall response is OK and matches node and group.

    :param response: requests' Response returned by the API
    :param node_name: expected node name field value
    :param group_name: expected group name field value
    :param software_to_generate_list: optional list of software expected in
        the response's software list
    """
    assert_response_ok(response)
    try:
        response = response.json()
    except Exception as e:
        # Fail with context instead of the original bare 'except: assert False',
        # which hid the JSON-decoding error entirely.
        assert False, 'Response body is not valid JSON: {}'.format(e)
    assert_equals(response[INSTALL_NODE_NAME], node_name, INCORRECT_PARAMETER.format(INSTALL_NODE_NAME, node_name,
                                                                                     response[INSTALL_NODE_NAME]))
    assert_equals(response[INSTALL_GROUP_NAME], group_name, INCORRECT_PARAMETER.format(INSTALL_GROUP_NAME, group_name,
                                                                                       response[INSTALL_GROUP_NAME]))
    if software_to_generate_list is not None:
        for software in software_to_generate_list:
            assert_in(software, response[OP_SOFTWARE_LIST])
def assert_generate_response(response, node_name, group_name):
    """Assert a generate response is OK, matches node/group and reports the
    manifest as generated.

    :param response: requests' Response returned by the API
    :param node_name: expected node name field value
    :param group_name: expected group name field value
    """
    assert_response_ok(response)
    try:
        response = response.json()
    except Exception as e:
        # Fail with context instead of the original bare 'except: assert False'.
        assert False, 'Response body is not valid JSON: {}'.format(e)
    assert_equals(response[INSTALL_NODE_NAME], node_name, INCORRECT_PARAMETER.format(INSTALL_NODE_NAME, node_name,
                                                                                     response[INSTALL_NODE_NAME]))
    assert_equals(response[INSTALL_GROUP_NAME], group_name, INCORRECT_PARAMETER.format(INSTALL_GROUP_NAME, group_name,
                                                                                       response[INSTALL_GROUP_NAME]))
    assert_equals(response[INSTALL_MANIFEST_GENERATED], True)
def assert_error_code(response, error_code):
    """Assert the response status code equals the expected one (compared as strings)."""
    expected = str(error_code)
    obtained = str(response.status_code)
    assert_equals(obtained, expected,
                  INCORRECT_HTTP_STATUS_CODE.format(response.status_code, expected))
def assert_response_ok(response):
    """Assert the HTTP response has a non-error (ok) status code."""
    assert_true(response.ok, HTTP_CODE_NOT_OK.format(response.status_code, response.content))
def assert_message_description(response, message_description):
    """Assert the 'description' field of an already-parsed response body dict."""
    assert_equals(response[DESCRIPTION], message_description) | {
"repo_name": "telefonicaid/fiware-puppetwrapper",
"path": "acceptance_tests/commons/assertions.py",
"copies": "1",
"size": "2696",
"license": "apache-2.0",
"hash": 6341365427172781000,
"line_mean": 44.7118644068,
"line_max": 120,
"alpha_frac": 0.6442878338,
"autogenerated": false,
"ratio": 4.023880597014926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5168168430814926,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arobres'
from selenium.webdriver.support.ui import Select
class BasePage(object):
    """Base class for Selenium page objects: wraps a webdriver instance."""

    # Overridden by subclasses with the page's absolute URL.
    url = None

    def __init__(self, driver):
        self.driver = driver

    def navigate(self):
        # Open this page's URL in the browser.
        self.driver.get(self.url)
class Homepage(BasePage):
    """Page object for the forum home page."""

    url = "http://localhost:8081/v1.0"

    def go_new_user(self):
        # Follow the 'Create a new user' link.
        self.driver.find_element_by_link_text('Create a new user').click()

    def go_new_forum_message(self):
        # NOTE(review): only this navigation method returns the target page
        # object; the others return None — confirm whether they should too.
        self.driver.find_element_by_link_text('Create new forum message').click()
        return NewMessageForumTest(self.driver)

    def go_user_list(self):
        self.driver.find_element_by_link_text('List users').click()

    def go_forum_messages_list(self):
        self.driver.find_element_by_link_text('List forum message').click()

    def get_header(self):
        # Header element shown on the first visit.
        return self.driver.find_element_by_id('header_first_time')
class NewMessageForumTest(BasePage):
    """Page object for the 'new forum message' form."""

    url = "http://localhost:8081/v1.0/forum/new"

    def fill_theme(self, theme):
        """Pick a theme in the drop-down by its visible text."""
        theme_select = Select(self.driver.find_element_by_id('theme'))
        theme_select.select_by_visible_text(theme)

    def fill_subject(self, subject):
        """Type the subject into its input field."""
        self.driver.find_element_by_id('subject').send_keys(subject)

    def fill_message(self, message):
        """Type the message body into its input field."""
        self.driver.find_element_by_id('message').send_keys(message)

    def click_button(self):
        """Submit the form; the browser lands on the message list page."""
        self.driver.find_element_by_id('save').click()
        return MessageListPage(self.driver)

    def fill_all_form(self, theme, subject, message):
        """Fill every field and submit, returning the resulting list page."""
        self.fill_theme(theme)
        self.fill_subject(subject)
        self.fill_message(message)
        self.click_button()
        return MessageListPage(self.driver)
class MessageListPage(BasePage):
    """Page object for the user/message listing page."""

    url = "http://localhost:8081/v1.0/users/"

    def get_table_data(self):
        # Returns [header_text, first_data_cell_text] from the forum table.
        forum_data_list = []
        forum_table = self.driver.find_element_by_xpath(".//*[@id='forum_table']/tbody")
        rows = forum_table.find_elements_by_tag_name('tr')
        forum_data_list.append(rows[0].find_elements_by_tag_name('th')[0].text)
        forum_data_list.append(rows[1].find_elements_by_tag_name('td')[0].text)
        return forum_data_list
| {
"repo_name": "twiindan/forum_html",
"path": "test/exercices/exercice2/page_object.py",
"copies": "1",
"size": "2139",
"license": "apache-2.0",
"hash": -5901366508748434000,
"line_mean": 28.7083333333,
"line_max": 88,
"alpha_frac": 0.6526414212,
"autogenerated": false,
"ratio": 3.3265940902021773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9476800859736612,
"avg_score": 0.00048693033311310383,
"num_lines": 72
} |
__author__ = 'arobres'

# Shared string constants for the SDC acceptance tests: JSON/XML key names,
# HTTP header names/values and fixture payloads.

#AUTHENTICATION CONSTANTS
AUTH = u'auth'
TENANT_NAME = u'tenantName'
USERNAME = u'username'
PASSWORD = u'password'
ACCESS = u'access'
TOKEN = u'token'
TENANT = u'tenant'
ID = u'id'

#PRODUCT_PROPERTIES
PRODUCT_NAME = u'name'
PRODUCT_DESCRIPTION = u'description'
PRODUCT = u'product'
PRODUCTS = u'products'
PRODUCT_ATTRIBUTES = u'attributes'
PRODUCT_METADATAS = u'metadatas'
METADATA = u'metadata'
ATTRIBUTE = u'attribute'
KEY = u'key'
VALUE = u'value'
DESCRIPTION = u'description'
ATTRIBUTE_TYPE = u'type'
ATTRIBUTE_TYPE_PLAIN = u'Plain'
ATTRIBUTE_TYPE_IPALL = u'IPALL'

#HEADERS
CONTENT_TYPE = u'content-type'
CONTENT_TYPE_JSON = u'application/json'
CONTENT_TYPE_XML = u'application/xml'
AUTH_TOKEN_HEADER = u'X-Auth-Token'
TENANT_ID_HEADER = u'Tenant-Id'
ACCEPT_HEADER = u'Accept'
ACCEPT_HEADER_XML = u'application/xml'
ACCEPT_HEADER_JSON = u'application/json'

#PRODUCT RELEASE
PRODUCT_RELEASE = u'productRelease'
PRODUCT_RELEASE_LIST = u'productReleases'
VERSION = u'version'

#INCORRECT PARAMETERS
LONG_ID = 'long' * 64 + 'a' #STRING WITH 257 characters

#DEFAULT_METADATA
# NOTE(review): the DEFAULT_METADATA literal below contains 5 entries while
# this constant says 6 — possibly a sixth metadata (e.g. tenant_id) is added
# at runtime; confirm before relying on it.
NUMBER_OF_DEFAULT_SDC_METADATA = 6
DEFAULT_METADATA = {"metadata": [{"key": "image", "value": ""},
                                 {"key": "cookbook_url", "value": ''}, {"key": "cloud", "value": "yes"},
                                 {"key": "installator", "value": "chef"}, {"key": "open_ports", "value": "80 22"}]}

DEFAULT_ATTRIBUTE = {"attribute": [{"key": "custom_att_01", "value": "att_01_default", "type": "Plain"},
                                   {"key": "custom_att_02", "value": "att_02_default", "type": "Plain"}]}

# Expected XML body when a product has no releases.
PRODUCT_RELEASE_WITHOUT_RELEASES_RESPONSE = u'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>' \
                                            u'<productReleases></productReleases>'

# FABRIC AND PRODUCT INSTALLATION
FABRIC_RESULT_EXECUTE = u'<local-only>'
PRODUCT_FILE_NAME_FORMAT = u'{product_name}_{product_version}_{installator}'
PRODUCT_INSTALLATION_FILE_CONTENT = u'Operation: install; Product: {product_name}; Version: {product_version}; Att01: {att_01}; Att02: {att_02}'
PRODUCT_INSTALLATION_ATT1_DEFAULT = u'att_01_default'
PRODUCT_INSTALLATION_ATT2_DEFAULT = u'att_02_default'

#PRODUCT_INSTALLATION_PARAMETERS
PRODUCT_INSTANCE_LIST = u'productInstanceDtoes'
PRODUCT_INSTANCE = u'productInstanceDto'
PRODUCT_INSTANCE_NAME = u'name'
PRODUCT_INSTANCE_STATUS = u'status'
PRODUCT_INSTANCE_VM = u'vm'
PRODUCT_INSTANCE_VM_IP = u'ip'
PRODUCT_INSTANCE_VM_FQN = u'fqn'
PRODUCT_INSTANCE_VM_OSTYPE = u'osType'
PRODUCT_INSTANCE_VM_HOSTNAME = u'hostname'
PRODUCT_INSTANCE_ATTRIBUTES = u'attributes'

# METADATAS VALUES
INSTALLATOR = u'installator'
INSTALLATOR_VALUE = (u'puppet', u'chef')
METADATA_TENANT_ID = u'tenant_id'

#TASKS
TASK = u'task'
TASK_HREF = u'href'
TASK_STARTTIME = u'startTime'
TASK_STATUS = u'status'
TASK_DESCRIPTION = u'description'
TASK_VDC = u'vdc'
TASK_ERROR = u'error'
TASK_ERROR_MINOR_CODE = u'minorErrorCode'
# '@'-prefixed keys are XML attributes after xml-to-dict conversion.
TASK_URL = u'@href'
STATUS = u'status'
STATUS_XML = u'@status'
VDC = u'vdc'
TASK_STATUS_VALUE_RUNNING = u'RUNNING'
TASK_STATUS_VALUE_SUCCESS = u'SUCCESS'
TASK_STATUS_VALUE_ERROR = u'ERROR'
TASK_STATUS_VALUE_INSTALLED = u'INSTALLED'
TASK_STATUS_VALUE_UNINSTALLED = u'UNINSTALLED'

#PRODUCTANDRELEASE VALUES
PRODUCTANDRELEASE_LIST = u'productAndReleaseDtoes'
PRODUCTANDRELEASE = u'productAndReleaseDto'

#ATTRIBUTE FROM CONFIG FILE (for loading values from config_file)
CONFIG_FILE = u'${CONFIG_FILE}'
| {
"repo_name": "Fiware/cloud.SDC",
"path": "test/acceptance/commons/constants.py",
"copies": "2",
"size": "3483",
"license": "apache-2.0",
"hash": 4966594859409688000,
"line_mean": 29.2869565217,
"line_max": 144,
"alpha_frac": 0.6965259833,
"autogenerated": false,
"ratio": 2.9642553191489363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9586425133310011,
"avg_score": 0.014871233827784996,
"num_lines": 115
} |
__author__ = 'arobres'
# -*- coding: utf-8 -*-
from lettuce import step, world, before, after
from commons.authentication import get_token
from commons.rest_utils import RestUtils
from commons.product_body import default_product, create_default_attribute_list, create_default_metadata_list,\
create_product_release
from commons.utils import dict_to_xml, set_default_headers, xml_to_dict
from commons.constants import CONTENT_TYPE, PRODUCT_NAME, ACCEPT_HEADER, AUTH_TOKEN_HEADER, CONTENT_TYPE_JSON, LONG_ID, \
VERSION, PRODUCT_RELEASE
from nose.tools import assert_equals, assert_true
# Shared REST client for all steps in this module.
api_utils = RestUtils()


@before.each_feature
def setup_feature(feature):
    """Obtain an auth token and tenant once per feature."""
    world.token_id, world.tenant_id = get_token()


@before.each_scenario
def setup_scenario(scenario):
    """Reset default headers and clean up testing products before each scenario."""
    world.headers = set_default_headers(world.token_id, world.tenant_id)
    api_utils.delete_all_testing_products(world.headers)
    world.attributes = None
    world.metadatas = None
@step(u'Given a created product with name "([^"]*)"')
def given_a_created_product_with_name_group1(step, product_id):
    """Create a product through the API and remember its name."""
    body = dict_to_xml(default_product(name=product_id))
    response = api_utils.add_new_product(headers=world.headers, body=body)
    assert_true(response.ok, response.content)
    world.product_id = response.json()[PRODUCT_NAME]


@step(u'Given a created product with attributes and name "([^"]*)"')
def given_a_created_product_with_attributes_and_name_group1(step, product_id):
    """Create a product with two default attributes."""
    attributes = create_default_attribute_list(2)
    body = dict_to_xml(default_product(name=product_id, attributes=attributes))
    response = api_utils.add_new_product(headers=world.headers, body=body)
    assert_true(response.ok, response.content)
    world.product_id = response.json()[PRODUCT_NAME]
@step(u'Given a created product with metadatas and name "([^"]*)"')
def given_a_created_product_with_metadatas_and_name_group1(step, product_id):
    """Create a product with two default metadatas.

    Renamed: this function previously reused the name of the 'attributes'
    step above, shadowing it at module level. Lettuce registers steps at
    decoration time, so both steps keep working unchanged.
    """
    metadatas = create_default_metadata_list(2)
    body = dict_to_xml(default_product(name=product_id, metadata=metadatas))
    response = api_utils.add_new_product(headers=world.headers, body=body)
    assert_true(response.ok, response.content)
    world.product_id = response.json()[PRODUCT_NAME]
@step(u'Given a created product with all data and name "([^"]*)"')
def given_a_created_product_with_all_data_and_name_group1(step, product_id):
    """Create a product with five default metadatas and five attributes."""
    metadatas = create_default_metadata_list(5)
    attributes = create_default_attribute_list(5)
    body = dict_to_xml(default_product(name=product_id, metadata=metadatas, attributes=attributes))
    response = api_utils.add_new_product(headers=world.headers, body=body)
    assert_true(response.ok, response.content)
    world.product_id = response.json()[PRODUCT_NAME]
# NOTE(review): this step pattern is missing the closing double-quote after
# the second capture group — it still matches as a prefix, but confirm it is
# intentional.
@step(u'a created product release "([^"]*)" assigned to the "([^"]*)')
def a_created_product_release_group1_assigned_to_the_group2(step, product_release, product_name):
    """Shortcut: create a release keeping the current Accept header."""
    create_release_product(step, product_release, product_name, world.headers[ACCEPT_HEADER])


@step(u'When I create the product release "([^"]*)" assigned to the "([^"]*)" with accept parameter "([^"]*)" response')
def create_release_product(step, product_release, product_name, accept_content):
    """Create a product release; the literal 'LONG_ID' is replaced by a 257-char version string."""
    if product_release == 'LONG_ID':
        world.product_release = LONG_ID
    else:
        world.product_release = product_release
    world.headers[ACCEPT_HEADER] = accept_content
    body = dict_to_xml(create_product_release(version=world.product_release))
    world.response = api_utils.add_product_release(headers=world.headers, body=body, product_id=product_name)
@step(u'Then the product release is created')
def then_the_product_release_is_created(step):
    """Check the release-creation response and reset the default headers.

    Handles both JSON and XML bodies depending on the server's Content-Type.
    """
    assert_true(world.response.ok, world.response.content)
    response_headers = world.response.headers
    if response_headers[CONTENT_TYPE] == CONTENT_TYPE_JSON:
        try:
            response_body = world.response.json()
        except Exception as e:
            # The original printed the error and fell through, which then
            # raised a confusing NameError on 'response_body'; fail loudly.
            assert False, 'Response body is not valid JSON: {}'.format(e)
    else:
        response_body = xml_to_dict(world.response.content)[PRODUCT_RELEASE]
    assert_equals(response_body[VERSION], world.product_release)
    world.headers = set_default_headers(world.token_id, world.tenant_id)
@step(u'Then I obtain an "([^"]*)"')
def then_i_obtain_an_group1(step, error_code):
    """Compare the response status code and restore the default headers."""
    assert_equals(str(world.response.status_code), error_code, 'RESPONSE: {}'.format(world.response.content))
    world.headers = set_default_headers(world.token_id, world.tenant_id)


@step(u'And incorrect "([^"]*)" header')
def and_incorrect_content_type_header(step, content_type):
    """Force an invalid Content-Type header for the next request."""
    world.headers[CONTENT_TYPE] = content_type


@step(u'And incorrect "([^"]*)" authentication')
def incorrect_token(step, new_token):
    """Force an invalid auth token for the next request."""
    world.headers[AUTH_TOKEN_HEADER] = new_token
@after.all
def tear_down(scenario):
    """Refresh the token and delete every testing product created by the suite."""
    world.token_id, world.tenant_id = get_token()
    world.headers = set_default_headers(world.token_id, world.tenant_id)
    api_utils.delete_all_testing_products(world.headers)
| {
"repo_name": "Fiware/cloud.SDC",
"path": "test/acceptance/component/add_product_release/features/add_product_release.py",
"copies": "2",
"size": "4959",
"license": "apache-2.0",
"hash": -7603671484269627000,
"line_mean": 38.672,
"line_max": 121,
"alpha_frac": 0.7211131276,
"autogenerated": false,
"ratio": 3.3989033584647017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5120016486064702,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arobres'
# -*- coding: utf-8 -*-
from lettuce import step, world
from commons.rest_utils import RestUtils
from commons.product_steps import ProductSteps
from commons.provisioning_steps import ProvisioningSteps
from commons.utils import wait_for_task_finished, response_body_to_dict
from commons.constants import *
from commons.configuration import CONFIG_VM_HOSTNAME, CONFIG_VM_IP, CONFIG_VM_FQN, CONFIG_PRODUCT_NAME_CHEF, CONFIG_PRODUCT_NAME_PUPPET, \
CONFIG_PRODUCT_VERSION_CHEF, CONFIG_PRODUCT_VERSION_PUPPET
from nose.tools import assert_equals, assert_true, assert_false
from lettuce_tools.dataset_utils.dataset_utils import DatasetUtils
import time
# Shared helpers instantiated once per module.
api_utils = RestUtils()
product_steps = ProductSteps()
provisioning_steps = ProvisioningSteps()
dataset_utils = DatasetUtils()


@step(u'a configuration management with "(.*)"')
def configuration_management_with_group1(step, cm_tool):
    """Remember which CM tool (chef/puppet) the scenario uses."""
    world.cm_tool = cm_tool


@step(u'a created product with name "([^"]*)" and release "([^"]*)"')
def a_created_product_with_name_group1(step, product_id, product_release):
    """Create a product in the catalog; CONFIG_FILE placeholders are replaced
    by chef- or puppet-specific values from the configuration file."""
    world.product_name = product_id
    world.product_description = "QA Test"
    world.product_version = product_release
    if product_id == CONFIG_FILE:
        world.product_name = CONFIG_PRODUCT_NAME_CHEF if (world.cm_tool is not None and world.cm_tool == 'chef') \
            else CONFIG_PRODUCT_NAME_PUPPET
    if product_release == CONFIG_FILE:
        world.product_version = CONFIG_PRODUCT_VERSION_CHEF if (world.cm_tool is not None and world.cm_tool == 'chef') \
            else CONFIG_PRODUCT_VERSION_PUPPET
    # Point the 'installator' metadata at the scenario's CM tool.
    metadata_list = DEFAULT_METADATA[METADATA]
    for metadata in metadata_list:
        # NOTE(review): 'in' is a substring test here; it happens to match only
        # the "installator" key with the current defaults, but '==' would be
        # safer — confirm before changing.
        if metadata["key"] in "installator":
            metadata["value"] = world.cm_tool
            break
    product_steps.a_created_product_with_name_and_release_with_metadatas(step=step, product_name=world.product_name,
                                                                         product_version=world.product_version,
                                                                         metadatas=metadata_list)


@step(u'a non existent product with name "([^"]*)"')
def given_a_not_existent_product_with_name_group1(step, product_name):
    """Reference a product that was never created (negative tests)."""
    world.product_name = product_name
    world.product_version = '1.0.0'


@step(u'a existent product with name "([^"]*)" and no product release')
def given_a_existent_product_with_name_group1_and_no_product_release(step, product_name):
    """Create the product but point at a release number that does not exist."""
    world.product_name = product_name
    world.product_version = '50.0.0'
    product_steps.a_created_product_with_name(step, product_name=product_name)
@step(u'a virtual machine with these parameters:')
def and_a_vm_with_this_parameters(step):
    """Delegate: store the VM parameters from the scenario table."""
    provisioning_steps.and_a_vm_with_this_parameters(step)


@step(u'a VM with hostname "([^"]*)"')
def a_vm_with_hostname_group1(step, hostname):
    """Store the VM hostname; CONFIG_FILE maps to the configured value."""
    world.vm_hostname = CONFIG_VM_HOSTNAME if CONFIG_FILE == hostname else hostname


@step(u'a VM with hostname "([^"]*)" and ip "([^"]*)"')
def a_vm_with_hostname_group1_and_ip_group2(step, hostname, ip):
    """Store both the VM hostname and IP."""
    a_vm_with_hostname_group1(step, hostname)
    world.vm_ip = CONFIG_VM_IP if CONFIG_FILE == ip else ip


@step(u'a VM with fqn "([^"]*)"')
def a_vm_with_fqn_group1(step, fqn):
    """Store the VM fully-qualified name."""
    world.vm_fqn = CONFIG_VM_FQN if CONFIG_FILE == fqn else fqn


@step(u'a VM with hostname "([^"]*)" and fqn "([^"]*)"')
def a_vm_with_hostname_group1_and_fqn_group2(step, vm_hostname, vm_fqn):
    """Store both the VM hostname and FQN."""
    a_vm_with_hostname_group1(step, vm_hostname)
    a_vm_with_fqn_group1(step, vm_fqn)


@step(u'the following instance attributes:')
def the_following_instance_attributes(step):
    """Collect the instance attributes from the scenario table."""
    world.instance_attributes = []
    for row in step.hashes:
        row = dict(dataset_utils.prepare_data(row))
        world.instance_attributes.append(row)


@step(u'the following product attributes:')
def the_following_product_attributes(step):
    """Collect the product attributes from the scenario table."""
    world.attributes = []
    for row in step.hashes:
        row = dict(dataset_utils.prepare_data(row))
        world.attributes.append(row)


@step(u'content type header values:')
def content_type_header_values(step):
    """Override Content-Type and Accept headers from the scenario table."""
    world.headers[CONTENT_TYPE] = step.hashes[0]["content_type"]
    world.headers[ACCEPT_HEADER] = step.hashes[0]["accept"]


@step(u'And the accept header "([^"]*)"')
def and_the_accept_header_group1(step, accept_content):
    """Override only the Accept header."""
    world.headers[ACCEPT_HEADER] = accept_content


@step(u'And incorrect "([^"]*)" authentication')
def incorrect_token(step, new_token):
    """Force an invalid auth token for the next request."""
    world.headers[AUTH_TOKEN_HEADER] = new_token


@step(u'I install the product in the VM')
def i_install_the_product_in_the_vm(step):
    """Delegate: trigger the product installation request."""
    provisioning_steps.i_install_the_product_in_the_vm(step)
@step(u'I try to install the product with empty params "(.*)"')
def i_try_to_install_the_product_with_empty_param_group1(step, empty_params):
    """Blank every world attribute whose token appears in empty_params, then install."""
    token_to_attr = (('product_name', 'product_name'),
                     ('release', 'product_version'),
                     ('hostname', 'vm_hostname'),
                     ('fqn', 'vm_fqn'),
                     ('ip', 'vm_ip'),
                     ('ostype', 'vm_ostype'))
    # Substring matching, like the original chained ternaries.
    for token, attr in token_to_attr:
        if token in empty_params:
            setattr(world, attr, "")
    i_install_the_product_in_the_vm(step)
@step(u'the task is created')
def task_is_created(step):
    """Delegate: the install request returned a task resource."""
    provisioning_steps.task_is_created(step)


@step(u'the task has finished with status "(RUNNING|SUCCESS|ERROR)"$')
def the_task_has_finished_with_status_group1(step, status):
    """Poll the task until it reaches the given final status."""
    finished = wait_for_task_finished(vdc_id=world.tenant_id, task_id=world.task_id,
                                      status_to_be_finished=status, headers=world.headers)
    assert_true(finished, 'Task is not in the correct status. Expected: {}'.format(status))


@step(u'the task has the minor error code "(.*)"')
def the_task_has_the_minor_error_code_group1(step, error_minor_code):
    """Fetch the task and compare its minorErrorCode field."""
    response = api_utils.retrieve_task(headers=world.headers, vdc_id=world.tenant_id, task_id=world.task_id)
    assert_true(response.ok, 'RESPONSE: {}'.format(response.content))
    world.task_response_body = response_body_to_dict(response, world.headers[ACCEPT_HEADER], with_attributes=True,
                                                     xml_root_element_name=TASK)
    assert_equals(world.task_response_body[TASK_ERROR][TASK_ERROR_MINOR_CODE], error_minor_code)


@step(u'the task is not created')
def task_is_not_created(step):
    """Negative case: the install request was rejected."""
    assert_false(world.response.ok, 'RESPONSE: {}'.format(world.response.content))


@step(u'the product is instantiated')
def the_product_is_instantiated(step):
    """Give the instance time to appear, then delegate the check."""
    # Wait for product instance (5s).
    time.sleep(5)
    provisioning_steps.the_product_is_instantiated(step)


@step(u'the product is not instantiated')
def the_product_is_not_instantiated(step):
    """The product instance resource must not exist."""
    world.instance_id = "{}_{}_{}".format(world.vm_fqn, world.product_name, world.product_version)
    response = api_utils.retrieve_product_instance(headers=world.headers, vdc_id=world.tenant_id,
                                                   product_instance_id=world.instance_id)
    assert_false(response.ok, 'RESPONSE: {}'.format(world.response.content))


@step(u'the product installation status is "(ERROR|INSTALLED|UNINSTALLED)"')
def the_product_installation_status_is(step, status):
    """Delegate the installation-status check."""
    provisioning_steps.the_product_installation_status_is(step, status)


@step(u'the product has the correct attributes in the catalog')
def the_product_has_the_correct_attributes_in_the_catalog(step):
    """Retrieve the product and compare name, description and attributes."""
    response = api_utils.retrieve_product(headers=world.headers, product_id=world.product_name)
    assert_true(response.ok, 'RESPONSE: {}'.format(response.content))
    response_body = response_body_to_dict(response, world.headers[ACCEPT_HEADER], xml_root_element_name=PRODUCT)
    assert_equals(response_body[PRODUCT_NAME], world.product_name)
    assert_equals(response_body[PRODUCT_DESCRIPTION], world.product_description)
    # NOTE(review): a single-element attribute list is unwrapped before the
    # comparison — presumably matching the xml-to-dict conversion; confirm.
    if len(world.attributes) == 1:
        world.attributes = world.attributes[0]
    assert_equals(world.attributes, response_body[PRODUCT_ATTRIBUTES])


@step(u'I obtain an "([^"]*)"')
def i_obtain_an_group1(step, error_code):
    """Compare the stored response's status code with the expected one."""
    assert_equals(str(world.response.status_code), error_code, world.response.content)
| {
"repo_name": "telefonicaid/fiware-sdc",
"path": "test/acceptance/component/install_product/features/install_product.py",
"copies": "2",
"size": "8313",
"license": "apache-2.0",
"hash": 1754753608863650600,
"line_mean": 37.8457943925,
"line_max": 138,
"alpha_frac": 0.6920485986,
"autogenerated": false,
"ratio": 3.3642250101173614,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006857665265548493,
"num_lines": 214
} |
__author__ = 'arobres'
# -*- coding: utf-8 -*-
from commons.rest_utils import RestUtils
from commons.constants import INSTALL, UNINSTALL, ACTION, SOFTWARE_NAME, VERSION
from nose.tools import assert_equals
import commons.assertions as Assertions
import commons.fabric_utils as Fabutils
from lettuce import step, world
# Shared REST client for all steps in this module.
api_utils = RestUtils()


@step(u'Given a generated node "([^"]*)" in the tenant "([^"]*)"')
def given_a_generated_node_group1_in_the_tenant_group2(step, node_name, group_name):
    """Run the install/uninstall actions from the table and generate the node."""
    world.node_name = node_name
    world.group_name = group_name
    for examples in step.hashes:
        print examples[ACTION]
        if examples[ACTION] == INSTALL:
            res = api_utils.install(group=group_name, node_name=node_name, software_name=examples[SOFTWARE_NAME],
                                    version=examples[VERSION])
            Assertions.assert_response_ok(response=res)
        elif examples[ACTION] == UNINSTALL:
            res = api_utils.uninstall(group=group_name, node_name=node_name, software_name=examples[SOFTWARE_NAME],
                                      version=examples[VERSION])
            # NOTE(review): the uninstall branch uses the richer
            # assert_install_response while install only checks the status
            # code — confirm the asymmetry is intended.
            Assertions.assert_install_response(response=res, node_name=node_name, group_name=group_name)
        else:
            assert False, 'Not valid action'
    res = api_utils.generate(node_name=node_name)
    Assertions.assert_response_ok(response=res)


@step(u'When I delete the node "([^"]*)"')
def when_i_delete_the_node_group1(step, node_name):
    """Issue the DELETE node request and keep the response."""
    world.response = api_utils.delete_node(node_name=node_name)


@step(u'Then the node file is deleted')
def then_the_node_is_deleted(step):
    """The API answered OK and the node file is gone from the master."""
    Assertions.assert_response_ok(world.response)
    assert_equals(Fabutils.execute_delete_node(group=world.group_name, node_name=world.node_name), False)


@step(u'And the import is removed from the site file')
def and_the_import_is_removed_from_the_site_file(step):
    """The node's import line no longer appears in the group's site file."""
    assert_equals(Fabutils.execute_import_deleted(group=world.group_name), False)


@step(u'The node is not deleted from the system')
def the_node_is_not_deleted_from_the_system(step):
    """Negative case: both the node file and the import line still exist."""
    Assertions.assert_response_ok(world.response)
    assert_equals(Fabutils.execute_delete_node(group=world.group_name, node_name=world.node_name), True)
    assert_equals(Fabutils.execute_import_deleted(group=world.group_name), True)
| {
"repo_name": "telefonicaid/fiware-puppetwrapper",
"path": "acceptance_tests/component/delete_node/features/steps.py",
"copies": "1",
"size": "2333",
"license": "apache-2.0",
"hash": 1040673329218511700,
"line_mean": 32.3285714286,
"line_max": 115,
"alpha_frac": 0.6935276468,
"autogenerated": false,
"ratio": 3.4208211143695015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46143487611695017,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arobres'
from bottle import run, template, Bottle, request, response, auth_basic, redirect, static_file, TEMPLATE_PATH
from constants import THEME, SUBJECT, MESSAGES
from collections import defaultdict
import ujson
from sys import argv
import os
from time import sleep
# Make bottle look for templates next to this file.
TEMPLATE_PATH.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".")))

app = Bottle()

# In-memory application state (wiped via /v1.0/reset).
user_list = []
# Required field sets for user and forum-message payloads.
USER_ATTRIBUTES = {'name', 'username', 'password', 'role', 'email'}
FORUM_ATTRIBUTES = {'theme', 'subject', 'message'}
ROLES = ['QA', 'DEVELOPER', 'MANAGER']
THEMES = ['Security', 'Development', 'Automation', 'Testing']
# Inboxes and forum threads keyed by username / theme.
user_messages_dict = defaultdict(list)
forum_messages_dict = defaultdict(list)
def check_username(username, password):
    """Return True when some registered user has this username/password pair."""
    return any(user['username'] == username and user['password'] == password
               for user in user_list)
@app.get("/v1.0")
@app.get("/v1.0/")
def im_alive():
    """Render the welcome page (landing / health-check endpoint)."""
    output = template('welcome', first_time=True)
    return output


@app.get("/v1.0/reset")
@app.get("/v1.0/reset/")
def reset_data():
    """Wipe all in-memory users and messages (test fixture helper)."""
    del user_list[:]
    user_messages_dict.clear()
    forum_messages_dict.clear()
@app.post("/v1.0/users")
@app.post("/v1.0/users/")
def create_user():
    """Register a new user from the posted form.

    Returns 400 for missing fields or an unknown role, 409 for a duplicate
    username; on success redirects to the user list.
    """
    # Only process the form when the 'save' submit button was sent; a POST
    # without it falls through and produces an empty 200 response.
    if request.POST.get('save','').strip():
        name = request.POST.get('name', '').strip()
        username = request.POST.get('username', '').strip()
        password = request.POST.get('password', '').strip()
        role = request.POST.get('role', '').strip()
        email = request.POST.get('email', '').strip()
        if name == '' or username == '' or password == '' or role == '' or email == '':
            response.status = 400
            return {"message": "some parameter is not correct"}
        if role not in ROLES:
            response.status = 400
            return {"message": "Role not valid"}
        if find_user(username):
            response.status = 409
            return {"message": "User exist!"}
        else:
            body = create_body(name, username, password, role, email)
            user_list.append(body)
        redirect("/v1.0/users")
@app.get("/v1.0/users")
@app.get("/v1.0/users/")
def list_users():
    """Render the user list page.

    NOTE(review): the 10 s sleep looks deliberate (this app serves as a
    fixture for UI-test exercises, where a slow endpoint forces explicit
    waits) — confirm before removing.
    """
    sleep(10)
    if len(user_list) == 0:
        return ' <h1 id="userlist">User List</h1>'
    else:
        print(user_list)  # NOTE(review): debug trace left in — intentional?
        output = template('user_list', rows=user_list)
        return output
@app.post("/v1.0/users/inbox/<username>")
@app.post("/v1.0/users/inbox/<username>/")
def create_user_message(username):
    """Store a JSON message in the inbox of an existing user.

    Returns 400 for malformed JSON and 404 for an unknown user.
    """
    body = "".join(request.body)
    try:
        body = ujson.loads(body)
    except Exception:
        # Narrowed from a bare 'except:', which also trapped SystemExit and
        # KeyboardInterrupt.
        response.status = 400
        return {"message": "The JSON format is not correct"}
    user_exist = find_user(username=username)
    if not user_exist:
        response.status = 404
        return {"message": "The user not exists"}
    # defaultdict(list): the first message for a user creates the inbox.
    receiver_list = user_messages_dict[username]
    receiver_list.append(body)
    response.status = 200
    return 'message saved'
@app.get("/v1.0/users/inbox/<username>")
@app.get("/v1.0/users/inbox/<username>/")
def get_user_messages(username):
    """Show a user's inbox; unauthenticated visitors are sent to /login."""
    user_cookie = request.get_cookie("username")
    if user_cookie is None:
        redirect("/v1.0/login/")
    # NOTE(review): user_login stores the reversed *username* in this cookie,
    # but here it is reversed back and passed as the *password* to
    # check_username — so the inbox only opens when password == username.
    # Confirm this is intended for the exercise.
    if check_username(username, user_cookie[::-1]):
        receiver_list = user_messages_dict[username]
        return {"username": username, "messages": receiver_list}
    else:
        redirect("/v1.0/login/")


@app.delete("/v1.0/users/inbox/<username>")
@app.delete("/v1.0/users/inbox/<username>/")
def delete_messages_from_user(username):
    """Drop every stored message for the given user."""
    del(user_messages_dict[username])
    return 'messages deleted'


@app.get("/v1.0/forum/new")
@app.get("/v1.0/forum/new/")
def publish_to_forum_html():
    """Render the 'new forum message' form."""
    return template('new_forum_message.tpl')
@app.get("/v1.0/users/new")
@app.get("/v1.0/users/new/")
def new_user_html():
    """Render the 'create user' form.

    Renamed: this handler previously reused the name publish_to_forum_html,
    shadowing the forum-form handler. Bottle registers routes at decoration
    time, so routing behaviour is unchanged.
    """
    return template('new_user.tpl')
@app.post("/v1.0/forum")
@app.post("/v1.0/forum/")
def publish_to_forum():
    """Validate and store a forum message, then redirect to the forum list.

    Returns 400 for missing fields or an unknown theme.
    """
    # Only process the form when the 'save' submit button was sent.
    if request.POST.get('save','').strip():
        theme = request.POST.get('theme', '').strip()
        subject = request.POST.get('subject', '').strip()
        message = request.POST.get('message', '').strip()
        if theme == '' or subject == '' or message == '' :
            response.status = 400
            return {"message": "some parameter is not correct"}
        if theme not in THEMES:
            # Fixed: this was 4010, which is not a valid HTTP status code
            # (the analogous role check in create_user uses 400).
            response.status = 400
            return {"message": "Theme not valid"}
        else:
            body = {THEME: theme, SUBJECT: subject, MESSAGES: message}
            forum_list = forum_messages_dict[body['theme']]
            forum_list.append(body)
        redirect('/v1.0/forum')
@app.get("/v1.0/forum")
@app.get("/v1.0/forum/")
def get_messages():
    """List forum messages, optionally filtered by ?theme=... (repeatable)."""
    theme_to_filter = request.query.getall('theme')
    if len(forum_messages_dict) == 0:
        return 'No forum messages'
    if len(theme_to_filter) == 0:
        # No filter: show every theme.
        return template('forum_messages_list', rows=forum_messages_dict)
    # Generalized: any number of theme filters. The original only handled a
    # single filter and silently returned None (an empty page) for more.
    filtered = dict((theme, forum_messages_dict[theme]) for theme in theme_to_filter)
    return template('forum_messages_list', rows=filtered)
@app.get("/v1.0/login")
@app.get("/v1.0/login/")
def login_page():
    """Render the login form."""
    return template('login.tpl')
@app.get("/v1.0/demo")
@app.get("/v1.0/demo/")
def demo_page():
    """Render the demo page.

    Renamed: this handler previously reused the name 'login_page', shadowing
    the real login handler. Bottle registers routes at decoration time, so
    routing behaviour is unchanged.
    """
    return template('demoweb.tpl')
@app.post("/v1.0/login")
@app.post("/v1.0/login/")
def user_login():
    """Validate the login form; on success set the session cookie and open
    the user's inbox.

    SECURITY(review): the session cookie is just the reversed username,
    unsigned — trivially forgeable; acceptable only for this training app.
    """
    if request.POST.get('save', '').strip():
        username = request.POST.get('username', '').strip()
        password = request.POST.get('password', '').strip()
        if username == '' or password == '':
            response.status = 400
            return {"message": "some parameter is not correct"}
        if not check_username(username, password):
            response.status = 403
            return {"message": "Invalid user or password"}
        else:
            response.set_cookie("username", username[::-1])
            redirect('/v1.0/users/inbox/{}'.format(username))
def find_user(username):
    """Return True when *username* belongs to a registered user."""
    return any(user['username'] == username for user in user_list)
def check_user_body(body):
    """Validate that *body* carries every expected user attribute.

    True only when all USER_ATTRIBUTES keys are present in *body* and the
    attribute list itself has exactly 5 entries (same contract as the
    original counter-based implementation).
    """
    for attribute in USER_ATTRIBUTES:
        if attribute not in body:
            return False
    return len(USER_ATTRIBUTES) == 5
def check_forum_body(body):
    """Validate that *body* carries every expected forum attribute.

    True only when all FORUM_ATTRIBUTES keys are present in *body* and the
    attribute list itself has exactly 3 entries (same contract as the
    original counter-based implementation).
    """
    for attribute in FORUM_ATTRIBUTES:
        if attribute not in body:
            return False
    return len(FORUM_ATTRIBUTES) == 3
def create_body(name=None, username=None, pwd=None, role=None, email=None):
    """Build a user payload dict containing only the supplied fields.

    Note the 'pwd' argument is stored under the 'password' key; fields
    left as None are omitted entirely.
    """
    fields = (('name', name), ('username', username), ('password', pwd),
              ('role', role), ('email', email))
    return {key: value for key, value in fields if value is not None}
@app.route('/static/css/')
def cssget():
    # Placeholder response for the bare /static/css/ directory URL.
    return "hello"
@app.route('/static/css/<filename>')
def cssget(filename):
    # Serve a CSS asset from ./static/css.
    # NOTE(review): redefines cssget from the route above.
    return static_file(filename, root="./static/css")
@app.route('/static/fonts/<filename>')
def fontsget(filename):
    # Serve a font asset from ./static/fonts.
    return static_file(filename, root="./static/fonts")
@app.route('/virus/<filename>')
def virusget(filename):
    # Serve files from ./virus — presumably a download fixture for this
    # teaching/testing app.  NOTE(review): confirm this route is never
    # exposed outside the training environment.
    return static_file(filename, root="./virus")
@app.route('/')
def cssget():
    # Landing page.  NOTE(review): third redefinition of cssget in this
    # module; routes still dispatch correctly, but the names collide.
    return "Hello to twiindan website"


# Start the WSGI server; the listen port comes from the first CLI argument.
run(app, host='0.0.0.0', port=argv[1], reloader=True)
| {
"repo_name": "twiindan/forum_html",
"path": "forum/forum.py",
"copies": "1",
"size": "7710",
"license": "apache-2.0",
"hash": -2373237890052809700,
"line_mean": 24.1960784314,
"line_max": 109,
"alpha_frac": 0.5942931258,
"autogenerated": false,
"ratio": 3.5253772290809327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46196703548809326,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arobres'
from constants import PRODUCT, PRODUCT_DESCRIPTION, PRODUCT_NAME, PRODUCT_ATTRIBUTES, PRODUCT_METADATAS, KEY, \
DESCRIPTION, VALUE, VERSION, ATTRIBUTE_TYPE, ATTRIBUTE_TYPE_PLAIN
from utils import id_generator, delete_keys_from_dict
def simple_product_body(description=None, name=None):
    # Minimal SDC product payload: description and name only.
    return {PRODUCT: {PRODUCT_DESCRIPTION: description, PRODUCT_NAME: name}}
def product_with_attributes(description=None, name=None, attributes=None):
    """Build a product payload carrying an attribute list.

    A single-element attribute list is unwrapped to its bare dict, matching
    the payload shape the API expects.  Bug fix: the original called
    ``len(attributes)`` unconditionally, so the documented default
    ``attributes=None`` raised TypeError; the guard keeps the default usable
    without changing behavior for list arguments.
    """
    if attributes is not None and len(attributes) == 1:
        attributes = attributes[0]
    return {PRODUCT: {PRODUCT_DESCRIPTION: description, PRODUCT_NAME: name, PRODUCT_ATTRIBUTES: attributes}}
def product_with_metadata(description=None, name=None, metadata=None):
    """Build a product payload carrying a metadata list.

    A single-element metadata list is unwrapped to its bare dict.  Bug fix:
    the original called ``len(metadata)`` unconditionally, so the documented
    default ``metadata=None`` raised TypeError; the guard keeps the default
    usable without changing behavior for list arguments.
    """
    if metadata is not None and len(metadata) == 1:
        metadata = metadata[0]
    return {PRODUCT: {PRODUCT_DESCRIPTION: description, PRODUCT_NAME: name, PRODUCT_METADATAS: metadata}}
def product_with_all_parameters(description=None, name=None, metadata=None, attributes=None):
    """Build a product payload with both metadata and attributes.

    Single-element lists are unwrapped to bare dicts.  Bug fix: the original
    called ``len(...)`` unconditionally, so the documented ``None`` defaults
    raised TypeError; the guards keep the defaults usable without changing
    behavior for list arguments.
    """
    if attributes is not None and len(attributes) == 1:
        attributes = attributes[0]
    if metadata is not None and len(metadata) == 1:
        metadata = metadata[0]
    return {PRODUCT: {PRODUCT_DESCRIPTION: description, PRODUCT_NAME: name, PRODUCT_METADATAS: metadata,
                      PRODUCT_ATTRIBUTES: attributes}}
def default_product(name=None, metadata=None, attributes=None):
    # Build a product payload with a random 20-char description.  When
    # metadata/attributes are not supplied their keys are removed entirely
    # (rather than sent as null) via delete_keys_from_dict.
    body_dict = {PRODUCT: {PRODUCT_DESCRIPTION: id_generator(20), PRODUCT_NAME: name, PRODUCT_METADATAS: metadata,
                           PRODUCT_ATTRIBUTES: attributes}}
    if attributes is None:
        body_dict = delete_keys_from_dict(body_dict, PRODUCT_ATTRIBUTES)
    if metadata is None:
        body_dict = delete_keys_from_dict(body_dict, PRODUCT_METADATAS)
    return body_dict
def create_default_metadata_list(num_metadatas=2):
    """
    Creates a list with random metadata values
    :param num_metadatas: Number of metadatas to be generated
    :return: A list of random metadatas
    """
    return [{KEY: id_generator(10), DESCRIPTION: id_generator(10), VALUE: id_generator(10)}
            for _ in range(num_metadatas)]
def create_default_attribute_list(num_attributes=2):
    """
    Creates a list with random attribute values and default type
    :param num_attributes: Number of attributes to be generated
    :return: A list of random attributes
    """
    return [{KEY: id_generator(10), DESCRIPTION: id_generator(10), VALUE: id_generator(10),
             ATTRIBUTE_TYPE: ATTRIBUTE_TYPE_PLAIN}
            for _ in range(num_attributes)]
def create_product_release(version=None):
    # Payload for creating a product release with the given version.
    return {"productReleaseDto": {VERSION: version}}
| {
"repo_name": "telefonicaid/fiware-sdc",
"path": "test/acceptance/commons/product_body.py",
"copies": "2",
"size": "2883",
"license": "apache-2.0",
"hash": -3388338838218715000,
"line_mean": 30,
"line_max": 114,
"alpha_frac": 0.6899063476,
"autogenerated": false,
"ratio": 3.7984189723320156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5488325319932016,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arobres'
from utils import delete_keys_when_value_is_none
from constants import PRODUCT_INSTANCE, PRODUCT_INSTANCE_VM, PRODUCT_INSTANCE_VM_HOSTNAME, PRODUCT_INSTANCE_VM_IP, \
PRODUCT_INSTANCE_VM_FQN, PRODUCT_INSTANCE_VM_OSTYPE, PRODUCT, PRODUCT_NAME, VERSION, PRODUCT_INSTANCE_ATTRIBUTES
def installation_body_with_attributes(product_name=None, product_version=None, hostname=None, ip=None, fqn=None,
                                      ostype=None, attributes=None):
    """Build a product-installation payload carrying instance attributes.

    A single-element attribute list is unwrapped to its bare dict.  Bug fix:
    the original called ``len(attributes)`` unconditionally, so the documented
    default ``attributes=None`` raised TypeError; the guard keeps the default
    usable without changing behavior for list arguments.
    """
    if attributes is not None and len(attributes) == 1:
        attributes = attributes[0]
    vm_dict = build_vm_body(hostname=hostname, ip=ip, fqn=fqn, ostype=ostype)
    return {PRODUCT_INSTANCE: {PRODUCT: {PRODUCT_NAME: product_name, VERSION: product_version},
                               PRODUCT_INSTANCE_VM: vm_dict, PRODUCT_INSTANCE_ATTRIBUTES: attributes}}
def simple_installation_body(product_name=None, product_version=None, hostname=None, ip=None, fqn=None,
                             ostype=None):
    # Product-installation payload without instance attributes.
    vm_dict = build_vm_body(hostname=hostname, ip=ip, fqn=fqn, ostype=ostype)
    return {PRODUCT_INSTANCE: {PRODUCT: {PRODUCT_NAME: product_name, VERSION: product_version},
                               PRODUCT_INSTANCE_VM: vm_dict}}
def build_vm_body(hostname=None, ip=None, fqn=None, ostype=None):
    # Describe the target VM; keys whose value is None are stripped so the
    # payload only carries fields that were actually supplied.
    default_dict = {PRODUCT_INSTANCE_VM_HOSTNAME: hostname, PRODUCT_INSTANCE_VM_IP: ip, PRODUCT_INSTANCE_VM_FQN: fqn,
                    PRODUCT_INSTANCE_VM_OSTYPE: ostype}
    return delete_keys_when_value_is_none(default_dict)
"repo_name": "telefonicaid/fiware-sdc",
"path": "test/acceptance/commons/installation_body.py",
"copies": "2",
"size": "1517",
"license": "apache-2.0",
"hash": -5828251861232568000,
"line_mean": 49.6,
"line_max": 117,
"alpha_frac": 0.6769940672,
"autogenerated": false,
"ratio": 3.4089887640449437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5085982831244943,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arobres'
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from nose.tools import assert_equals
import requests
# Reset the forum fixture so the exercise starts from a clean server state.
requests.get('http://localhost:8081/v1.0/reset')

#DEFINE DATA
subject_data = 'First Message with Selenium!'
message_data = "I'm automating my first test with Python and Selenium!"

#INIT THE FIREFOX WEBDRIVER
driver = webdriver.Firefox()

#SET IMPLICIT WAIT TO 10 SECONDS
driver.implicitly_wait(10)

#GET THE MAIN PAGE
driver.get("http://localhost:8081/v1.0")

#ASSERT THE MAIN PAGE IS OPENED
header = driver.find_element_by_id('header_first_time')
assert_equals(header.text, 'Welcome to VLC Testing Python Forum')

# The steps below are intentionally left for the student to implement
# (see test/solutions/solution1.py); each heading describes one step.
#LOCATE AND CLICK IN THE FIRST LINK USING FIND ELEMENT BY LINK TEXT (Create new forum message) ()
#LOCATE THE THEME ELEMENT (IS A COMBOBOX) THE ID IS 'theme'
#SELECT VALUE (AUTOMANTION)
#LOCATE THE SUBJECT ELEMENT. THE ID IS 'subject'
#SEND KEYS TO THE SUBJECT ELEMENT
#LOCATE THE MESSAGE ELEMENT. THE ID IS 'message'
#SEND KEYS TO THE SUBJECT ELEMENT
#LOCATE THE BUTTON (ID=save) AND CLICK IT
#LOCATE THE TABLE
#LOCATE THE ROWS AND HEADERS
#ASSERT THE HEADERS
#ASSERT THE ROWS
#CLOSE THE WINDOW
driver.close()
| {
"repo_name": "twiindan/forum_html",
"path": "test/exercices/exercice1.py",
"copies": "1",
"size": "1213",
"license": "apache-2.0",
"hash": -1865776380524027400,
"line_mean": 17.1044776119,
"line_max": 97,
"alpha_frac": 0.7493816983,
"autogenerated": false,
"ratio": 3.024937655860349,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4274319354160349,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arobres'
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from nose.tools import assert_equals
import requests
# Reset the forum fixture so the test starts from a clean server state.
requests.get('http://localhost:8081/v1.0/reset')

#DEFINE DATA
subject_data = 'First Message with Selenium!'
message_data = "I'm automating my first test with Python and Selenium!"

#INIT THE FIREFOX WEBDRIVER
driver = webdriver.Firefox()

#SET IMPLICIT WAIT TO 10 SECONDS
driver.implicitly_wait(10)

#GET THE MAIN PAGE
driver.get("http://localhost:8081/v1.0")

#ASSERT THE MAIN PAGE IS OPENED
header = driver.find_element_by_id('header_first_time')
assert_equals(header.text, 'Welcome to VLC Testing Python Forum')

#LOCATE AND CLICK IN THE FIRST LINK USING FIND ELEMENT BY LINK TEXT (Create new forum message) ()
new_user_link = driver.find_element_by_link_text('Create new forum message')
new_user_link.click()

#LOCATE THE THEME ELEMENT (IS A COMBOBOX) THE ID IS 'theme'
theme = Select(driver.find_element_by_id('theme'))
#SELECT VALUE (AUTOMANTION)
theme.select_by_visible_text('Automation')

#LOCATE THE SUBJECT ELEMENT. THE ID IS 'subject'
subject = driver.find_element_by_id('subject')
#SEND KEYS TO THE SUBJECT ELEMENT
subject.send_keys(subject_data)

#LOCATE THE MESSAGE ELEMENT. THE ID IS 'message'
message = driver.find_element_by_id('message')
#SEND KEYS TO THE SUBJECT ELEMENT
message.send_keys(message_data)

#LOCATE THE BUTTON (ID=save) AND CLICK IT
save_button = driver.find_element_by_id('save')
save_button.click()

#LOCATE THE TABLE
forum_table = driver.find_element_by_xpath(".//*[@id='forum_table']/tbody")

#LOCATE THE ROWS AND HEADERS
rows = forum_table.find_elements_by_tag_name('tr')
header_columns = rows[0].find_elements_by_tag_name('th')

#ASSERT THE HEADERS
# NOTE(review): range(1) only checks the first header cell, and compares it
# to the *subject* — presumably the template renders the subject as the
# table header; verify against forum_messages_list.tpl.
for x in range(1):
    assert_equals(header_columns[x].text, subject_data)

#ASSERT THE ROWS
# NOTE(review): every <td> in every data row is expected to equal the
# message body — confirm the template really renders only message cells.
for x in range(1, len(rows)):
    columns = rows[x].find_elements_by_tag_name('td')
    for i in range(len(columns)):
        assert_equals(columns[i].text, message_data)

#CLOSE THE WINDOW
driver.close()
| {
"repo_name": "twiindan/forum_html",
"path": "test/solutions/solution1.py",
"copies": "1",
"size": "2050",
"license": "apache-2.0",
"hash": -7051663387366634000,
"line_mean": 26.7027027027,
"line_max": 97,
"alpha_frac": 0.7409756098,
"autogenerated": false,
"ratio": 2.9839883551673947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4224963964967394,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arocchi'
import argparse
from lxml import etree
class SoftHandLoader(object):
    """Parse a soft-hand URDF and expose its transmission parameters.

    Python 2 code (uses ``file()``, ``basestring`` and ``dict.has_key``).
    ``handParameters`` maps finger -> joint -> {'r': reduction,
    'e': elasticity}; ``jointToLink`` maps each revolute joint name to the
    first non-fake child link in its kinematic chain.
    """
    def __init__(self,filename):
        # *filename* is a path string: the file is opened and read here.
        self.handParameters = dict()
        self.jointToLink = dict()
        self.urdf = etree.fromstring(file(filename).read())
        # Only transmissions of the adaptive-synergy type carry the hand
        # parameters we care about.
        for transmission_el in self.urdf.iter('transmission'):
            for transmission_type_el in transmission_el.iter('type'):
                if isinstance(transmission_type_el.tag, basestring):
                    if transmission_type_el.text == 'transmission_interface/AdaptiveSynergyTransmission':
                        self.handParameters = self.parseTransmission(transmission_el)
        self.jointToLink = self.parseJointToLink()
    def parseTransmission(self, transmission_el):
        # Extract per-joint mechanical reduction 'r' and elasticity 'e'.
        # Joint names are expected to look like soft_hand_<finger>_<joint>_joint.
        handParams = dict()
        for joint_el in transmission_el.iter('joint'):
            if isinstance(joint_el.tag, basestring):
                joint_name = joint_el.get('name')
                _,_,finger,joint,_ = joint_name.split('_')
                for R_el in joint_el.iter('mechanicalReduction'):
                    if not handParams.has_key(finger):
                        handParams[finger] = dict()
                    handParams[finger][joint] = {'r':float(R_el.text)}
                for E_el in joint_el.iter('mechanicalElasticity'):
                    handParams[finger][joint]['e']=float(E_el.text)
        return handParams
    def parseJointToLink(self):
        # Map every non-mimic revolute joint to its (non-fake) child link.
        jointToLink = dict()
        for joint_el in self.urdf.iter('joint'):
            if isinstance(joint_el.tag, basestring):
                if 'type' in joint_el.keys() and joint_el.get('type') == 'revolute':
                    joint_name = joint_el.get('name')
                    is_mimic = (joint_name.split('_')[-1]=='mimic')
                    if is_mimic:
                        continue
                    jointToLink[joint_name] = self.parseJointChildLink(joint_name)
        return jointToLink
    def parseJointChildLink(self,joint_name):
        # Return the child link of *joint_name*, skipping over 'fake' links
        # by recursing down the chain until a real link is found.
        for joint_el in self.urdf.iter('joint'):
            if isinstance(joint_el.tag, basestring):
                if 'name' in joint_el.keys() and joint_el.get('name') == joint_name:
                    if 'type' in joint_el.keys() and joint_el.get('type') == 'revolute':
                        for child_link_el in joint_el.iter('child'):
                            if isinstance(joint_el.tag, basestring):
                                if 'link' in child_link_el.keys():
                                    link_name = child_link_el.get('link')
                                    # Link names look like soft_hand_<finger>_<phalanx>_link;
                                    # a 4th token 'fake' marks a placeholder link.
                                    _,_,_,fake,_ = link_name.split('_')
                                    is_fake = (fake == 'fake')
                                    if not is_fake:
                                        return link_name
                                    else:
                                        childLinkChildJoint = self.parseChildWithParentLink(link_name)
                                        return self.parseJointChildLink(childLinkChildJoint)
        raise Exception('could not find child link for joint %s'%joint_name)
    def parseChildWithParentLink(self,link_name):
        # Return the name of the revolute joint whose <parent> is *link_name*.
        for joint_el in self.urdf.iter('joint'):
            if isinstance(joint_el.tag, basestring):
                if 'type' in joint_el.keys() and joint_el.get('type') == 'revolute':
                    for child_link_el in joint_el.iter('parent'):
                        if isinstance(joint_el.tag, basestring):
                            if 'link' in child_link_el.keys():
                                if child_link_el.get('link') == link_name:
                                    return joint_el.get('name')
        raise Exception('could not joint with parent link %s'%link_name)
    def jointToPhalanx(self, finger, joint_position):
        # Map (finger, joint position) -> phalanx token of the child link.
        _,_,_,phalanx,_= self.jointToLink['soft_hand_%s_%s_joint'%(finger,joint_position)].split('_')
        return phalanx
    def phalanxToJoint(self, finger, phalanx):
        # Inverse of jointToPhalanx: find the joint whose child link is the
        # given phalanx of the given finger.
        for key,val in self.jointToLink.iteritems():
            if val == 'soft_hand_%s_%s_link'%(finger,phalanx):
                joint_name = key.split('_')[3]
                return joint_name
        raise Exception('could not find parent joint for link soft_hand_%s_%s_link'%(finger,phalanx))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(usage='SoftHandLoader soft_hand_urdf_file\nLoad an URDF file and gets transmission information out of it')
    parser.add_argument('urdf_file', type=argparse.FileType('r'), nargs='?',
                        default=None, help='URDF file. Use - for stdin')
    args = parser.parse_args()
    # Extract robot name and directory
    if args.urdf_file is None:
        print("Error! no urdf_file provided")
        exit()
    else:
        # Bug fix: SoftHandLoader expects a file *name* (its constructor
        # opens the file itself), while argparse.FileType yields an open
        # file object — pass the object's .name.  (This still does not
        # support the advertised '-'/stdin case.)
        loader = SoftHandLoader(args.urdf_file.name)
        # Bug fix: the attribute is ``handParameters``; the original printed
        # ``loader.handParams``, which raised AttributeError.
        print(loader.handParameters)
| {
"repo_name": "arocchi/Klampt",
"path": "Python/control/soft_hand_loader.py",
"copies": "1",
"size": "4864",
"license": "bsd-3-clause",
"hash": 6205676918513388000,
"line_mean": 45.3238095238,
"line_max": 143,
"alpha_frac": 0.5435855263,
"autogenerated": false,
"ratio": 4.087394957983193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5130980484283193,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arosado'
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as scistats
import pickle
import json
import csv
class BFPData:
    """Loader and analyser for Biomembrane Force Probe (BFP) cycle exports.

    NOTE(review): the attributes below are *class* attributes, so the
    mutable ``allData`` list is shared by every instance.  The module-level
    driver uses a single instance, but creating a second one would share
    the same list.
    """
    currentDirectory = None   # directory the data files live in
    currentFile = None        # open file object being parsed
    currentFileName = None    # bare file name of the current file
    currentFileData = None    # list of cycle dicts for the current file
    currentCycleData = None   # cycle dict selected via setCurrentCycle
    currentCycleIndex = None  # index of the selected cycle
    allData = []              # per-file lists of cycle dicts (shared!)
    def parseFile(self):
        """Parse self.currentFile (a BFP text export) into cycle dicts.

        Layout: three tab-separated experiment-parameter lines, then data
        lines of ``time<TAB>state<TAB>piezo<TAB>peak``.  A data line whose
        columns 0, 1 and 3 are all zero marks the start of a new cycle;
        its column 2 carries the event number.  The resulting list of
        cycle dicts is stored in self.currentFileData and appended to
        self.allData.

        NOTE(review): the cycle being *closed* receives the event number
        read from the marker that starts the *next* cycle, and the final
        cycle of the file is never flushed into allFileData — confirm both
        against the acquisition software's format.
        """
        fileText = self.currentFile.read()
        fileLines = fileText.split('\n')
        expParaLines = fileLines[0:3]
        expLines = fileLines[4:len(fileLines)]
        expPara = {}
        startData = True
        startTime = False  # NOTE(review): written below but never read
        expData = {}       # NOTE(review): unused
        cycleData = {}
        timeStamps = []
        bfpStates = []
        piezoVoltages = []
        peakPositions = []
        count = 0          # NOTE(review): unused
        firstExpParaLine = True
        secondExpParaLine = False
        thirdExpParaLine = False
        allFileData = []
        # The three header lines form one parameter group; every cycle dict
        # references this same expPara dict.
        for line in expParaLines:
            lineData = line.split('\t')
            if firstExpParaLine:
                expPara['experimentMode'] = float(lineData[0])
                expPara['edgesNumber'] = float(lineData[1])
                expPara['u2ratio'] = float(lineData[2])
                expPara['velocityBias'] = float(lineData[3])
                firstExpParaLine = False
                secondExpParaLine = True
            elif secondExpParaLine:
                expPara['springConstant'] = float(lineData[0])
                expPara['pipetteDiameter'] = float(lineData[1])
                expPara['rbcCellDiameter'] = float(lineData[2])
                expPara['contactDiscDiameter'] = float(lineData[3])
                expPara['beadDiameter'] = float(lineData[4])
                expPara['aspiratedLength'] = float(lineData[5])
                expPara['aspirationPressure'] = float(lineData[6])
                expPara['temperature'] = float(lineData[7])
                expPara['viscosity'] = float(lineData[8])
                expPara['corticalTension'] = float(lineData[9])
                secondExpParaLine = False
                thirdExpParaLine = True
            elif thirdExpParaLine:
                expPara['impingingRate'] = float(lineData[0])
                expPara['loadingRate'] = float(lineData[1])
                expPara['primingRate'] = float(lineData[2])
                expPara['retractingRate'] = float(lineData[3])
                expPara['impingmentForce'] = float(lineData[4])
                expPara['clampForce'] = float(lineData[5])
                expPara['activationForce'] = float(lineData[6])
                expPara['timeoutAtClamp'] = float(lineData[7])
                expPara['contactTimeInSeconds'] = float(lineData[8])
                expPara['cycleInterval'] = float(lineData[9])
                firstExpParaLine = True
                secondExpParaLine = False
                thirdExpParaLine = False
        for line in expLines:
            lineData = line.split('\t')
            if startData:
                # First line after a cycle marker: treated as the event line.
                eventNumber = float(lineData[2])
                startTime = True
                startData = False
                indexFileInFile = 0  # NOTE(review): unused
            else:
                if len(lineData) == 4:
                    if (0.000000 == float(lineData[0])) and (0.000000 == float(lineData[1])) and (0.000000 == float(lineData[3])):
                        # Zero marker: close the current cycle, start a new one.
                        eventNumber = float(lineData[2])
                        startData = True
                        startTime = True
                        cycleData['timeStamps'] = timeStamps
                        cycleData['bfpStates'] = bfpStates
                        cycleData['piezoVoltages'] = piezoVoltages
                        cycleData['mainPeakPositions'] = peakPositions
                        cycleData['eventNumber'] = eventNumber
                        cycleData['experimentParameters'] = expPara
                        allFileData.append(cycleData)
                        cycleData = {}
                        timeStamps = []
                        bfpStates = []
                        piezoVoltages = []
                        peakPositions = []
                    else:
                        # Ordinary sample line.
                        timeStamp = float(lineData[0])
                        timeStamps.append(timeStamp)
                        bfpState = float(lineData[1])
                        bfpStates.append(bfpState)
                        piezoVoltage = float(lineData[2])
                        piezoVoltages.append(piezoVoltage)
                        peakPosition = float(lineData[3])
                        peakPositions.append(peakPosition)
        self.currentFileData = allFileData
        self.allData.append(allFileData)
    def analyzeAllData(self):
        # Run the cycle analysis over every cycle of every parsed file.
        for fileData in self.allData:
            for cycleData in fileData:
                self.analyzeExperimentalData(cycleData)
def analyzeCycleInCurrentFile(self, cycleIndex):
self.currentCycleIndex = cycleIndex
for i in range(0, len(self.currentFileData)):
if cycleIndex == i:
self.analyzeExperimentalData(self.currentFileData[i])
def determineZeroForcePixelPosition(self, zeroPeakPositions):
zeroForceMean = np.mean(zeroPeakPositions)
zeroForceStd = np.std(zeroPeakPositions)
return zeroForceMean, zeroForceStd
def convertToForce(self, peakPositionArray, zeroForcePP, expPara):
springConstant = expPara['springConstant']
differenceFromZero = np.array(peakPositionArray) - zeroForcePP
timesSpringConstant = differenceFromZero * expPara['u2ratio'] * 1e3 * springConstant
return timesSpringConstant
    def analyzeExperimentalData(self, cycleData):
        """Bucket one cycle's samples by BFP state, then run the
        state-change detector on it (which plots, blocking).

        The per-state time/position buckets are currently computed but
        unused — they fed the exploratory plotting that has been left
        commented out in the history.
        """
        expParameters = cycleData['experimentParameters']
        timeStamps = cycleData['timeStamps']
        bfpStates = cycleData['bfpStates']
        piezoVoltages = cycleData['piezoVoltages']
        mainPeakPositions = cycleData['mainPeakPositions']
        zeroStateTimepoints = []
        zeroStatePositions = []
        oneStateTimepoints = []
        oneStatePositions = []
        twoStateTimepoints = []
        twoStatePositions = []
        threeStateTimepoints = []
        threeStatePositions = []
        fourStateTimepoints = []
        fourStatePositions = []
        fiveStateTimepoints = []
        fiveStatePositions = []
        # Split samples into one bucket per BFP state (0..5).
        for i in range(0, len(timeStamps)):
            if bfpStates[i] == 0.000:
                zeroStatePositions.append(mainPeakPositions[i])
                zeroStateTimepoints.append(timeStamps[i])
            if bfpStates[i] == 1.000:
                oneStatePositions.append(mainPeakPositions[i])
                oneStateTimepoints.append(timeStamps[i])
            if bfpStates[i] == 2.000:
                twoStatePositions.append(mainPeakPositions[i])
                twoStateTimepoints.append(timeStamps[i])
            if bfpStates[i] == 3.000:
                threeStatePositions.append(mainPeakPositions[i])
                threeStateTimepoints.append(timeStamps[i])
            if bfpStates[i] == 4.000:
                fourStatePositions.append(mainPeakPositions[i])
                fourStateTimepoints.append(timeStamps[i])
            if bfpStates[i] == 5.000:
                fiveStatePositions.append(mainPeakPositions[i])
                fiveStateTimepoints.append(timeStamps[i])
        # (large block of commented-out exploratory plotting removed)
        test = self.testChangesInState(cycleData, 10)
        #self.movingAverage(cycleData, 10)
        pass
    def testChangesInState(self, cycleData, frameHolderSize):
        """Flag samples that jump away from the local moving average.

        For each sample past the warm-up window, computes the mean and std
        of the previous *frameHolderSize* positions; a sample further than
        4x the recent maximum std from the mean is flagged as state 1,
        otherwise 0.  Plots several diagnostic figures and blocks on
        plt.show(); returns None.

        NOTE(review): ``movingStd[i-frameHolderSize:i]`` indexes movingStd
        with sample indices, but movingStd lags the sample index by
        frameHolderSize entries — verify the intended window.
        """
        expParameters = np.array(cycleData['experimentParameters'])
        timeStamps = np.array(cycleData['timeStamps'])
        bfpStates = np.array(cycleData['bfpStates'])
        piezoVoltages = np.array(cycleData['piezoVoltages'])
        mainPeakPositions = np.array(cycleData['mainPeakPositions'])
        positionHolder = []
        movingAverage = []
        movingStd = []
        movingTime = []
        differenceSignHolder = []
        stateHolder = []
        movingNormality = []
        normalitySwitch = []
        for i in range(0, len(timeStamps)):
            if (i < frameHolderSize):
                # Warm-up: not enough history yet, assume baseline state.
                stateHolder.append(0)
            else:
                positionsOfInterest = mainPeakPositions[i-frameHolderSize:i]
                average = np.mean(positionsOfInterest)
                std = np.std(positionsOfInterest)
                movingTime.append(timeStamps[i])
                movingStd.append(std)
                movingAverage.append(average)
                averageDifference = mainPeakPositions[i] - average
                averageDifferenceAbs = np.absolute(averageDifference)
                # Noise reference: max std over the recent window (or all
                # history while the std series is still short).
                if len(movingStd) > frameHolderSize:
                    stdOfInterest = movingStd[i-frameHolderSize:i]
                    maxStd = np.amax(stdOfInterest)
                else:
                    maxStd = np.amax(movingStd)
                if (maxStd*4 < averageDifferenceAbs):
                    stateHolder.append(1)
                else:
                    stateHolder.append(0)
        # Diagnostic figures (plt.show() blocks until closed).
        plt.figure(1)
        plt.plot(timeStamps, mainPeakPositions)
        plt.figure(6)
        plt.plot(timeStamps, stateHolder)
        plt.title('BFP State vs Time')
        plt.xlabel('Time in Seconds (s)')
        plt.ylabel('State')
        plt.figure(2)
        plt.plot(movingTime, movingAverage)
        plt.title('Average over 50 Position Frames')
        plt.xlabel('Time in seconds (s)')
        plt.ylabel('Average position pixel (px)')
        plt.figure(3)
        plt.plot(movingTime, movingStd)
        plt.title('Standard Deviation over 50 Position Frames')
        plt.xlabel('Time in seconds (s)')
        plt.ylabel('Position standard deviation (sigma)')
        plt.figure(4)
        plt.plot(timeStamps, mainPeakPositions)
        plt.title('Peak Positions vs Time')
        plt.xlabel('Time in seconds (s)')
        plt.ylabel('Position in pixels (px)')
        plt.figure(5)
        plt.plot(timeStamps, bfpStates)
        plt.title('BFP State vs Time')
        plt.xlabel('Time in seconds (s)')
        plt.ylabel('BFP State')
        plt.show()
        pass
# for i in range(0, len(timeStamps)):
# if (i < frameHolderSize):
# positionHolder.append(mainPeakPositions[i])
# stateHolder.append(0)
# pastMovingMin = np.amin(positionHolder)
# pastMovingMax = np.amax(positionHolder)
# else:
# movingAverage.append(np.average(positionHolder))
# movingStd.append(np.std(positionHolder))
# movingTime.append(timeStamps[i])
#
# #movingNormality.append(scistats.normaltest(positionHolder).pvalue)
#
# currentMovingMax = np.amax(positionHolder)
# currentMovingMin = np.amin(positionHolder)
#
# if currentMovingMax > pastMovingMax:
# pastMovingMax = currentMovingMax
# if currentMovingMin < pastMovingMin:
# pastMovingMin = currentMovingMin
# bottomHalf = positionHolder[0:int(len(positionHolder)/2)]
# topHalf = positionHolder[int(len(positionHolder)/2):len(positionHolder)]
# bottomAverage = np.average(bottomHalf)
# topAverage = np.average(topHalf)
# wholeAverage = np.average(positionHolder)
# averageDifference = mainPeakPositions[i] - wholeAverage
# #averageDifference = topAverage - bottomAverage
# topDifference = mainPeakPositions[i] - topAverage
# bottomDifference = mainPeakPositions[i] - bottomAverage
# differenceSign = np.sign(topDifference)
# topDifferenceAbs = np.absolute(topDifference)
# bottomDifferenceAbs = np.absolute(bottomDifference)
# averageDifferenceAbs = np.absolute(averageDifference)
# bottomStd = np.std(bottomHalf)
# topStd = np.std(topHalf)
# wholeStd = np.std(positionHolder)
# impinging = False
# clamping = False
# # differenceTopMode = scistats.mode(differenceSignHolder[0:int(len(differenceSignHolder)/2)])
# # differenceBottomMode = scistats.mode(differenceSignHolder[int(len(differenceSignHolder)/2):len(differenceSignHolder)])
# if (wholeStd*7 < averageDifferenceAbs):
# if (currentMovingMin != pastMovingMin):
# if (currentMovingMax == pastMovingMax):
# newState = stateHolder[len(stateHolder) - 1] + 1
# else:
# newState = stateHolder[len(stateHolder) - 1]
# elif (currentMovingMax != pastMovingMax):
# newState = stateHolder[len(stateHolder) - 1] + 1
# elif (wholeStd*8 < averageDifference):
# newState = stateHolder[len(stateHolder) - 1] + 1
# else:
# newState = stateHolder[len(stateHolder) - 1]
# else:
# newState = stateHolder[len(stateHolder) - 1]
# # if (len(differenceSignHolder) < frameHolderSize):
# # differenceSignHolder.append(differenceSign)
# # else:
# # newDifferenceSignHolder = differenceSignHolder[0:(len(differenceSignHolder)-1)]
# # newDifferenceSignHolder.append(differenceSign)
# # differenceSignHolder = newDifferenceSignHolder
# stateHolder.append(newState)
# newPositionHolder = positionHolder[0:(len(positionHolder)-1)]
# newPositionHolder.append(mainPeakPositions[i])
# positionHolder = newPositionHolder
# plt.figure(1)
# plt.plot(timeStamps, stateHolder)
#
# plt.figure(2)
# plt.plot(timeStamps, bfpStates)
#
# plt.figure(3)
# plt.plot(timeStamps, mainPeakPositions)
# plt.figure(4)
# plt.plot(movingTime, movingNormality)
    def setCurrentCycle(self, cycleIndex):
        # Select a cycle of the current file for later export/analysis.
        if self.currentFileData != None:
            self.currentCycleIndex = cycleIndex
            self.currentCycleData = self.currentFileData[cycleIndex]
    def exportCurrentCycleCsvInCurrentFile(self):
        """Write the currently selected cycle to
        <currentFileName>cycleIndex<index>.csv in the current directory.

        Requires currentFileData / currentCycleIndex / currentCycleData to
        have been set (see setCurrentCycle); silently does nothing
        otherwise.
        """
        if self.currentFileData != None:
            if self.currentCycleIndex != None:
                if self.currentCycleData != None:
                    csvFileName = self.currentFileName + 'cycleIndex' + str(self.currentCycleIndex) + '.csv'
                    csvFilePath = os.path.join(self.currentDirectory, csvFileName)
                    csvFile = open(csvFilePath, 'w', newline='')
                    fieldNames = ['timeStamp', 'bfpState', 'piezoVoltage', 'peakPixelPosition']
                    cycleWriter = csv.writer(csvFile, dialect='excel')
                    cycleData = self.currentCycleData
                    cycleWriter.writerow(fieldNames)
                    expParameters = np.array(cycleData['experimentParameters'])
                    timeStamps = np.array(cycleData['timeStamps'])
                    bfpStates = np.array(cycleData['bfpStates'])
                    piezoVoltages = np.array(cycleData['piezoVoltages'])
                    mainPeakPositions = np.array(cycleData['mainPeakPositions'])
                    # One row per sample.
                    for i in range(0, len(timeStamps)):
                        cycleWriter.writerow([timeStamps[i], bfpStates[i], piezoVoltages[i], mainPeakPositions[i]])
                    csvFile.close()
        pass
def movingAverage(self, cycleData, numOfPointsToAverage):
expParameters = np.array(cycleData['experimentParameters'])
timeStamps = np.array(cycleData['timeStamps'])
bfpStates = np.array(cycleData['bfpStates'])
piezoVoltages = np.array(cycleData['piezoVoltages'])
mainPeakPositions = np.array(cycleData['mainPeakPositions'])
timePoints = []
positionAverages = []
positionStds = []
for i in range(0, len(timeStamps)):
if i < numOfPointsToAverage:
positionsToAvg = mainPeakPositions[i-numOfPointsToAverage:i]
positionsAvg = np.average(positionsToAvg)
positionsStd = np.std(positionsToAvg)
positionAverages.append(positionsAvg)
timePoints.append(timePoints[i])
positionStds.append(positionsStd)
plt.figure(1)
plt.plot(timePoints, positionAverages)
plt.xlabel('Time')
plt.ylabel('Positions (pixels)')
plt.show()
pass
# zeroTimeStamps = []
# zeroMainPeakPositions = []
#
# for i in range(0, len(timeStamps)):
# if bfpStates[i] == 0.00000:
# zeroTimeStamps.append(timeStamps[i])
# zeroMainPeakPositions.append(mainPeakPositions[i])
#
# zeroPeakPosition = self.determineZeroForcePixelPosition(zeroMainPeakPositions)
# forceArray = self.convertToForce(peakLocation, zeroPeakPosition, expPara)
# timeArray = np.array(time)
#
# averagesDict = self.plusMinusForceAverages(25, timeArray, forceArray)
# variancesDict = self.plusMinusForceVariances(25, timeArray, forceArray)
# normalTestDict = self.plusMinusForceNormalTest(25, timeArray, forceArray)
# zeroStateForceArray = self.convertToForce(zeroState, zeroPeakPosition, expPara)
# oneStateForceArray = self.convertToForce(oneState, zeroPeakPosition, expPara)
# twoStateForceArray = self.convertToForce(twoState, zeroPeakPosition, expPara)
# threeStateForceArray = self.convertToForce(threeState, zeroPeakPosition, expPara)
# fourStateForceArray = self.convertToForce(fourState, zeroPeakPosition, expPara)
# fiveStateForceArray = self.convertToForce(fiveState, zeroPeakPosition, expPara)
#
# plt.figure(1)
# plt.plot(zeroStateTime, zeroStateForceArray)
# plt.plot(oneStateTime, oneStateForceArray)
# plt.plot(twoStateTime, twoStateForceArray)
# plt.plot(threeStateTime, threeStateForceArray)
# plt.plot(fourStateTime, fourStateForceArray)
# plt.plot(fiveStateTime, fiveStateForceArray)
# plt.ylabel('Force')
# plt.xlabel('Time')
#
# plt.figure(2)
# plt.plot(time, bfpState)
# plt.ylabel('BFP State')
# plt.xlabel('Time')
#
# plt.figure(3)
# plt.plot(averagesDict['time'], averagesDict['averages'])
# plt.ylabel('Averages')
# plt.xlabel('Time')
#
# plt.figure(4)
# plt.plot(variancesDict['time'], variancesDict['variances'])
# plt.ylabel('Variances')
# plt.xlabel('Time')
#
# plt.figure(5)
# plt.plot(normalTestDict['time'], normalTestDict['statistics'])
# plt.ylabel('Statistic')
# plt.xlabel('Time')
#
# plt.figure(6)
# plt.plot(normalTestDict['time'], normalTestDict['pValues'])
# plt.ylabel('P value')
# plt.xlabel('Time')
#
# plt.show()
# pass
def plusMinusForceAverages(self, plusMinusIndex, timeArray, forceArray):
averages = []
for i in range(0, len(timeArray)):
if (i >= plusMinusIndex/2) and (i < (len(timeArray)-(plusMinusIndex/2))):
averageForTime = np.average(forceArray[i-plusMinusIndex/2:i+plusMinusIndex/2])
averages.append(averageForTime)
averageArray = np.array(averages)
timeNArray = np.array(timeArray[plusMinusIndex:len(timeArray)])
averagesDict = {'averages': averageArray, 'time': timeNArray}
return averagesDict
def plusMinusForceVariances(self, plusMinusIndex, timeArray, forceArray):
variances = []
for i in range(0, len(timeArray)):
if (i >= plusMinusIndex/2) and (i < (len(timeArray)-(plusMinusIndex/2))):
varianceForTime = np.var(forceArray[i-plusMinusIndex/2:i+plusMinusIndex/2])
variances.append(varianceForTime)
varianceArray = np.array(variances)
timeNArray = np.array(timeArray[plusMinusIndex:len(timeArray)])
variancesDict = {'variances': varianceArray, 'time': timeNArray}
return variancesDict
def plusMinusForceNormalTest(self, plusMinusIndex, timeArray, forceArray):
statistics = []
pValues = []
for i in range(0, len(timeArray)):
if (i >= plusMinusIndex/2) and (i < (len(timeArray)-(plusMinusIndex/2))):
normalTestForTime = scistats.normaltest(forceArray[i-plusMinusIndex/2:i+plusMinusIndex/2])
statistics.append(normalTestForTime.statistic)
pValues.append(normalTestForTime.pvalue)
statisticArray = np.array(statistics)
pValueArray = np.array(pValues)
timeNArray = np.array(timeArray[plusMinusIndex:len(timeArray)])
normalTestDict = {'statistics': statisticArray, 'time': timeNArray, 'pValues': pValueArray}
return normalTestDict
    def parseFilesInDirectory(self):
        # Parse every regular file in the current directory; subdirectories
        # are skipped.  Note: 'file' shadows the builtin inside the loop.
        if self.currentDirectory != None:
            fileList = os.listdir(self.currentDirectory)
            for file in fileList:
                currentPath = os.path.join(self.currentDirectory, file)
                if os.path.isdir(currentPath):
                    pass
                if os.path.isfile(currentPath):
                    self.currentFile = open(currentPath, 'r')
                    self.currentFileName = file
                    self.parseFile()
    def parseFileInDirectory(self, fileName):
        # Find *fileName* in the current directory and parse it; silently
        # does nothing when the directory is unset or the file is absent.
        if self.currentDirectory != None:
            fileList = os.listdir(self.currentDirectory)
            for file in fileList:
                if file == fileName:
                    currentPath = os.path.join(self.currentDirectory, file)
                    if os.path.isdir(currentPath):
                        pass
                    if os.path.isfile(currentPath):
                        self.currentFile = open(currentPath, 'r')
                        self.currentFileName = fileName
                        self.parseFile()
    def setCurrentFile(self, fileName):
        """Record *fileName* and an open read handle to it on self.

        Unlike parseFileInDirectory, this only opens the file; it does not
        parse.  No-op when no directory is set or the name is absent.
        NOTE(review): the handle stays open for the object's lifetime and
        the loop keeps scanning after a match.
        """
        if self.currentDirectory != None:
            fileList = os.listdir(self.currentDirectory)
            for file in fileList:
                if file == fileName:
                    currentPath = os.path.join(self.currentDirectory, file)
                    if os.path.isdir(currentPath):
                        pass
                    if os.path.isfile(currentPath):
                        self.currentFileName = fileName
                        self.currentFile = open(currentPath, 'r')
def setCurrentFileDirectory(self, dataDirectory):
if os.path.isdir(dataDirectory):
self.currentDirectory = dataDirectory
    def runAnalysisOnCycle(self, analysisType, cycleData):
        """Placeholder hook for per-cycle analysis; not yet implemented."""
        pass
def saveCurrenFileDataIntoPickle(self):
if self.currentDirectory != None:
pickleFileName = self.currentFileName + '.pickle'
path = os.path.join(self.currentDirectory, pickleFileName)
file = open(path, 'wb')
pickleDump = pickle.dumps(self.currentFileData)
file.write(pickleDump)
file.close()
else:
if self.currentFileName != None:
pickleFileName = self.currentFileName + '.pickle'
file = open(pickleFileName, 'wb')
pickleDump = pickle.dumps(self.currentFileData)
file.write(pickleDump)
file.close()
def loadFileDataFromPickle(self, fileName):
if self.currentDirectory != None:
path = os.path.join(self.currentDirectory, fileName)
if os.path.isfile(path):
file = open(path, 'rb')
self.currentFileName = fileName[0:(len(fileName)-7)]
self.currentFileData = pickle.loads(file.read())
else:
if os.path.isfile(fileName):
file = open(fileName, 'rb')
self.currentFileData = pickle.loads(file.read())
    def __init__(self):
        # No state initialized here; attributes are populated via the
        # setCurrentFile* / loadFileDataFromPickle methods.
        pass
# --- ad-hoc driver ---------------------------------------------------------
# Loads a previously pickled parse of one BFP data file and analyzes cycle 51.
# The commented lines are the one-time parse/export steps.
dataApi = BFPData()
dataApi.setCurrentFileDirectory('../SampleData')
#dataApi.parseFilesInDirectory()
#dataApi.setCurrentFile('20151201_GC-VWF_17-1_2')
#dataApi.parseFile()
#dataApi.saveCurrenFileDataIntoPickle()
dataApi.loadFileDataFromPickle('20151201_GC-VWF_17-1_2.pickle')
#dataApi.setCurrentCycle(51)
#dataApi.exportCurrentCycleCsvInCurrentFile()
dataApi.analyzeCycleInCurrentFile(51)
"repo_name": "amrosado/BFPOnlineDataAnalysis",
"path": "BFPDataAnalysis/bfpDataParsing.py",
"copies": "1",
"size": "25979",
"license": "mit",
"hash": -3784311157106568700,
"line_mean": 41.8006589786,
"line_max": 138,
"alpha_frac": 0.5823164864,
"autogenerated": false,
"ratio": 4.162634193238263,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5244950679638263,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arosado'
import pycurl
import urllib.parse
import collections
import types
from Bio import Entrez
#import HTMLParser
#import sys
from lxml import etree
import lxml.html
import re
import io
import os
import pickle
#import json
# Row schema for one journal entry scraped from the ISI/JCR ranking table.
Journal = collections.namedtuple('Journal', ['Rank', 'AbrevTitle', 'IsiLink', 'ISSN', 'TotalCit', 'ImpactFactor', 'FiveYearImpactFactor',
                                             'ImmedIndex', 'Articles', 'CitedHalfLife', 'EigenfactorScore', 'ArticleInfluenceScore'])
# Field layout mirroring one Entrez esummary record for a PubMed article.
Article = collections.namedtuple('Article', ['DOI', 'Title', 'Source', 'PmcRefCount', 'Issue', 'SO', 'ISSN', 'Volume',
                                             'FullJournalName', 'RecordStatus', 'ESSN', 'ELocationID', 'Pages', 'PubStatus',
                                             'AuthorList', 'EPubDate', 'PubDate', 'NlmUniqueID', 'LastAuthor', 'ArticleIds',
                                             'Item', 'History', 'LangList', 'HasAbstract', 'References', 'PubTypeList', 'Id'])
# Module-level accumulators shared by the parser classes below.
Journals = []
Articles = []
from tkinter import tix
#from pdfminer.pdfparser import PDFParser, PDFDocument
#from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
#from pdfminer.pdfdevice import PDFDevice, PDFTextDevice
#from pdfminer.converter import PDFPageAggregator
#import pdfminer.layout
class BibTextParser:
    """Resolves plain-text bibliography files into PubMed article records.

    Workflow: parseBibText() reads every file under currentDirectoryURL and
    resolves each reference line to a PMID (querying PubMed over HTTP via
    pycurl when the line carries no inline PMID), then
    buildArticleCollection() fetches Entrez summaries for the collected
    PMIDs into Article namedtuples.
    """
    # Class-level accumulators / configuration (kept class-level as in the
    # original; all instances share these).
    Articles = []
    bibFilesURLs = []
    Entrez.email = "aaron.rosado@gmail.com"  # NCBI requires a contact address
    currentDirectoryURL = ''
    articleCollectionPickleFileName = ''
    pmidList = []
    currentBibFile = FileNotFoundError  # odd "no file yet" sentinel, kept for compatibility

    def handleBibReference(self, bibRef):
        """Split one reference line on '. ' and hand it to the component hook."""
        entryComponents = re.split("\.\s", bibRef)
        refPMID = self.processBibRefComponents(entryComponents)
        return refPMID

    def processBibRefComponents(self, bibRefComponents):
        """Identity hook; subclasses may post-process reference components."""
        return bibRefComponents

    def retrieveArticleNCBIHTMLFromPMCID(self, pmcid):
        """GET the PubMed search page for *pmcid*; return raw HTML bytes."""
        c = pycurl.Curl()
        c.setopt(pycurl.HTTPHEADER, ["Accept:"])
        searchURL = "http://www.ncbi.nlm.nih.gov/pubmed/?term="
        parsedReference = urllib.parse.quote(pmcid)
        fullSearchURL = searchURL + parsedReference
        c.setopt(pycurl.URL, fullSearchURL)
        c.setopt(pycurl.HTTPHEADER, ["Accept:"])
        e = io.BytesIO()
        c.setopt(pycurl.WRITEFUNCTION, e.write)
        c.setopt(pycurl.FOLLOWLOCATION, 1)
        c.setopt(pycurl.MAXREDIRS, 10)
        c.setopt(pycurl.COOKIEFILE, 'cookie.txt')
        c.perform()
        return e.getvalue()

    def retrieveArticleNCBIHTMLFromBrokenDownRefComponents(self, refComponents):
        """GET the PubMed search page for a quoted join of *refComponents*."""
        c = pycurl.Curl()
        c.setopt(pycurl.HTTPHEADER, ["Accept:"])
        searchURL = "http://www.ncbi.nlm.nih.gov/pubmed/?term="
        refSearchString = "'"
        for component in refComponents:
            refSearchString = refSearchString + component + "'"
        parsedReference = urllib.parse.quote(refSearchString)
        fullSearchURL = searchURL + parsedReference
        c.setopt(pycurl.URL, fullSearchURL)
        c.setopt(pycurl.HTTPHEADER, ["Accept:"])
        e = io.BytesIO()
        c.setopt(pycurl.WRITEFUNCTION, e.write)
        c.setopt(pycurl.FOLLOWLOCATION, 1)
        c.setopt(pycurl.MAXREDIRS, 10)
        c.setopt(pycurl.COOKIEFILE, 'cookie.txt')
        c.perform()
        return e.getvalue()

    def parseBibHTMLForPMID(self, htmlBytes):
        """Extract the PMID from a PubMed result page (<dl class="rprtid">).

        Returns the literal string 'Error retreiving PMID' (sic) when no
        report-id block is found -- callers test for that exact value.
        """
        pmid = "Error retreiving PMID"
        root = etree.HTML(htmlBytes, parser=None, base_url=None)
        for element in root.iter():
            if (element.tag == 'dl'):
                if (element.get('class') == 'rprtid'):
                    rprtid = element.getchildren()
                    if (len(rprtid[1].getchildren()) > 0):
                        subElements = rprtid[1].getchildren()
                        pmid = subElements[0].text
                    else:
                        pmid = rprtid[1].text
        return pmid

    def parseBibText(self):
        """Walk currentDirectoryURL, resolving every reference line to a PMID."""
        self.bibFilesURLs = os.listdir(self.currentDirectoryURL)
        for bibURL in self.bibFilesURLs:
            currentBibURL = self.currentDirectoryURL + '/' + bibURL
            self.currentBibFile = io.open(currentBibURL, mode='r', encoding="utf-8")
            for line in self.currentBibFile:
                brokenDownReference = re.split('\.\s', line)
                if (len(brokenDownReference) > 1):
                    pmid = self.retrievePMIDfromBrokenDownReference(brokenDownReference)
                    self.pmidList.append(pmid)
            self.currentBibFile.close()

    def retrievePMIDfromBrokenDownReference(self, brokenDownReference):
        """Resolve a split reference line to a PMID.

        Strategy, in order: inline 'PMID:' token; inline 'PMCID:' token
        (resolved via a PubMed search); title+author web search on the
        first two components; finally a search on the first component only.
        """
        for refComponent in brokenDownReference:
            if re.search('PMID', refComponent):
                processedRefComponents = re.findall("(?<=PMID:)\s*\d+(?=\s)", refComponent)
                if (len(processedRefComponents) > 0):
                    prePmid = re.findall('\d+', processedRefComponents[0])
                    pmid = prePmid[0]
                    return pmid
            if re.search('PMCID', refComponent):
                processedRefComponents = re.findall("(?<=PMCID:)\s*PMC\d+", refComponent)
                if (len(processedRefComponents) > 0):
                    prePmcid = re.findall('PMC\d+', processedRefComponents[0])
                    pmcid = prePmcid[0]
                    refNCBIHTML = self.retrieveArticleNCBIHTMLFromPMCID(pmcid)
                    pmid = self.parseBibHTMLForPMID(refNCBIHTML)
                    return pmid
        relevantRefComponents = [brokenDownReference[0], brokenDownReference[1]]
        refNCBIHTML = self.retrieveArticleNCBIHTMLFromBrokenDownRefComponents(relevantRefComponents)
        pmid = self.parseBibHTMLForPMID(refNCBIHTML)
        if (pmid == 'Error retreiving PMID'):
            refNCBIHTML = self.retrieveArticleNCBIHTMLFromBrokenDownRefComponents(brokenDownReference[0])
            pmid = self.parseBibHTMLForPMID(refNCBIHTML)
        return pmid

    def buildArticleCollection(self):
        """Fetch Entrez summaries for all resolved PMIDs into Articles."""
        pmidListString = ''
        for x in range(len(self.pmidList)):
            if(self.pmidList[x] != 'Error retreiving PMID'):
                pmidListString = pmidListString + ' ' + self.pmidList[x]
        handle = Entrez.esummary(db="pubmed", id=pmidListString)
        record = Entrez.read(handle)
        for x in range(0, len(record)):
            # Not every record carries a DOI; default it so the namedtuple builds.
            if ('DOI' not in record[x]):
                record[x]['DOI'] = ''
            Articles.append(Article(**record[x]))
        self.Articles = Articles

    def saveArticlePickle(self):
        """Persist the module-level Articles list to article.pickle."""
        # Fix: 'with' guarantees the handle is closed (the original leaked it).
        with open('article.pickle', 'wb') as file:
            pickle.dump(Articles, file)

    def openArticlePickle(self):
        """Load article.pickle into self.Articles.

        Fix: the original called pickle.load() and discarded the result;
        the loaded list is now kept (mirroring IsiParser.openJournalPickle).
        """
        with open('article.pickle', 'rb') as file:
            self.Articles = pickle.load(file)

    def __init__(self, directoryPath):
        """Remember the directory holding the bibliography text files."""
        self.currentDirectoryURL = directoryPath
class IsiParser:
    """Scrapes the 2012 ISI Journal Citation Reports ranking into Journal tuples.

    Flow: intiateJCRConnection() (sic) obtains a session SID cookie,
    postToGetAllJournalInformation() requests the full ranking, and
    parseJCR() pages through the HTML tables, appending to the module-level
    Journals list.  Results can be pickled/unpickled.
    """
    isiCurl = pycurl.Curl()
    initialJCRurl = 'http://admin-router.webofknowledge.com/?DestApp=JCR'
    isiPickleFileName = ''
    SID = ''                 # session id parsed out of the initial cookies
    initialCookies = []
    updatedCookies = []
    currentHTML = ''
    #Journal = collections.namedtuple('Journal', ['Rank', 'AbrevTitle', 'IsiLink', 'ISSN', 'TotalCit', 'ImpactFactor', 'FiveYearImpactFactor',
    #                                             'ImmedIndex', 'Articles', 'CitedHalfLife', 'EigenfactorScore', 'ArticleInfluenceScore'])
    # Hard-coded pagination for the 2012 edition (20 journals per page).
    totalNumberOfJournals = 8471
    totalNumberOfPages = 424
    Journals = []
    def intiateJCRConnection(self):
        """Open the JCR landing page and capture the session SID cookie."""
        self.isiCurl.setopt(pycurl.URL, self.initialJCRurl)
        self.isiCurl.setopt(pycurl.HTTPHEADER, ["Accept:"])
        e = io.BytesIO()
        self.isiCurl.setopt(pycurl.WRITEFUNCTION, e.write)
        self.isiCurl.setopt(pycurl.FOLLOWLOCATION, 1)
        self.isiCurl.setopt(pycurl.MAXREDIRS, 10)
        self.isiCurl.setopt(pycurl.COOKIEFILE, 'cookie.txt')
        self.isiCurl.perform()
        self.initialCookies = self.isiCurl.getinfo(pycurl.INFO_COOKIELIST)
        self.SID = self.getSIDFromCookies(self.initialCookies)
    def getSIDFromCookies(self, cookies):
        """Return the quoted SID value from the cookie list (None if absent)."""
        for cookie in cookies:
            foundSID = re.findall('(?<=SID\s["]).*(?=["])', cookie)
            if (len(foundSID) > 0):
                return foundSID[0]
    def postToGetAllJournalInformation(self, SID):
        """POST the SELECT_ALL query (science edition, year 2012) for this SID."""
        self.isiCurl.setopt(pycurl.URL, 'http://admin-apps.webofknowledge.com/JCR/JCR')
        fullPostField = 'edition=science&science_year=2012&social_year=2012&view=category&RQ=SELECT_ALL&change_limits=&Submit.x=1&SID=' + \
                        SID + '&query_new=true'
        self.isiCurl.setopt(pycurl.POSTFIELDS, fullPostField)
        e = io.BytesIO()
        self.isiCurl.setopt(pycurl.WRITEFUNCTION, e.write)
        self.isiCurl.perform()
        self.updatedCookies = self.isiCurl.getinfo(pycurl.INFO_COOKIELIST)
        self.currentHTML = e.getvalue()
    def parseJCR(self):
        """Page through all result pages (cursor steps of 20), parsing each."""
        cursor = 1
        self.parseJCRHtml(self.currentHTML)
        for x in range(1,self.totalNumberOfPages):
            cursor = cursor + 20
            newCursorUrl = 'http://admin-apps.webofknowledge.com/JCR/JCR?RQ=SELECT_ALL&cursor='+cursor.__str__()
            self.isiCurl.setopt(pycurl.URL, newCursorUrl)
            e = io.BytesIO()
            self.isiCurl.setopt(pycurl.WRITEFUNCTION, e.write)
            self.isiCurl.perform()
            self.currentHTML = e.getvalue()
            self.parseJCRHtml(self.currentHTML)
        return Journals
    def parseJCRHtml(self, jcrHtml):
        """Extract journal rows from one JCR result page into Journals.

        Locates the data table by its 'dataTableHeader' first cell, then
        reads each row's cells positionally (rows 4..len-2 are data rows).
        """
        root = etree.HTML(jcrHtml, parser=None, base_url=None)
        for element in root.iter():
            if ((element.tag == 'table') and (element.getchildren()[0].getchildren()[0].get('class') == 'dataTableHeader')):
                journalData = element.getchildren()
                for journalEntry in journalData[4:(len(journalData)-2)]:
                    dataEntry = journalEntry.getchildren()
                    spacer = dataEntry[0].text
                    rank = dataEntry[1].text
                    journalInfo = dataEntry[2].getchildren()[0]
                    abvJournalTitle = journalInfo.text
                    isiLink = journalInfo.get('href')
                    ISSN = dataEntry[3].text
                    totalCites = dataEntry[4].text
                    impactFactor = dataEntry[5].text
                    fiveYearImpactFactor = dataEntry[6].text
                    immedIndex = dataEntry[7].text
                    articles = dataEntry[8].text
                    citedHalfLife = dataEntry[9].text
                    eigenfactorScore = dataEntry[10].text
                    articleInfluenceScore = dataEntry[11].text
                    Journals.append(Journal(rank, abvJournalTitle, isiLink, ISSN, totalCites, impactFactor, fiveYearImpactFactor, immedIndex, articles, citedHalfLife, eigenfactorScore, articleInfluenceScore))
    def saveJournalPickle(self):
        """Persist the module-level Journals list to journal.pickle."""
        # NOTE(review): the handle is not explicitly closed here.
        file = open('journal.pickle', 'wb')
        pickle.dump(Journals, file, pickle.HIGHEST_PROTOCOL)
    def openJournalPickle(self):
        """Load journal.pickle into self.Journals."""
        # NOTE(review): the handle is not explicitly closed here.
        file = open('journal.pickle', 'rb')
        self.Journals = pickle.load(file)
        print('Successful')
    def __init__(self, isiPickleFileName):
        #setup the file
        self.isiPickleFileName = isiPickleFileName
# --- ad-hoc driver ---------------------------------------------------------
# Only the journal-pickle load is active; the commented lines are the
# one-time scrape/parse/persist steps.
BibPars = BibTextParser('C:/Users/arosado/Documents/GitHub/ADRC_Analytics/src/input_data')
#testHtmlBytes = BibPars.parseBibText()
#BibPars.buildArticleCollection()
#BibPars.saveArticlePickle()
#BibPars.openArticlePickle()
IsiP = IsiParser('test')
# # IsiP.intiateJCRConnection()
# # IsiP.postToGetAllJournalInformation(IsiP.SID)
# # IsiP.parseJCR()
# # IsiP.saveJournalPickle()
IsiP.openJournalPickle()
| {
"repo_name": "dgutman/ADRC_Analytics",
"path": "src/bib_vis.py",
"copies": "1",
"size": "12564",
"license": "apache-2.0",
"hash": 7264314239875009000,
"line_mean": 37.188449848,
"line_max": 208,
"alpha_frac": 0.6151703279,
"autogenerated": false,
"ratio": 3.743742550655542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9819795576162369,
"avg_score": 0.007823460478634723,
"num_lines": 329
} |
__author__ = 'arosado'
import requests
import json
class TciaApiClient:
    """Thin client for the TCIA (The Cancer Imaging Archive) REST API.

    Every public query method maps one-to-one onto a '/query/...' endpoint.
    Responses are parsed JSON when apiFormat == 'json', raw bytes for image
    (zip) downloads, and response text otherwise.

    Refactor: the sixteen public query methods previously duplicated the
    parameter-dict construction and try/except wrapper; that logic now
    lives once in _query().  Public names, signatures and raised exception
    messages are unchanged.
    """
    # Endpoint configuration (populated in __init__).
    apiKey = None
    baseUrl = None
    apiResourceUrl = None
    apiFormat = None
    sharedResource = '/SharedList'
    # Cached "current" query state.  (The original declared currentCollection
    # twice; the duplicate is removed.)
    currentCollection = None
    currentBodyPartExamined = None
    currentModality = None
    currentStudyInstanceUID = None
    currentSeriesInstanceUID = None
    currentPatientId = None
    currentManufacturerModelName = None
    currentDate = None
    collectionValues = None
    # One shared HTTP session for connection re-use across calls.
    apiConnectionSession = requests.session()

    def makeApiCall(self, queryUrl, queryParameters):
        """Issue one GET and decode the response by endpoint/format.

        NOTE: on any request failure this prints a message and implicitly
        returns None (behavior preserved from the original).
        """
        try:
            queryRequest = self.apiConnectionSession.get(queryUrl, params=queryParameters)
            if queryUrl == (self.apiResourceUrl + '/query/getImage'):
                # Image downloads arrive as a zip archive: hand back raw bytes.
                queryResponse = queryRequest.content
            elif self.apiFormat == 'json':
                queryResponse = json.loads(queryRequest.text)
            else:
                queryResponse = queryRequest.text
            return queryResponse
        except:
            print('Problem with performing API Request')

    def _query(self, endpoint, errorMessage, **parameters):
        """Shared driver for every /query/* endpoint.

        Builds the parameter dict (always including 'format', dropping
        None-valued parameters), performs the call, and re-raises any
        failure as Exception(errorMessage), exactly like the per-method
        handlers it replaces.
        """
        queryParameters = {'format': self.apiFormat}
        for key, value in parameters.items():
            if value is not None:
                queryParameters[key] = value
        queryUrl = self.apiResourceUrl + endpoint
        try:
            return self.makeApiCall(queryUrl=queryUrl, queryParameters=queryParameters)
        except:
            raise Exception(errorMessage)

    def getCollectionValues(self):
        """Return every collection name known to the server."""
        return self._query('/query/getCollectionValues',
                           'Problem with returning collection values')

    def getModalityValues(self, collection=None, bodyPartExamined=None):
        """Return modality values, optionally filtered by collection / body part."""
        return self._query('/query/getModalityValues',
                           'Problem with returning modality values',
                           Collection=collection, BodyPartExamined=bodyPartExamined)

    def getBodyPartValues(self, collection=None, modality=None):
        """Return body-part values, optionally filtered by collection / modality."""
        return self._query('/query/getBodyPartValues',
                           'Problem with returning body part values',
                           Collection=collection, Modality=modality)

    def getManufacturerValues(self, collection=None, modality=None, bodyPartExamined=None):
        """Return scanner manufacturer values, with optional filters."""
        return self._query('/query/getManufacturerValues',
                           'Problem with returning manufacture values',
                           Collection=collection, Modality=modality,
                           BodyPartExamined=bodyPartExamined)

    def getPatient(self, collection=None):
        """Return patients, optionally restricted to one collection."""
        return self._query('/query/getPatient',
                           'Problem with returning patients by collection',
                           Collection=collection)

    def patientsByModality(self, collection, modality):
        """Return patients in *collection* having series of *modality* (both required)."""
        return self._query('/query/PatientsByModality',
                           'Problem with returning patients by modality',
                           Collection=collection, Modality=modality)

    def getPatientStudy(self, collection=None, patientId=None, studyInstanceUID=None):
        """Return study metadata, with optional collection/patient/study filters."""
        return self._query('/query/getPatientStudy',
                           'Problem with returning patient study',
                           Collection=collection, PatientID=patientId,
                           StudyInstanceUID=studyInstanceUID)

    def getSeries(self, collection=None, studyInstanceUID=None, patientId=None, seriesInstanceUID=None, modality=None, bodyPartExamined=None, manufacturerModelName=None, manufacturer=None):
        """Return series metadata, with any combination of optional filters."""
        return self._query('/query/getSeries',
                           'Problem with returning series',
                           Collection=collection, StudyInstanceUID=studyInstanceUID,
                           PatientID=patientId, SeriesInstanceUID=seriesInstanceUID,
                           Modality=modality, BodyPartExamined=bodyPartExamined,
                           ManufacturerModelName=manufacturerModelName,
                           Manufacturer=manufacturer)

    def getSeriesSize(self, seriesInstanceUID):
        """Return total byte size and object count of one series."""
        return self._query('/query/getSeriesSize',
                           'Problem with returning series size',
                           SeriesInstanceUID=seriesInstanceUID)

    def getImage(self, seriesInstanceUID):
        """Download one series as a zip archive (raw bytes)."""
        return self._query('/query/getImage',
                           'Problem with returning series image zip',
                           SeriesInstanceUID=seriesInstanceUID)

    def getSOPInstanceUIDs(self, seriesInstanceUID):
        """Return the SOP instance UIDs belonging to one series."""
        return self._query('/query/getSOPInstanceUIDs',
                           'Problem with returning SOP Instances',
                           SeriesInstanceUID=seriesInstanceUID)

    def getSingleImage(self, seriesInstanceUID=None, sopInstanceUID=None):
        """Download a single DICOM object identified by series + SOP UIDs."""
        return self._query('/query/getSingleImage',
                           'Problem with returning single DICOM object',
                           SOPInstanceUID=sopInstanceUID,
                           SeriesInstanceUID=seriesInstanceUID)

    def newPatientsInCollection(self, date, collection):
        """Return patients added to *collection* since *date*."""
        return self._query('/query/NewPatientsInCollection',
                           'Problem with returning new patients in collection',
                           Date=date, Collection=collection)

    def newStudiesInPatientCollection(self, date, collection, patientId=None):
        """Return studies added since *date*, optionally for one patient."""
        return self._query('/query/NewStudiesInPatientCollection',
                           'Problem with returning new studies in patient collection',
                           Date=date, Collection=collection, PatientID=patientId)

    def sharedList(self, name):
        """Return the contents of the named shared list."""
        return self._query('/query/ContentsByName',
                           'Problem with returning shared list',
                           name=name)

    def __init__(self, apiKey, baseApiUrl, apiResource, apiFormat):
        """Configure endpoint URLs and install the api_key header on the session."""
        self.apiKey = apiKey
        self.baseUrl = baseApiUrl
        self.apiFormat = apiFormat
        self.apiResourceUrl = baseApiUrl + '/' + apiResource
        self.apiSharedResourceUrl = baseApiUrl + '/' + self.sharedResource
        self.apiConnectionSession.headers.update({'api_key': apiKey, 'Accept-Encoding' : '*'})
self.apiConnectionSession.headers.update({'api_key': apiKey, 'Accept-Encoding' : '*'}) | {
"repo_name": "amrosado/TciaApiV3ClientPython",
"path": "tciaApiClient.py",
"copies": "1",
"size": "10684",
"license": "mit",
"hash": -3054086094745172500,
"line_mean": 34.9764309764,
"line_max": 189,
"alpha_frac": 0.6543429427,
"autogenerated": false,
"ratio": 4.706607929515418,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5860950872215419,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Arpana'
import nltk
import string
import itertools
import re
from nltk.tag.simplify import simplify_wsj_tag
from nltk.tokenize.punkt import PunktWordTokenizer
from collections import defaultdict, namedtuple
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem import *
class WordFeatureMap(object):
    """Holds a word's sense label, its feature-vector probabilities, and the sense prior."""

    def __init__(self, sense=1, feature_vectors_prob=None, prob_sense=0):
        # None sentinel avoids a shared mutable default dict across instances.
        self.feature_vectors_prob = {} if feature_vectors_prob is None else feature_vectors_prob
        self.sense = sense
        self.prob_sense = prob_sense
def process_word_context(entire_context):
    """Split one training context around its '%%' target-word marker.

    Strips punctuation, POS-tags with the simplified WSJ tagset, removes
    stopwords, and returns (prev_context, next_context) as lists of
    (word, tag) pairs.
    NOTE: Python 2 only -- relies on string.maketrans + 3-arg str.translate.
    NOTE(review): raises NameError if no '%%' token survives filtering.
    """
    #remove punct
    cont_without_punct = entire_context.translate(string.maketrans('', ''), r'!"#$&\'()*+,-./:;<=>?@[\\]^_`{}~')
    cont_without_punct = " ".join(cont_without_punct.split())
    #pos tagging
    tagged_sent = [(word, simplify_wsj_tag(tag)) for word, tag in nltk.pos_tag(cont_without_punct.split(" "))]
    #stop words removal
    stopwords = nltk.corpus.stopwords.words('english')
    pos_tag_without_stopwords = [wrd for wrd in tagged_sent if wrd[0].lower() not in stopwords]
    for i, pos_tag_tuple in enumerate(pos_tag_without_stopwords):
        if pos_tag_tuple[0] == '%%':
            # target word is at i+1; following context resumes at i+3
            prev_context, target_word, next_context = pos_tag_without_stopwords[:i], pos_tag_without_stopwords[i + 1], \
                                                      pos_tag_without_stopwords[i + 3:]
            break
    return prev_context, next_context
def select_window_feature(context, direction, feature_window=5):
    """Collect up to *feature_window* content-word (word, tag) pairs.

    Only tokens tagged N/ADJ/V/ADV count.  direction == -1 walks *context*
    from the end (the preceding-context case); any other value walks it
    front-to-back.
    """
    ordered = reversed(context) if direction == -1 else context
    selected = []
    for token in ordered:
        if len(selected) >= feature_window:
            break
        if token[1] in ('N', 'ADJ', 'V', 'ADV'):
            selected.append(token)
    return selected
# Global training-count tables filled in by do_something().
word_feature_dict = {}                 # reserved for per-word feature maps (unused below)
word_sense_count = defaultdict(int)    # WordSense(word, sense) -> occurrence count
word_total_count = defaultdict(int)    # word -> total occurrence count
feature_count = defaultdict(int)       # (feature tuple, WordSense) -> co-occurrence count
def do_something():
    """Train naive-Bayes WSD counts from 'train.data' and print probabilities.

    Each line is 'word|sense|context'; the context window features (5 content
    words on each side of the '%%' marker) are counted per (word, sense).
    Prints P(sense|word) and P(feature|word, sense) estimates.
    NOTE: Python 2 syntax (print statements); the train.data handle is
    closed by the with-block.
    """
    WordSense = namedtuple("WordSense", ['word', 'sense'])
    with open('train.data') as f:
        for line in f:
            components = line.split("|")
            #target_word = components[0].split(".")
            target_word = components[0]
            sense = components[1]
            wordsense = WordSense(target_word, sense)
            prev_context, next_context = process_word_context(components[2])
            feature_vector_list = itertools.chain(select_window_feature(prev_context, -1),
                                                  select_window_feature(next_context, 1))
            word_sense_count[wordsense] += 1
            word_total_count[target_word] += 1
            for fv in feature_vector_list:
                feature_count[(fv, wordsense)] += 1
    # P(sense | word) = count(word, sense) / count(word)
    for key, value in word_sense_count.items():
        print "probability of " + key.word + " " + key.sense + " is = " + str(
            float(value) / float(word_total_count[key[0]]))
    # P(feature | word, sense) = count(feature, word, sense) / count(word, sense)
    for key, value in feature_count.items():
        print "feature vector prob of " + key[0][0] + " wrt " + key[1].word + " " + key[1].sense + " is = " + str(
            value / float(word_sense_count[key[1]]))
def read_file(file_object):
    """Debug walkthrough of the NLP pipeline, printing each stage per line.

    Stages: tokenize -> stopword filter -> POS tag -> stem -> lemmatize.
    NOTE: Python 2 syntax (print statements).
    """
    lines = file_object.readlines()
    for line in lines:
        print "#######LINE#######"
        print line
        text = PunktWordTokenizer().tokenize(line)
        #text = nltk.wordpunct_tokenize(line)
        print "#######TEXT#######"
        print text
        """
        STOP WORD
        """
        stopwords = nltk.corpus.stopwords.words('english')
        # NOTE(review): w[0] tests only the FIRST CHARACTER of each token,
        # unlike the whole-word check in process_word_context -- confirm intended.
        content = [w for w in text if w[0].lower() not in stopwords]
        print "#######STOP WORD#######"
        print content
        """
        POS TAGGING
        """
        tagged_sent = nltk.pos_tag(content)
        tagged_sent = [(word, simplify_wsj_tag(tag)) for word, tag in tagged_sent]
        print "#######POS#######"
        print tagged_sent
        """
        STEMMING
        """
        #tagged_sent = tuple(tagged_sent)
        stemmer = SnowballStemmer("english", ignore_stopwords=True)
        stem_word = ""
        for wrd in tagged_sent:
            stem_word = stem_word + " " + stemmer.stem(wrd[0])
        print "#######STEMMING#######"
        print stem_word
        """
        LEMMATIZING
        """
        print tagged_sent
        lmtzr = WordNetLemmatizer()
        sent = ""
        for wrd in tagged_sent:
            sent = sent + " " + lmtzr.lemmatize(wrd[0])
        print "#######LEMMA"""""""
        print sent
def main():
    """Entry point: train the WSD counts from train.data."""
    do_something()

# NOTE(review): main() runs unconditionally on import; the guard is commented out.
#if __name__ == '__main__':
main()
"repo_name": "fa97/cs4740",
"path": "supervised_wsd/wsd.py",
"copies": "1",
"size": "4719",
"license": "bsd-3-clause",
"hash": 6287131626279220000,
"line_mean": 31.3287671233,
"line_max": 120,
"alpha_frac": 0.5638906548,
"autogenerated": false,
"ratio": 3.6496519721577725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47135426269577724,
"avg_score": null,
"num_lines": null
} |
import os,sys

# Month-name -> month-number lookup used for chronological comparisons.
months={'January': 1,'February': 2,'March': 3,'April': 4,'May': 5,'June': 6,
        'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}
# Candidate archive years; start/end bounds below restrict which are fetched.
years=[2009,2010,2011,2012,2013]
starttime=['August',2013]   # [month name, year] -- first archive to fetch
endtime=['August',2013]     # [month name, year] -- last archive to fetch
def afterStart(month,year):
    """True when (month, year) is on/after the configured starttime.

    Preserved quirk: when year < starttime year, the original fell off the
    end and returned None (falsy) rather than False -- kept identical here.
    """
    if year > starttime[1]:
        return True
    if year == starttime[1]:
        return months[month] >= months[starttime[0]]
def beforeEnd(month,year):
    """True when (month, year) is on/before the configured endtime.

    Preserved quirk: when year > endtime year, the original fell off the
    end and returned None (falsy) rather than False -- kept identical here.
    """
    if year < endtime[1]:
        return True
    if year == endtime[1]:
        return months[month] <= months[endtime[0]]
def parse_listfile(listfile):
    """Extract message hrefs from a pipermail thread-index HTML file.

    Returns the href target of every line beginning with '<LI><A HREF='.

    Fix: the file handle is now closed via 'with' (the original leaked it),
    and lines are streamed instead of materialized with readlines().
    """
    lst = []
    with open(listfile, 'r') as lfile:
        for line in lfile:
            if line.startswith('<LI><A HREF='):
                lst.append(str(line.split('<LI><A HREF="')[1].split('">')[0]))
    return lst
def main():
    """Fetch NANOG mailing-list archives between starttime and endtime.

    For each in-range (year, month): create data/<year>-<month>/, download
    the pipermail thread index, then wget every message file it links.
    NOTE: Python 2 syntax; shells out via os.system (paths here come from
    trusted module constants, not user input).
    """
    print 'start the nanog-fetch'
    for year in years:
        for month in months.keys():
            if afterStart(month,year) and beforeEnd(month,year):
                dirname=str(year)+'-'+month
                cmd = 'mkdir data/'+dirname
                #print cmd
                os.system('mkdir data/'+dirname)
                #print dirname
                baseUrl='http://mailman.nanog.org/pipermail/nanog'
                #print baseUrl+'/'+dirname+'/thread.html'
                listurl=baseUrl+'/'+dirname+'/thread.html'
                listfile='data/'+dirname+'/listfile.html'
                cmd="wget -O "+listfile+" "+listurl
                #print cmd
                os.system(cmd)
                #print "list url fetched"
                msglist = parse_listfile(listfile)
                for msg in msglist:
                    #print msg
                    msgfile='data/'+dirname+'/'+msg
                    msgurl=baseUrl+'/'+dirname+'/'+msg
                    #print msgurl
                    cmd="wget --quiet -O "+msgfile+" "+msgurl
                    print cmd
                    os.system(cmd)
            else:
                continue
# Standard entry guard: only fetch archives when run as a script.
if __name__ == "__main__":
    main()
| {
"repo_name": "noise-lab/nanog-parse",
"path": "nanog-fetch.py",
"copies": "1",
"size": "2260",
"license": "mit",
"hash": -2306739306694920000,
"line_mean": 28.7368421053,
"line_max": 86,
"alpha_frac": 0.5084070796,
"autogenerated": false,
"ratio": 3.8175675675675675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9638283887778263,
"avg_score": 0.03753815187786094,
"num_lines": 76
} |
__author__ = 'Arpit'
import easygui as eg
import sys
import json
from Course import Course
from Year import Year
import find
import time
import gmailer
from tkinter import *
import io
from PIL import Image,ImageTk
def parse_json_file(json_file):
    """Load and return the JSON document stored at *json_file*."""
    with open(json_file) as handle:
        return json.load(handle)
def create_course_list(data):
    """Build a Course object for every entry under data["courses"]."""
    return [Course(entry["department"], entry["number"], entry["CRN"])
            for entry in data["courses"]]
def return_email(data):
    """Return the address from the last entry under data["email"].

    Fix: returns None when no e-mail entries exist; the original left the
    loop variable unbound and raised NameError in that case.
    """
    email = None
    for element in data["email"]:
        email = element["email"]
    return email
def return_year(data):
    """Build a Year object from the last entry under data["year"].

    NOTE: like the original, raises NameError when data["year"] is empty.
    """
    for entry in data["year"]:
        result = Year(entry["year"], entry["semester"])
    return result
def store_to_disk():
    """Serialize the module globals (course_list, email, year) to data.json.

    Rebuilds the same {'courses': [...], 'email': [...], 'year': [...]}
    shape that parse_json_file() reads back at start-up.
    """
    json_course_list=list()
    json_email_list=list()
    json_year_list=list()
    json_year = {
        'semester': year.semester,
        'year': year.year
    }
    print(json_year)
    json_year_list.append(json_year)
    print(json_year_list)
    for course in course_list:
        json_course= {
            'department': course.department,
            'number': course.number,
            'CRN': course.CRN
        }
        json_course_list.append(json_course)
    json_email= {
        'email': email
    }
    json_email_list.append(json_email)
    new_data = {
        'courses': json_course_list,
        'email': json_email_list,
        'year': json_year_list
    }
    with open('data.json', 'w') as outfile:
        json.dump(new_data, outfile, sort_keys = True, indent = 4,
                  ensure_ascii=False)
def add_course(course):
    """Append *course* to the tracked list and persist it immediately."""
    course_list.append(course)
    store_to_disk()
def delete_course(to_delete_course):
    """Remove the last course in course_list matching *to_delete_course*.

    Matching uses Course.compare_course; on a hit the list is persisted
    via store_to_disk().

    Fixes: the original raised NameError (unbound 'to_be_deleted') when
    nothing matched -- a miss is now a no-op -- and the debug prints are
    removed.  Last-match semantics are preserved.
    """
    to_be_deleted = None
    for course in course_list:
        if course.compare_course(to_delete_course):
            to_be_deleted = course
    if to_be_deleted is not None:
        course_list.remove(to_be_deleted)
        store_to_disk()
def course_check(course_list,year,email):
    """Poll course availability every sleepTime seconds until all open.

    For each tracked course, scrapes the registration page (find.parse_web_page);
    when a course opens, e-mails *email* and removes the course from the list.
    Loops until course_list is empty.
    NOTE(review): SECURITY -- gmail credentials are hard-coded in the
    send_email call below; move them to config/environment.
    """
    #initialize data structures
    semester = year.semester
    year=year.year
    email = email
    count = 0
    sleepTime = 10
    #while a course isn't available
    while course_list:
        count = count + 1
        if count!=1:
            print ("Please wait for " + str(sleepTime) + " seconds before the next attempt!")
            #sleep five minutes
            time.sleep(sleepTime)
            print ("\nAaaaaaaand we're back! \n")
        print ('Attempt: ' + str(count))
        for course in course_list:
            print ("Checking: " + str(course.department) + ' ' + str(course.number) + ' - CRN: ' + str(course.CRN))
            #check availability
            flag = find.parse_web_page(year,semester, course)
            if( flag == 1):
                print ('Success!')
                print ('Sending email now!')
                delete_course(course)
                gmailer.send_email("coursecheckeruiuc@gmail.com", email, "", str(course.department) + " " + str(course.number) + " IS OPEN", "The CRN is " + str(course.CRN) + ". Register now!", "coursecheckeruiuc@gmail.com","2yellowbananas" )
            else:
                print ("It's Closed!")
# Start-up: load persisted state from data.json into module globals, then
# immediately rewrite the file (normalizes formatting).
data = parse_json_file('data.json')
course_list = create_course_list(data)
email=return_email(data)
year=return_year(data)
store_to_disk()
def view():
    """Open a Tk window listing all tracked courses for removal/editing."""
    def onSelect(event):
        # Parse the clicked row back into department / number / CRN.
        w = event.widget
        index = int(w.curselection()[0])
        value = w.get(index)
        list=value.rsplit(" ")
        dept = list[0]
        num = list[1]
        num = num[:-1]  # drop trailing separator character
        crn = list[2]
        # NOTE(review): dept/num/crn parsed above are never used; 'course'
        # here is the leaked loop variable from the population loop below,
        # so this always opens the LAST course -- likely a bug, confirm.
        course_screen(course)
    list_window = Tk()
    list_window.geometry("400x400")
    list_window.title("Click on any course to remove it")
    listbox = Listbox(list_window)
    listbox.pack()
    for course in course_list:
        listbox.insert(END, course.print_course())
    listbox.bind('<<ListboxSelect>>',onSelect)
def course_screen(course):
    """Open a Tk editor showing one course; Save parses the text back in.

    The text widget is populated as three 'Label: value' lines; onSelect
    splits them back out and mutates *course* in place before persisting.
    """
    def onSelect():
        # Re-parse the (possibly edited) three-line text back into fields.
        input = text.get("1.0",'end-1c')
        list = input.split("\n")
        dept_list = list[0]
        dept_list = dept_list.split(" ")
        dept=dept_list[1]
        num_list = list[1]
        num_list = num_list.split(" ")
        num=num_list[1]
        crn_list = list[2]
        crn_list = crn_list.split(" ")
        crn = crn_list[1]
        course.department = dept
        course.number = num
        course.CRN = crn
        store_to_disk()
    course_window = Tk()
    course_window.geometry("400x400")
    course_window.title("Course Info")
    text = Text(course_window)
    text.insert(INSERT,"Department: "+course.department)
    text.insert(INSERT,"\nNumber: "+course.number)
    text.insert(INSERT,"\nCRN: "+course.CRN)
    text.pack()
    save_button = Button(course_window, text="Save Changes", command=onSelect)
    save_button.pack()
def list():
    """List every course; clicking a row opens the course editor.

    NOTE(review): this function shadows the builtin `list`; the window title
    is a copy-paste leftover from a CSAir project; and `value` passed to
    course_screen is the row's display STRING, not a Course object, so saving
    from that screen would fail — confirm and fix.
    """
    def onSelect(event):
        w = event.widget
        index = int(w.curselection()[0])
        value = w.get(index)
        course_screen(value)
    list_window = Tk()
    list_window.geometry("400x400")
    list_window.title("All Cities on the CSAir Network")
    listbox = Listbox(list_window)
    listbox.pack()
    for course in course_list:
        listbox.insert(END, course.print_course())
    listbox.bind('<<ListboxSelect>>',onSelect)
def stats():
    """Statistics menu.

    NOTE(review): this whole menu is leftover CSAir code — it reads a
    module-level `graph` that this file never defines, so every branch raises
    NameError at runtime. Kept, but deduplicated: the eight copy-pasted
    window blocks are replaced by one `show` helper.
    """
    def show(window_title, body, size="400x400"):
        # Create a result window with a single read-only Text body.
        win = Tk()
        win.geometry(size)
        win.title(window_title)
        text = Text(win)
        text.insert(INSERT, body)
        text.pack()
    def onSelect(event):
        w = event.widget
        value = w.get(int(w.curselection()[0]))
        if value == "Longest single flight":
            edge = graph.longest_flight()
            show(value, "The longest flight is from " + edge.origin + " to " + edge.destination)
        elif value == "Shortest single flight":
            edge = graph.shortest_flight()
            show(value, "The shortest flight is from " + edge.origin + " to " + edge.destination)
        elif value == "Average distance of all the flights":
            show(value, "The average distance is: " + str(graph.average_distance()))
        elif value == "Biggest city (by population) served":
            show(value, "The biggest city is: " + graph.biggest_city())
        elif value == "Smallest city (by population) served":
            show(value, "The smallest city is: " + graph.smallest_city())
        elif value == "Average size (by population) served":
            show(value, "The average size of cities served by CSAir is: " + str(graph.average_city_size()))
        elif value == "List of the continents served":
            # The original used a larger window for the continents list.
            show(value, graph.list_continents(), size="600x600")
        elif value == "City that has the most direct connections":
            show(value, "The city with the most direct connections is: " + graph.most_connections())
    stats_window = Tk()
    stats_window.geometry("400x400")
    stats_window.title("Statistics about CSAir Network")
    stats_listbox = Listbox(stats_window)
    for option in ("Longest single flight",
                   "Shortest single flight",
                   "Average distance of all the flights",
                   "Biggest city (by population) served",
                   "Smallest city (by population) served",
                   "Average size (by population) served",
                   "List of the continents served",
                   "City that has the most direct connections"):
        stats_listbox.insert(END, option)
    stats_listbox.pack()
    stats_listbox.bind('<<ListboxSelect>>', onSelect)
def add_it(city_code, city_name, city_country, city_continent, city_timezone,
           city_coordinates, city_population, city_region):
    """Create a Vertex from the given city attributes and append it to the
    graph's vertex list.

    NOTE(review): `Vertex` and `graph` belong to a CSAir project and are not
    defined in this file — calling this here raises NameError; confirm.
    """
    city = Vertex(city_code, city_name, city_country, city_continent, city_timezone,
                  city_coordinates, city_population, city_region)
    graph.vertex_list.append(city)
def add_city():
    """Window for adding a city (leftover CSAir feature).

    Fixes over the original:
    - Entry.get() was called at widget-construction time, so every captured
      value was always "" and a blank Vertex was appended immediately. The
      values are now read inside the Submit callback.
    - The Submit button used grid() while everything else used pack();
      mixing geometry managers on one master raises TclError.
    NOTE(review): `add_it`/`Vertex` come from the CSAir project and are not
    defined in this file — confirm this feature is still wanted.
    """
    add_city_window = Tk()
    add_city_window.geometry("400x600")
    add_city_window.title("Add City")
    entries = {}
    # One Label+Entry pair per attribute, in the original order.
    for field in ("Code", "Name", "Country", "Continent", "Timezone",
                  "Coordinates", "Population", "Region"):
        Label(add_city_window, text="Enter City %s: " % field).pack()
        entry = Entry(add_city_window)
        entry.pack()
        entries[field] = entry
    def submit():
        # Read the live widget values only when the user presses Submit.
        add_it(entries["Code"].get(), entries["Name"].get(),
               entries["Country"].get(), entries["Continent"].get(),
               entries["Timezone"].get(), entries["Coordinates"].get(),
               entries["Population"].get(), entries["Region"].get())
    Button(add_city_window, text='Submit', command=submit).pack()
def remove_city():
    """Window for removing a city (leftover CSAir feature).

    Fix: the original called .grid() on the Submit button while the Text
    widget used .pack() on the same master — Tk raises TclError for that;
    the button is now packed.
    NOTE(review): Submit is wired to `add_it` (an *add* handler) and the
    typed city name is never read — the removal logic was never written.
    """
    remove_city_window = Tk()
    remove_city_window.geometry("400x600")
    remove_city_window.title("Remove a City")
    text_input = Text(remove_city_window, borderwidth=3, relief="sunken")
    text_input.pack()
    Button(remove_city_window, text='Submit', command=add_it).pack()
def edit():
    """Edit menu.

    NOTE(review): the "Edit City"/"Remove Route" branches are copy-paste
    leftovers from a CSAir project — they display flight/city statistics via
    an undefined module-level `graph` instead of editing anything, and the
    window title is also wrong for this application. Confirm intent.
    """
    def onSelect(event):
        w = event.widget
        index = int(w.curselection()[0])
        value = w.get(index)
        if value=="Add City":
            add_city()
        elif value == "Edit City":
            # NOTE(review): shows shortest-flight info instead of editing.
            min_edge = graph.shortest_flight()
            edit_city_window = Tk()
            edit_city_window.geometry("400x400")
            edit_city_window.title(value)
            text = Text(edit_city_window)
            text.insert(INSERT,"The shortest flight is from "+min_edge.origin+" to "+min_edge.destination)
            text.pack()
        elif value=="Remove City":
            remove_city()
        elif value=="Remove Route":
            # NOTE(review): shows biggest-city info instead of removing a route.
            biggest_city = graph.biggest_city()
            remove_route_window = Tk()
            remove_route_window.geometry("400x400")
            remove_route_window.title(value)
            text = Text(remove_route_window)
            text.insert(INSERT,"The biggest city is: "+biggest_city)
            text.pack()
    edit_window = Tk()
    edit_window.geometry("400x400")
    edit_window.title("All Cities on the CSAir Network")
    listbox = Listbox(edit_window)
    listbox.pack()
    listbox.insert(END,"Add City")
    listbox.insert(END,"Edit City")
    listbox.insert(END,"Remove City")
    listbox.insert(END,"Remove Route")
    listbox.bind('<<ListboxSelect>>',onSelect)
def exit():
    """Terminate the application immediately (shadows the builtin exit)."""
    raise SystemExit
# Build the main window and its button bar.
root = Tk()
root.geometry("800x600")
root.title("CSAir")  # NOTE(review): leftover title from the CSAir project
frame = Frame(root)
frame.pack()
bottom_frame = Frame(root)
bottom_frame.pack( side = BOTTOM )
view_button = Button(frame, text="View Course List", command=view)
view_button.pack( side = LEFT )
#add_button = Button(frame, text="Add Course", command=add)
#add_button.pack( side = LEFT )
edit_button = Button(frame, text="Edit Course Checker", command=edit)
edit_button.pack( side = LEFT )
# NOTE(review): command=map binds the *builtin* map, so clicking "Run
# Checker" raises TypeError — this should be the checker entry point.
run_button = Button(frame, text="Run Checker", command=map)
run_button.pack( side = RIGHT )
exit_button = Button(bottom_frame, text="Exit", command=exit)
exit_button.pack( side = BOTTOM)
# create a menu
'''menu = Menu(root)
root.config(menu=menu)'''
mainloop() | {
"repo_name": "arpitmathur/CourseAvailabilityChecker",
"path": "TkInterGUI.py",
"copies": "1",
"size": "14202",
"license": "mit",
"hash": 2588891568965474000,
"line_mean": 32.8973747017,
"line_max": 242,
"alpha_frac": 0.6120968878,
"autogenerated": false,
"ratio": 3.561183550651956,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46732804384519555,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Arpit'
import easygui as eg
import sys
import json
from Course import Course
from Year import Year
import find
import time
import gmailer
'''
Function returns a print string of the attributes of the course
@:param parse_json_file the json file to be parsed
@return data the parsed json object data packet
'''
def parse_json_file(json_file):
    """Load and return the JSON document stored at `json_file`."""
    with open(json_file) as handle:
        return json.load(handle)
'''
Function returns a list of courses after parsing the json object data packet
@:param data the json object data packet to be parsed
@return course_list the list of courses to be checked
'''
def create_course_list(data):
    """Build the list of Course objects described by data["courses"]."""
    return [Course(entry["department"], entry["number"], entry["CRN"])
            for entry in data["courses"]]
'''
Function returns an email address after parsing the json object data packet
@:param data the json object data packet to be parsed
@return email the email address to be emailed
'''
def return_email(data):
    """Return the email address from the last entry of data["email"].

    Returns None when the list is empty — the original raised NameError in
    that case because `email` was never bound.
    """
    email = None
    for element in data["email"]:
        email = element["email"]
    return email
'''
Function returns the year and semester after parsing the json object data packet
@:param data the json object data packet to be parsed
@return year a Year object that contains the relevant year and semester data
'''
def return_year(data):
    """Return a Year built from the last entry of data["year"].

    Returns None when the list is empty — the original raised NameError in
    that case because `year` was never bound.
    """
    year = None
    for element in data["year"]:
        year = Year(element["year"], element["semester"])
    return year
'''
Function stores all the current information in the program to the disk
'''
def store_to_disk():
    """Serialize the module-level year, course_list and email to data.json.

    Keys are sorted on output, so the on-disk layout is deterministic.
    """
    payload = {
        'courses': [
            {'department': c.department, 'number': c.number, 'CRN': c.CRN}
            for c in course_list
        ],
        'email': [{'email': email}],
        'year': [{'semester': year.semester, 'year': year.year}],
    }
    with open('data.json', 'w') as outfile:
        json.dump(payload, outfile, sort_keys = True, indent = 4,
                  ensure_ascii=False)
'''
Function returns a new course to the list of courses to be checked and then
calls store_to_disk to store this change to the json file
@:param course the course to be added to the list
'''
def add_course(course):
    """Append `course` to the shared module-level list and persist it."""
    course_list.append(course)
    store_to_disk()
'''
Function delets a course from the list of courses to be checked and then
calls store_to_disk to store this change to the json file
@:param course the course to be added to the list
'''
def delete_course(to_delete_course):
    """Remove the last course equal to `to_delete_course` (per compare_course)
    from the shared course_list and persist the change.

    No-op when nothing matches — the original raised NameError in that case
    because `to_be_deleted` was never bound.
    """
    to_be_deleted = None
    for course in course_list:
        if course.compare_course(to_delete_course):
            to_be_deleted = course
    if to_be_deleted is not None:
        course_list.remove(to_be_deleted)
        store_to_disk()
'''
Function goes through the course list and checks each course to see if its open or not and if open
it emails the user to notify him/her
@:param course_list the list of courses to check for
@:param year the year and semester to be checked
@:param email - the eamil of the user
'''
def course_check(course_list,year,email):
    """Poll each course every `sleepTime` seconds; email the user and drop a
    course from the list once it opens; stop when the list is empty.

    NOTE(review): delete_course mutates the shared course_list while the for
    loop below iterates it — entries can be skipped; confirm.
    SECURITY: the sender credentials are hard-coded in source — move them to
    configuration and rotate them.
    """
    #initialize data structures
    semester = year.semester
    year=year.year
    email = email
    count = 0
    sleepTime = 10  # seconds between rounds (the comment below says minutes)
    #while a course isn't available
    while course_list:
        count = count + 1
        if count!=1:
            print ("Please wait for " + str(sleepTime) + " seconds before the next attempt!")
            #sleep five minutes
            time.sleep(sleepTime)
            print ("\nAaaaaaaand we're back! \n")
        print ('Attempt: ' + str(count))
        for course in course_list:
            print ("Checking: " + str(course.department) + ' ' + str(course.number) + ' - CRN: ' + str(course.CRN))
            #check availability
            flag = find.parse_web_page(year,semester, course)
            if( flag == 1):
                print ('Success!')
                print ('Sending email now!')
                delete_course(course)
                gmailer.send_email("coursecheckeruiuc@gmail.com", email, "", str(course.department) + " " + str(course.number) + " IS OPEN", "The CRN is " + str(course.CRN) + ". Register now!", "coursecheckeruiuc@gmail.com","2yellowbananas" )
            else:
                print ("It's Closed!")
        # Reload persisted state each round so on-disk edits are picked up.
        data = parse_json_file('data.json')
        course_list = create_course_list(data)
        email=return_email(data)
        year=return_year(data)
        store_to_disk()
# Main event loop: show the easygui menu until the user cancels.
while 1:
    title = "Course Checker UIUC"
    msg ="Please select what you want to do:"
    choices = ["View Course List","Add Course", "Edit Course Checker", "Delete Course", "Run Checker"]
    reply = eg.choicebox(msg=msg,title=title,choices=choices)
    # note that we convert choice to string, in case
    # the user cancelled the choice, and we got None.
    title=str(reply)
    if str(reply) == "View Course List":
        # Concatenate every course's printable form into one text box.
        msg=""
        for course in course_list:
            msg=msg+Course.print_course(course)+"\n\n"
        eg.textbox(msg='',title=title,text=msg,codebox=0)
    if str(reply) == "Add Course":
        fields = ["Department","Number","CRN"]
        fieldValues = []
        fieldValues = eg.multenterbox("Enter the values for the course to be added",title,fields)
        # Re-prompt until every field is non-blank (or the user cancels).
        while 1:
            if fieldValues == None: break
            errmsg = ""
            for i in range(len(fields)):
                if fieldValues[i].strip() == "":
                    errmsg += ('"%s" is a required field.\n\n' % fields[i])
            if errmsg == "":
                break # no problems found
            fieldValues = eg.multenterbox(errmsg, title, fields, fieldValues)
        # NOTE(review): if the user cancelled, fieldValues is None and the
        # pops below raise — confirm intended behaviour.
        department = fieldValues.pop(0)
        number = fieldValues.pop(0)
        CRN = fieldValues.pop(0)
        course=Course(department,number,CRN)
        add_course(course)
    if str(reply) == "Delete Course":
        msg ="Please select the course you want to delete:"
        choices=list()
        for course in course_list:
            course_string=course.department+" "+course.number+ " "+course.CRN
            choices.append(course_string)
        reply = eg.choicebox(msg=msg,title=title,choices=choices)
        # Rebuild a Course from the "DEPT NUM CRN" string to locate the entry.
        delete_list = reply.split()
        department = delete_list.pop(0)
        number = delete_list.pop(0)
        CRN = delete_list.pop(0)
        course=Course(department,number,CRN)
        delete_course(course)
    if str(reply) == "Edit Course Checker":
        choices = ["Change Year/Semester", "Change Email"]
        reply = eg.choicebox("Select what you want to do",title,choices)
        title=str(reply)
        if str(reply) == "Change Year/Semester":
            fields = ["Year","Semester"]
            fieldValues = []
            fieldValues = eg.multenterbox("Enter the new values for the year/semester",title,fields)
            while 1:
                if fieldValues == None: break
                errmsg = ""
                for i in range(len(fields)):
                    if fieldValues[i].strip() == "":
                        errmsg += ('"%s" is a required field.\n\n' % fields[i])
                if errmsg == "":
                    break # no problems found
                fieldValues = eg.multenterbox(errmsg, title, fields, fieldValues)
            year = fieldValues.pop(0)
            semester = fieldValues.pop(0)
            # Replace the module-level Year object and persist it.
            year = Year(year,semester)
            store_to_disk()
        if str(reply) == "Change Email":
            fieldValue = eg.enterbox(msg='Enter the new email address.', title=title, default='', strip=True, image=None, root=None)
            email = fieldValue
            store_to_disk()
    if str(reply) == "Run Checker":
        course_check(course_list,year,email)
        eg.textbox("Run Course Checker ",title=title,text='',codebox=0)
    # Ask whether to loop again; Cancel exits the program.
    msg = "Do you want to continue?"
    title = "Please Confirm"
    if eg.ccbox(msg, title): # show a Continue/Cancel dialog
        pass # user chose Continue
    else:
        sys.exit(0) # user chose Cancel
| {
"repo_name": "arpitmathur/CourseAvailabilityChecker",
"path": "GUI.py",
"copies": "1",
"size": "8463",
"license": "mit",
"hash": 3965745226546013000,
"line_mean": 31.9299610895,
"line_max": 242,
"alpha_frac": 0.6038047974,
"autogenerated": false,
"ratio": 3.843324250681199,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49471290480811986,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Arpit'
import find
import time
import gmailer
'''
Function goes through the course list and checks each course to see if its open or not and if open
it emails the user to notify him/her
@:param course_list the list of courses to check for
@:param year the year and semester to be checked
@:param email - the eamil of the user
'''
def course_check(course_list,year,email):
    """Poll the registration site every `sleepTime` seconds and email the
    user whenever a course is found open.

    NOTE(review): unlike the GUI variant, open courses are never removed from
    course_list here, so the loop re-checks (and re-emails) them forever.
    SECURITY: the sender credentials are hard-coded in source — move them to
    configuration and rotate them.
    """
    #initialize data structures
    semester = year.semester
    year=year.year
    email = email
    count = 0
    sleepTime = 10  # seconds between rounds (the comment below says minutes)
    #while a course isn't available
    while course_list:
        count = count + 1
        if count!=1:
            print ("Please wait for " + str(sleepTime) + " seconds before the next attempt!")
            #sleep five minutes
            time.sleep(sleepTime)
            print ("\nAaaaaaaand we're back! \n")
        print ('Attempt: ' + str(count))
        for course in course_list:
            print ("Checking: " + str(course.department) + ' ' + str(course.number) + ' - CRN: ' + str(course.CRN))
            #check availability
            flag = find.parse_web_page(year,semester, course)
            if( flag == 1):
                print ('Success!')
                print ('Sending email now!')
                gmailer.send_email("coursecheckeruiuc@gmail.com", email, "", str(course.department) + " " + str(course.number) + " IS OPEN", "The CRN is " + str(course.CRN) + ". Register now!", "coursecheckeruiuc@gmail.com","2yellowbananas" )
            else:
                print ("It's Closed!")
"repo_name": "arpitmathur/CourseAvailabilityChecker",
"path": "course_check.py",
"copies": "1",
"size": "1550",
"license": "mit",
"hash": 7235584633744645000,
"line_mean": 34.25,
"line_max": 242,
"alpha_frac": 0.5935483871,
"autogenerated": false,
"ratio": 3.799019607843137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9826862670707939,
"avg_score": 0.013141064847039797,
"num_lines": 44
} |
__author__ = 'Arpit'
import smtplib
'''
This function uses an SMTP server to access the coursechecker gmail account and emails
the user to notify them that the course is open
@:param sending_address the email address that is sending the email
@:param to_address_list the list of emails to send the email to
@:param cc_address_list the list of emails to copy the email to
@:param subject the subject of the email to be sent
@:param message the message of the email to be sent
@:param username the username of the account that will send the email
@:param password the password of the account that will send the email
@:param smptserver the server used to send the email
'''
def send_email(sending_address, to_address_list, cc_address_list,
               subject, message,
               username, password,
               smtpserver='smtp.gmail.com:587'):
    """Send `message` via an authenticated STARTTLS SMTP session.

    Fixes over the original: the connection is now closed even when
    starttls/login/sendmail raises (try/finally), and the unused `problems`
    binding is gone.

    NOTE(review): cc_address_list is written into the Cc: header but is not
    included in the envelope recipients passed to sendmail, so Cc recipients
    never actually receive the mail — confirm whether that is intended.
    """
    header = 'From: %s\n' % sending_address
    header += "To: "+to_address_list+"\n"
    header += "Cc: "+cc_address_list+"\n"
    header += "Subject: "+subject+"\n\n"
    message = header + message
    server = smtplib.SMTP(smtpserver)
    try:
        server.starttls()
        server.login(username,password)
        server.sendmail(sending_address, to_address_list, message)
    finally:
        server.quit()
"repo_name": "arpitmathur/CourseAvailabilityChecker",
"path": "gmailer.py",
"copies": "1",
"size": "1283",
"license": "mit",
"hash": -7413418707060880000,
"line_mean": 40.4193548387,
"line_max": 90,
"alpha_frac": 0.6819953235,
"autogenerated": false,
"ratio": 3.972136222910217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5154131546410217,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Arseniy'
from model.contact import Contact
from random import randrange
import re
def test_phones_on_home_page(app, db):
    """Home-page phone column must equal the merged phones of the DB record."""
    # Seed one contact when the DB is empty so there is something to compare.
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(firstname="John", lastname="Snow", address="Hollywood, 11", email2="john@ya.ru",
                                   email3="snow@hot.com", work_num="+7 999 666 777"))
    contacts_from_home_page = app.contact.get_contact_list()
    contacts_from_db = db.get_contact_list()
    # Match each DB contact to its UI row by id, then compare renderings.
    for con_db in contacts_from_db:
        con_ui = list(filter(lambda x: x.id == con_db.id, contacts_from_home_page))[0]
        assert con_ui.all_phones_from_home_page == merge_phones_like_on_home_page(con_db)
def test_phones_on_contact_view_page(app, db):
    """Every phone of a DB record must appear (with its H:/M:/W:/P: prefix)
    in the text of that contact's detail view."""
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(firstname="John", lastname="Snow", address="Hollywood, 11", email2="john@ya.ru",
                                   email3="snow@hot.com", work_num="+7 999 666 777"))
    contacts_from_db = db.get_contact_list()
    for con_db in contacts_from_db:
        text_from_view_page = app.contact.get_text_from_view_page_by_id(con_db.id)
        phones_from_db = get_phones_like_on_view_page(con_db)
        for phone in phones_from_db:
            assert phone in text_from_view_page
def test_info_on_home_page(app, db):
    """Home-page row (names, address, merged phones/emails) must match the DB."""
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(firstname="John", lastname="Snow", address="Hollywood, 11", email2="john@ya.ru",
                                   email3="snow@hot.com", work_num="+7 999 666 777"))
    contacts_from_db = db.get_contact_list()
    contacts_from_home_page = app.contact.get_contact_list()
    for con_db in contacts_from_db:
        # Match the UI row to the DB record by id.
        con_ui = list(filter(lambda x: x.id == con_db.id, contacts_from_home_page))[0]
        assert con_ui.lastname == strip_entry(con_db.lastname)
        assert con_ui.firstname == strip_entry(con_db.firstname)
        assert con_ui.address == strip_entry(con_db.address)
        assert con_ui.all_phones_from_home_page == merge_phones_like_on_home_page(con_db)
        assert con_ui.all_emails_from_home_page == merge_emails_like_on_home_page(con_db)
def strip_entry(e):
    """Trim surrounding whitespace; pass None through unchanged."""
    return None if e is None else e.strip()
def clear(s):
    """Drop phone-formatting characters: spaces, dashes and parentheses."""
    return s.translate(str.maketrans("", "", "() -"))
def merge_emails_like_on_home_page(con):
    """Join the contact's non-empty emails with newlines, as the home page does."""
    candidates = [con.email, con.email2, con.email3]
    present = [e for e in candidates if e is not None and e != ""]
    return "\n".join(present).strip()
def merge_phones_like_on_home_page(con):
    """Join the contact's cleaned, non-empty phones with newlines, as the
    home page does (formatting characters stripped first)."""
    candidates = [con.home_num, con.mobile_num, con.work_num, con.phone2]
    cleaned = [re.sub("[() -]", "", p) for p in candidates if p is not None]
    return "\n".join(p for p in cleaned if p != "")
def get_phones_like_on_view_page(con):
    """Return the contact's phones rendered as the detail view shows them:
    "H: ...", "M: ...", "W: ...", "P: ...", skipping missing/blank values."""
    labeled = [("H", con.home_num), ("M", con.mobile_num),
               ("W", con.work_num), ("P", con.phone2)]
    return ["%s: %s" % (tag, num)
            for tag, num in labeled
            if num is not None and num != ""]
def is_empty(tel):
    """True when the phone value is missing (None) or a blank string."""
    return tel is None or tel == ''
| {
"repo_name": "arseny-tsyro/python_training",
"path": "test/test_contact_info.py",
"copies": "1",
"size": "3263",
"license": "apache-2.0",
"hash": 7290096702865734000,
"line_mean": 39.2839506173,
"line_max": 115,
"alpha_frac": 0.6064970886,
"autogenerated": false,
"ratio": 3.0269016697588125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4133398758358812,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Arseniy'
from model.contact import Contact
from selenium.webdriver.support.select import Select
import re
class ContactHelper:
    def __init__(self, app):
        # Keep a reference to the application fixture; its `wd` attribute is
        # the shared Selenium WebDriver used by every helper below.
        self.app = app
def load_home_page(self):
wd = self.app.wd
if len(wd.find_elements_by_link_text("Last name")) > 0:
return
wd.find_element_by_link_text("home").click()
    def select_by_index(self, index):
        # Tick the checkbox of the index-th contact row on the current page.
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()
    def select_by_id(self, id):
        # Tick the checkbox whose value attribute equals the contact's id.
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id).click()
    def open_contact_to_edit_by_index(self, index):
        """Open the edit form of the index-th contact on the home page."""
        wd = self.app.wd
        self.load_home_page()
        # go to edit page
        wd.find_elements_by_css_selector('img[alt="Edit"]')[index].click()
def open_contact_to_edit_by_id(self, id):
wd = self.app.wd
self.load_home_page()
# go to edit page
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
row_id = cells[0].find_element_by_name("selected[]").get_attribute("value")
if id == row_id:
cells[7].find_element_by_css_selector('img[alt="Edit"]').click()
break
    def open_contact_view_by_index(self, index):
        """Open the read-only details view of the index-th contact."""
        wd = self.app.wd
        self.load_home_page()
        wd.find_elements_by_css_selector('img[alt="Details"]')[index].click()
def open_contact_view_by_id(self, id):
wd = self.app.wd
self.load_home_page()
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
row_id = cells[0].find_element_by_name("selected[]").get_attribute("value")
if id == row_id:
cells[6].find_element_by_css_selector('img[alt="Details"]').click()
break
def create(self, contact):
wd = self.app.wd
# go to new contact page
wd.find_element_by_link_text("add new").click()
# fill in names
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(contact.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lastname)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(contact.nickname)
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(contact.title)
# fill in company data
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(contact.company)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(contact.address)
# fill in telephone numbers
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(contact.home_num)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(contact.mobile_num)
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(contact.work_num)
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys(contact.fax_num)
# fill in emails
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys(contact.email2)
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys(contact.email3)
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys(contact.homepage)
# fill in dates
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[17]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[17]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[4]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[4]").click()
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(contact.byear)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[16]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[16]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[12]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[12]").click()
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys(contact.ayear)
# fill in additional info
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys(contact.address2)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(contact.phone2)
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys(contact.notes)
# submit
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.load_home_page()
self.contact_cache = None
    def edit_first(self, contact):
        """Convenience wrapper: edit the first listed contact."""
        self.edit_by_index(0, contact)
    def edit_by_index(self, index, contact):
        """Open contact number `index`, overwrite its non-empty fields from
        `contact`, and submit the form."""
        wd = self.app.wd
        self.load_home_page()
        self.open_contact_to_edit_by_index(index)
        # edit data
        self.edit(contact)
        # submit changes
        wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
        self.load_home_page()
        self.contact_cache = None
    def edit_by_id(self, id, contact):
        """Open the contact with checkbox value `id`, overwrite its non-empty
        fields from `contact`, and submit the form."""
        wd = self.app.wd
        self.load_home_page()
        self.open_contact_to_edit_by_id(id)
        # edit data
        self.edit(contact)
        # submit changes
        wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
        self.load_home_page()
        self.contact_cache = None
def edit(self, contact):
wd = self.app.wd
if contact.firstname:
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.firstname)
if contact.middlename:
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(contact.middlename)
if contact.lastname:
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lastname)
if contact.nickname:
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(contact.nickname)
if contact.company:
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(contact.company)
if contact.title:
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(contact.title)
if contact.address:
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(contact.address)
if contact.home_num:
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(contact.home_num)
if contact.mobile_num:
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(contact.mobile_num)
if contact.work_num:
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(contact.work_num)
if contact.fax_num:
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys(contact.fax_num)
if contact.email:
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(contact.email)
if contact.email2:
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys(contact.email2)
if contact.email3:
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys(contact.email3)
if contact.homepage:
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys(contact.homepage)
if not wd.find_element_by_xpath("//div[@id='content']/form[1]/select[1]//option[19]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form[1]/select[1]//option[19]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form[1]/select[2]//option[10]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form[1]/select[2]//option[10]").click()
if contact.byear:
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(contact.byear)
if not wd.find_element_by_xpath("//div[@id='content']/form[1]/select[3]//option[7]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form[1]/select[3]//option[7]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form[1]/select[4]//option[9]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form[1]/select[4]//option[9]").click()
if contact.ayear:
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys(contact.ayear)
if contact.address2:
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys(contact.address2)
if contact.phone2:
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(contact.phone2)
if contact.notes:
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys(contact.notes)
def delete_first(self):
self.delete_by_index(0)
    def delete_by_index(self, index):
        """Delete the contact at 0-based *index* on the home page.

        Confirms the javascript alert and invalidates the contact cache.
        """
        wd = self.app.wd
        self.select_by_index(index)
        # submit deletion
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        # accept and close dialogue window
        wd.switch_to_alert().accept()
        self.load_home_page()
        # Force get_contact_list() to re-read the page next time.
        self.contact_cache = None
    def delete_by_id(self, id):
        """Delete the contact with the given database *id*.

        Confirms the javascript alert and invalidates the contact cache.
        """
        wd = self.app.wd
        self.select_by_id(id)
        # submit deletion
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        # accept and close dialogue window
        wd.switch_to_alert().accept()
        self.load_home_page()
        # Force get_contact_list() to re-read the page next time.
        self.contact_cache = None
    def add_to_group(self, contact, group):
        """Assign *contact* to *group* via the home-page "Add to" control."""
        wd = self.app.wd
        self.load_home_page()
        self.select_by_id(contact.id)
        # Group names in the dropdown may carry surrounding whitespace.
        select = Select(wd.find_element_by_name("to_group"))
        select.select_by_visible_text(group.name.strip())
        wd.find_element_by_name("add").click()
    def remove_from_group(self, contact, group):
        """Remove *contact* from *group* using the group filter on the home page."""
        wd = self.app.wd
        self.load_home_page()
        # First filter the contact list by the group, then select the contact.
        select = Select(wd.find_element_by_name("group"))
        select.select_by_visible_text(group.name.strip())
        self.select_by_id(contact.id)
        wd.find_element_by_name("remove").click()
def count(self):
wd = self.app.wd
self.load_home_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
    def get_contact_list(self):
        """Return all contacts from the home page, cached after the first read.

        The cache is invalidated by the create/edit/delete helpers; a copy of
        the cached list is returned so callers cannot mutate the cache.
        """
        if self.contact_cache is None:
            wd = self.app.wd
            self.load_home_page()
            self.contact_cache = []
            for row in wd.find_elements_by_name("entry"):
                cells = row.find_elements_by_tag_name("td")
                # Home-page table layout: cell 0 - checkbox (value holds the id),
                # cell 1 - last name, cell 2 - first name,
                # cell 4 - merged e-mails, cell 5 - merged phones.
                last_name = cells[1].text
                first_name = cells[2].text
                id = cells[0].find_element_by_tag_name("input").get_attribute("value")
                all_emails = cells[4].text
                all_phones = cells[5].text
                self.contact_cache.append(Contact(id=id, firstname=first_name, lastname=last_name,
                                                  all_phones_from_home_page=all_phones,
                                                  all_emails_from_home_page=all_emails))
        return list(self.contact_cache)
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
id = wd.find_element_by_name("id").get_attribute("value")
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
home_num = wd.find_element_by_name("home").get_attribute("value")
mobile_num = wd.find_element_by_name("mobile").get_attribute("value")
work_num = wd.find_element_by_name("work").get_attribute("value")
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return Contact(id=id, firstname=firstname, lastname=lastname,
home_num=home_num, mobile_num=mobile_num,
work_num=work_num, phone2=phone2,
email=email, email2=email2, email3=email3)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
home_num = re.search("H: (.*)", text).group(1)
mobile_num = re.search("M: (.*)", text).group(1)
work_num = re.search("W: (.*)", text).group(1)
phone2 = re.search("P: (.*)", text).group(1)
return Contact(home_num=home_num, mobile_num=mobile_num,
work_num=work_num, phone2=phone2)
    def get_text_from_view_page(self, index):
        """Return the raw text of the view page of the contact at *index*."""
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        return wd.find_element_by_id("content").text
    def get_text_from_view_page_by_id(self, id):
        """Return the raw text of the view page of the contact with database *id*."""
        wd = self.app.wd
        self.open_contact_view_by_id(id)
        return wd.find_element_by_id("content").text
| {
"repo_name": "arseny-tsyro/python_training",
"path": "fixture/contact.py",
"copies": "1",
"size": "16725",
"license": "apache-2.0",
"hash": -2131739116607417000,
"line_mean": 45.717877095,
"line_max": 108,
"alpha_frac": 0.5910313901,
"autogenerated": false,
"ratio": 3.3564118001204095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9429348160814275,
"avg_score": 0.0036190058812268677,
"num_lines": 358
} |
__author__ = 'Arseniy'
from model.project import Project
class ProjectHelper:
    def __init__(self, app):
        # Keep a reference to the application fixture that owns the webdriver.
        self.app = app
def create(self, project):
wd = self.app.wd
self.load_new_project_page()
# enter values
wd.find_element_by_name("name").click()
wd.find_element_by_name("name").clear()
wd.find_element_by_name("name").send_keys(project.name)
wd.find_element_by_name("description").click()
wd.find_element_by_name("description").clear()
wd.find_element_by_name("description").send_keys(project.description)
# click submit button
wd.find_element_by_css_selector("input.button").click()
self.project_cache = None
    def delete(self, project):
        """Delete *project* via the manage-projects UI and invalidate the cache."""
        wd = self.app.wd
        self.load_projects_page()
        wd.find_element_by_link_text(project.name).click()
        # NOTE(review): positional xpaths -- assumed to be the "delete project"
        # button and its confirmation button; verify against the page markup.
        wd.find_element_by_xpath("//div[4]/form/input[3]").click()
        wd.find_element_by_xpath("//div[2]/form/input[4]").click()
        self.project_cache = None
project_cache = None
    def get_project_list(self):
        """Return all projects from the manage page, cached after the first read."""
        if self.project_cache is None:
            wd = self.app.wd
            self.load_projects_page()
            self.project_cache = []
            # The second "width100" table on the page lists the projects.
            table = wd.find_elements_by_css_selector("table.width100")[1]
            # Skip the two header rows.
            rows = table.find_elements_by_tag_name("tr")[2:]
            for row in rows:
                cols = row.find_elements_by_tag_name("td")
                # Column 0 is the project name, column 4 its description.
                name = cols[0].text
                description = cols[4].text
                self.project_cache.append(Project(name=name, description=description))
        return list(self.project_cache)
    def load_new_project_page(self):
        """Open the "Create New Project" form from the projects page."""
        wd = self.app.wd
        self.load_projects_page()
        wd.find_element_by_css_selector("td.form-title > form > input.button-small").click()
    def load_projects_page(self):
        """Navigate to Manage -> Manage Projects."""
        wd = self.app.wd
        self.load_manage_page()
        wd.find_element_by_link_text("Manage Projects").click()
def load_manage_page(self):
wd = self.app.wd
wd.find_element_by_link_text("Manage").click() | {
"repo_name": "arseny-tsyro/python_training_mantis",
"path": "fixture/project.py",
"copies": "1",
"size": "2132",
"license": "apache-2.0",
"hash": 8910550623397927000,
"line_mean": 34.55,
"line_max": 92,
"alpha_frac": 0.5928705441,
"autogenerated": false,
"ratio": 3.5474209650582362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9636894387527892,
"avg_score": 0.0006794243260687459,
"num_lines": 60
} |
__author__ = 'Arseniy'
from pony.orm import *
from datetime import datetime
from model.group import Group
from model.contact import Contact
from pymysql.converters import decoders
class ORMFixture:
    """Read-mostly database fixture backed by pony ORM entity mappings."""
    # One shared Database object; bound to MySQL in __init__.
    db = Database()

    class ORMGroup(db.Entity):
        # Maps the `group_list` table.
        _table_ = 'group_list'
        id = PrimaryKey(int, column='group_id')
        name = Optional(str, column='group_name')
        header = Optional(str, column='group_header')
        footer = Optional(str, column='group_footer')
        # Many-to-many link through the `address_in_groups` join table.
        contacts = Set(lambda: ORMFixture.ORMContact,
                       table='address_in_groups', column='id', reverse='groups', lazy=True)

    class ORMContact(db.Entity):
        # Maps the `addressbook` table; `deprecated` marks soft-deleted rows.
        _table_ = 'addressbook'
        id = PrimaryKey(int, column='id')
        firstname = Optional(str, column='firstname')
        lastname = Optional(str, column='lastname')
        home_num = Optional(str, column='home')
        mobile_num = Optional(str, column='mobile')
        work_num = Optional(str, column='work')
        phone2 = Optional(str, column='phone2')
        email = Optional(str, column='email')
        email2 = Optional(str, column='email2')
        email3 = Optional(str, column='email3')
        deprecated = Optional(datetime, column='deprecated')
        groups = Set(lambda: ORMFixture.ORMGroup,
                     table='address_in_groups', column='group_id', reverse='contacts', lazy=True)

    def __init__(self, name, host, user, password):
        """Bind the shared Database object to MySQL and generate the mapping."""
        self.db.bind('mysql', host=host, database=name, user=user, password=password, conv=decoders)
        self.db.generate_mapping()
        # sql_debug(True)

    def convert_groups_to_model(self, groups):
        """Convert ORM group entities into plain Group model objects."""
        def convert(group):
            return Group(id=str(group.id), name=group.name, header=group.header, footer=group.footer)
        return list(map(convert, groups))

    def convert_contacts_to_model(self, contacts):
        """Convert ORM contact entities into plain Contact model objects."""
        def convert(contact):
            return Contact(id=str(contact.id), firstname=contact.firstname, lastname=contact.lastname,
                           home_num=contact.home_num, mobile_num=contact.mobile_num,
                           work_num=contact.work_num, phone2=contact.phone2,
                           email=contact.email, email2=contact.email2, email3=contact.email3)
        return list(map(convert, contacts))

    @db_session
    def get_group_list(self):
        """All groups as model objects."""
        return self.convert_groups_to_model(select(g for g in ORMFixture.ORMGroup))

    @db_session
    def get_contact_list(self):
        """All non-deleted contacts as model objects."""
        return self.convert_contacts_to_model(select(c for c in ORMFixture.ORMContact if c.deprecated is None))

    @db_session
    def get_contacts_in_group(self, group):
        """Contacts that are members of *group*."""
        orm_group = self.find_group_by_id(group)
        return self.convert_contacts_to_model(orm_group.contacts)

    @db_session
    def get_contacts_not_in_group(self, group):
        """Non-deleted contacts that are not members of *group*."""
        orm_group = self.find_group_by_id(group)
        return self.convert_contacts_to_model(
            select(c for c in ORMFixture.ORMContact if c.deprecated is None and orm_group not in c.groups))

    @db_session
    def add_contact_to_group(self, contact, group):
        """Attach *contact* to *group* directly in the database."""
        gr = self.find_group_by_id(group)
        gr.contacts.add(self.find_contact_by_id(contact))

    def find_group_by_id(self, group):
        # NOTE(review): assumes a matching row exists; raises IndexError otherwise.
        return list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]

    def find_contact_by_id(self, contact):
        # NOTE(review): assumes a matching row exists; raises IndexError otherwise.
        return list(select(c for c in ORMFixture.ORMContact if c.id == contact.id))[0]
| {
"repo_name": "arseny-tsyro/python_training",
"path": "fixture/orm.py",
"copies": "1",
"size": "3481",
"license": "apache-2.0",
"hash": 508427857461083840,
"line_mean": 38.5568181818,
"line_max": 111,
"alpha_frac": 0.6406205113,
"autogenerated": false,
"ratio": 3.6835978835978835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48242183948978834,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Arseniy'
from sys import maxsize
class Contact:
    def __init__(self, id=None, firstname=None, middlename=None, lastname=None, nickname=None, title=None, company=None,
                 address=None, home_num=None, mobile_num=None, work_num=None, fax_num=None, email=None, email2=None,
                 email3=None, homepage=None, byear=None, ayear=None, address2=None, phone2=None, notes=None,
                 all_phones_from_home_page=None, all_emails_from_home_page=None):
        """Plain data holder for an addressbook contact.

        All fields are optional. `all_phones_from_home_page` and
        `all_emails_from_home_page` hold the merged strings exactly as
        rendered on the home-page contact list.
        """
        self.id = id
        self.firstname = firstname
        self.middlename = middlename
        self.lastname = lastname
        self.nickname = nickname
        self.title = title
        self.company = company
        self.address = address
        self.home_num = home_num
        self.mobile_num = mobile_num
        self.work_num = work_num
        self.fax_num = fax_num
        self.email = email
        self.email2 = email2
        self.email3 = email3
        self.homepage = homepage
        self.byear = byear
        self.ayear = ayear
        self.address2 = address2
        self.phone2 = phone2
        self.notes = notes
        self.all_phones_from_home_page = all_phones_from_home_page
        self.all_emails_from_home_page = all_emails_from_home_page
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.firstname == other.firstname and \
self.lastname == other.lastname
def __repr__(self):
return "%s: %s;%s" % (self.id, self.firstname, self.lastname) | {
"repo_name": "arseny-tsyro/python_training",
"path": "model/contact.py",
"copies": "1",
"size": "1680",
"license": "apache-2.0",
"hash": -5080716490493946000,
"line_mean": 36.3555555556,
"line_max": 120,
"alpha_frac": 0.6,
"autogenerated": false,
"ratio": 3.5443037974683542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46443037974683543,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Arseniy'
import mysql.connector
from model.group import Group
from model.contact import Contact
class DbFixture:
    def __init__(self, name, host, user, password):
        """Open a MySQL connection to database *name* on *host*."""
        self.name = name
        self.host = host
        self.user = user
        self.password = password
        self.connection = mysql.connector.connect(host=host, database=name, user=user, password=password)
        self.connection.autocommit = True  # Flush the cache after every query
def get_group_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
for row in cursor.fetchall():
(id, name, header, footer) = row
list.append(Group(id=str(id), name=name, header=header, footer=footer))
finally:
cursor.close()
return list
def get_contact_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select id, firstname, lastname, home, mobile, work, phone2, email, email2, email3"
" from addressbook where deprecated='0000-00-00 00:00:00'")
for row in cursor.fetchall():
(id, firstname, lastname, home_num, mobile_num, work_num, phone2, email, email2, email3) = row
list.append(Contact(id=str(id), firstname=firstname, lastname=lastname,
home_num=home_num, mobile_num=mobile_num, work_num=work_num, phone2=phone2,
email=email, email2=email2, email3=email3))
finally:
cursor.close()
return list
def destroy(self):
self.connection.close() | {
"repo_name": "arseny-tsyro/python_training",
"path": "fixture/db.py",
"copies": "1",
"size": "1781",
"license": "apache-2.0",
"hash": -7507640418382564000,
"line_mean": 39.5,
"line_max": 111,
"alpha_frac": 0.5884334643,
"autogenerated": false,
"ratio": 3.725941422594142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9798766316278117,
"avg_score": 0.0031217141232050953,
"num_lines": 44
} |
__author__ = 'Arseniy'
class SessionHelper:
    """Login/logout helpers for the addressbook application."""

    def __init__(self, app):
        self.app = app

    def _fill(self, field, value):
        # Re-locate the input for each action, keeping the original call sequence.
        wd = self.app.wd
        wd.find_element_by_name(field).click()
        wd.find_element_by_name(field).clear()
        wd.find_element_by_name(field).send_keys(value)

    def login(self, username, password):
        """Submit the login form with the given credentials."""
        wd = self.app.wd
        self.app.load_login_page()
        self._fill("user", username)
        wd.find_element_by_id("content").click()
        self._fill("pass", password)
        wd.find_element_by_css_selector('input[type="submit"]').click()

    def logout(self):
        """Log the current user out via the "Logout" link."""
        self.app.wd.find_element_by_link_text("Logout").click()

    def ensure_login(self, username, password):
        """Make sure *username* is logged in, switching accounts if necessary."""
        if self.is_logged_in():
            if self.is_logged_in_as(username):
                return
            self.logout()
        self.login(username, password)

    def ensure_logout(self):
        """Log out if anybody is currently logged in."""
        if self.is_logged_in():
            self.logout()

    def is_logged_in(self):
        # A "Logout" link is only rendered for authenticated sessions.
        return len(self.app.wd.find_elements_by_link_text("Logout")) > 0

    def is_logged_in_as(self, username):
        """True when the currently logged-in user is *username*."""
        return self.get_logged_user() == username

    def get_logged_user(self):
        # The user name is shown in the header form as "(name)"; strip the parens.
        return self.app.wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]
| {
"repo_name": "arseny-tsyro/python_training",
"path": "fixture/session.py",
"copies": "1",
"size": "1404",
"license": "apache-2.0",
"hash": -6024874771582733000,
"line_mean": 29.5217391304,
"line_max": 73,
"alpha_frac": 0.5726495726,
"autogenerated": false,
"ratio": 3.375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44476495726000004,
"avg_score": null,
"num_lines": null
} |
__author__ = 'artem'
import numpy as np
from spacepy import dmarray
from Model import Model
import Parameters
import struct
params = Parameters.Parameters
class NurgushBinData(Model):
    """Reader for Nurgush binary snapshot files (Fortran sequential records)."""

    def sub_title(self):
        # Plot caption: the dimensionless snapshot time.
        return ' time = ' + "{:.2}".format(self['time']) + " "

    def get_name(self):
        # Snapshots are identified by their iteration counter.
        return str(self['iter'])

    def __init__(self, file_name, *args, **kwargs):
        super(NurgushBinData, self).__init__(*args, **kwargs)  # Init as PbData
        self.attrs['file'] = file_name
        self.read()
        # Pre-compute log-density for contour plots.
        self['log_rho'] = np.log(self['rho'])

    def read(self):
        """Parse the binary file and populate self with grid and field data."""
        _file = open(self.attrs['file'], 'rb')
        # Guess the byte order from the first record-length marker: an
        # implausible value means the file was written with the other endianness.
        endian = '<'
        reclen_raw = _file.read(4)
        rec_len = struct.unpack(endian + 'i', reclen_raw)[0]
        if rec_len > 10000 or rec_len < 0:
            endian = '>'
            rec_len = struct.unpack(endian + 'i', reclen_raw)[0]
        # Header record: comma-separated variable names; the last one is
        # forced to 'hz' (presumably truncated in the file -- TODO confirm).
        variables = _file.read(rec_len)
        names = [var.strip().lower() for var in variables.split(",") if var.__len__() > 0]
        names[-1] = 'hz'
        self['time'] = struct.unpack(endian + 'd', _file.read(8))[0]
        self['iter'] = struct.unpack(endian + '2i', _file.read(8))[0]
        (I, J, K) = struct.unpack('3l', _file.read(24))
        self["ndim"] = 3
        self['grid'] = dmarray(np.array([I, J, K]))
        self['grid'].attrs['gtype'] = 'Exponential'
        self['grid'].attrs['nx'] = I
        self['grid'].attrs['ny'] = J
        self['grid'].attrs['nz'] = K
        temp_data = dict()
        for i in range(0, names.__len__()):
            name = names[i].lower()
            temp_data[name] = np.zeros(self['grid'])
        # Cell records: one fixed-size block of doubles per (i, j, k) cell.
        for k in xrange(0, self['grid'][2]):
            for j in xrange(0, self['grid'][1]):
                for i in xrange(0, self['grid'][0]):
                    # TODO: Magic number 15
                    data = struct.unpack(endian + '15d', _file.read(names.__len__() * 8))
                    for ii in range(0, names.__len__()):
                        temp_data[names[ii]][i, j, k] = data[ii]
        _file.close()
        print "file closed"
        __dim = 2
        # Convert coordinates from code units to planetary radii.
        normalization = params.ab / params.planet_radius
        k_middle = 0
        if __dim == 2:
            # 2D mode: take the mid-plane slice along z.
            k_middle = self['grid'][2] / 2
            self['x'] = temp_data['x'][:, 0, k_middle] * normalization
            tmp = np.zeros(self['grid'][1])
            for j in range(0, self['grid'][1]):
                tmp[j] = temp_data['y'][0, j, k_middle]
            self['y'] = tmp * normalization
        if __dim == 3:
            self['x'] = temp_data['x'] * normalization
            self['y'] = temp_data['y'] * normalization
            self['z'] = temp_data['z'] * normalization
        for i in range(3, names.__len__()):
            name = names[i].lower()
            if __dim == 2:
                self[name] = np.zeros((self['grid'][1], self['grid'][0]))
            if __dim == 3:
                self[name] = np.zeros((self['grid'][0], self['grid'][1], self['grid'][2]))
        gen = list((name for name in names if name not in ['x', 'y', 'z']))
        for k in range(0, self['grid'][2]):
            for j in range(0, self['grid'][1]):
                for i in range(0, self['grid'][0]):
                    for name in gen:
                        if __dim == 2:
                            # NOTE(review): the 2D array is allocated (ny, nx)
                            # but indexed [i, j] -- verify the orientation.
                            self[name][i, j] = temp_data[name][i, j, k_middle]
                        if __dim == 3:
                            self[name][i, j, k] = temp_data[name][i, j, k]
        del temp_data
| {
"repo_name": "arakcheev/python-data-plotter",
"path": "NurgushBinData.py",
"copies": "1",
"size": "3539",
"license": "mit",
"hash": 4935711268080283000,
"line_mean": 32.7047619048,
"line_max": 90,
"alpha_frac": 0.4690590562,
"autogenerated": false,
"ratio": 3.483267716535433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4452326772735433,
"avg_score": null,
"num_lines": null
} |
__author__ = 'artem'
import numpy as np
import re
from spacepy import dmarray
from Model import Model
import Parameters
params = Parameters.Parameters
class TecData(Model):
    """Reader for Tecplot ASCII data files produced by the simulation."""

    def sub_title(self):
        # Plot caption: the dimensionless snapshot time.
        return ' time = ' + "{:.2}".format(self['time']) + " "

    def get_name(self):
        # File names look like "<prefix>a<iteration>.dat"; return the iteration part.
        name_groups = re.search('(.*)a(.*).dat', self.attrs['file'])
        return name_groups.group(2)

    def __init__(self, file_name, *args, **kwargs):
        super(TecData, self).__init__(*args, **kwargs)  # Init as PbData
        self.attrs['file'] = file_name
        self.read()
        # Pre-compute log-density for contour plots.
        self['log_rho'] = np.log(self['rho'])
        # self['T'] = np.log10((10e7 / 1.3806) * (self['p'] / (self['rho'])))
        print self['grid']

    def __extract_variables(self, str):
        # Parse the VARIABLES header line into a list of variable names.
        # NOTE(review): the parameter shadows the builtin `str`.
        return re.search('VARIABLES = (.*)', str).group(1).replace('"', '').split(", ")

    def __extract_zone(self, str):
        # Parse the ZONE header line for the grid dimensions.
        groups = re.search('ZONE T="(.*)", I=(.*), J=(.*), K=(.*), F=(.*)', str)
        I = int(groups.group(2))
        J = int(groups.group(3))
        K = int(groups.group(4))
        self["ndim"] = 3
        self['grid'] = dmarray([I, J, K])
        self['grid'].attrs['gtype'] = 'Exponential'
        self['grid'].attrs['nx'] = I
        self['grid'].attrs['ny'] = J
        self['grid'].attrs['nz'] = K

    def __extract_aux_data(self, file):
        # Reads a single AUXDATA line; only "time" is currently recognised.
        for i in range(0, 1):
            line = file.readline()
            groups = re.search('AUXDATA (.*) = "(.*)"', line)
            if groups.group(1).lower() == "time":
                self['time'] = float(groups.group(2))

    def read(self):
        """Parse the Tecplot file: header, zone, aux data and the value table."""
        f = open(self.attrs['file'])
        # self.attrs['title'] = self.__extract_from_string(f.readline())
        names = map(str.lower, self.__extract_variables(f.readline()))
        self.__extract_zone(f.readline())
        self.__extract_aux_data(f)
        temp_data = dict()
        for i in range(0, names.__len__()):
            name = names[i].lower()
            temp_data[name] = np.zeros((self['grid'][2], self['grid'][1], self['grid'][0]))
        # Read 3d data
        for k in range(0, self['grid'][2]):
            for j in range(0, self['grid'][1]):
                for i in range(0, self['grid'][0]):
                    data = f.readline().split()
                    for ii in range(0, names.__len__()):
                        temp_data[names[ii]][k, j, i] = data[ii]
        f.close()
        __dim = 2
        # Convert coordinates from code units to planetary radii.
        normalization = params.ab / params.planet_radius
        k_middle = 0
        if __dim == 2:
            # 2D mode: take the mid-plane slice along z.
            k_middle = self['grid'][2] / 2
            self['x'] = temp_data['x'][k_middle, 0, :] * normalization
            tmp = dmarray(np.zeros(self['grid'][1]))
            for j in range(0, self['grid'][1]):
                tmp[j] = temp_data['y'][k_middle, j, 0]
            self['y'] = tmp * normalization
        if __dim == 3:
            self['x'] = temp_data['x'] * normalization
            self['y'] = temp_data['y'] * normalization
            self['z'] = temp_data['z'] * normalization
        for i in range(3, names.__len__()):
            name = names[i].lower()
            if __dim == 2:
                self[name] = np.zeros((self['grid'][1], self['grid'][0]))
            if __dim == 3:
                self[name] = np.zeros((self['grid'][2], self['grid'][1], self['grid'][0]))
        gen = (name for name in names if name not in ['x', 'y', 'z'])
        for name in gen:
            for k in range(0, self['grid'][2]):
                for j in range(0, self['grid'][1]):
                    for i in range(0, self['grid'][0]):
                        if __dim == 2:
                            self[name][j, i] = temp_data[name][k_middle, j, i]
                        if __dim == 3:
                            self[name][k, j, i] = temp_data[name][k, j, i]
        del temp_data
| {
"repo_name": "arakcheev/python-data-plotter",
"path": "TecData.py",
"copies": "1",
"size": "3856",
"license": "mit",
"hash": -9049732075270245000,
"line_mean": 33.4285714286,
"line_max": 91,
"alpha_frac": 0.4683609959,
"autogenerated": false,
"ratio": 3.48014440433213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.444850540023213,
"avg_score": null,
"num_lines": null
} |
__author__ = 'artem'

import os
from FileData import FileData
from Parameters import Parameters
import glob
import multiprocessing
import matplotlib.pyplot as plt

# Input snapshots and output directory for the moment plots.
folder = "/Volumes/Storage/workspace/inasan/SWMF/test/"
pattern = "*.out"
target = folder + "moments/"

if not os.path.exists(target):
    os.makedirs(target)

files = glob.glob(folder + pattern)
# The first snapshot is the reference state for the moment contours.
initial_data = FileData(files.pop(0))

totalFiles = float(len(files))


def calc_staff(i):
    """Render and save the moment-contour plot for snapshot ``files[i]``."""
    file_name = files[i]
    print("Plot file (" + "{0:.2f}".format(i / totalFiles * 100) + "%) " + str(file_name))
    file_data = FileData(file_name)
    figure, ax = plt.subplots()
    moments = file_data.moment_contour(ax, initial_data)
    figure.colorbar(moments)
    figure.suptitle('T=7500, time = ' + str(file_data.attrs['time'] / Parameters.period))
    plt.savefig(target + "moment_" + str(file_data.attrs['iter']) + '.png')
    # plt.show()
    plt.close(figure)


if __name__ == "__main__":
    # Guard the pool: without this, worker processes that import the module
    # would try to create pools of their own (and 'spawn' platforms break).
    pool = multiprocessing.Pool(4)
    pool.map(calc_staff, range(40, 100))
| {
"repo_name": "arakcheev/python-data-plotter",
"path": "plot_moments.py",
"copies": "1",
"size": "1031",
"license": "mit",
"hash": -4117855354455441000,
"line_mean": 20.9361702128,
"line_max": 90,
"alpha_frac": 0.666343356,
"autogenerated": false,
"ratio": 3.059347181008902,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42256905370089015,
"avg_score": null,
"num_lines": null
} |
__author__ = 'artem'

import sys
from FileData import FileData
from TecData import TecData
from Parameters import Parameters
import glob
import matplotlib.pyplot as plt
import os
import re
import ConfigParser

Config = ConfigParser.ConfigParser()
Config.read("parameters.cfg")

# Input snapshot(s) and output directory; an optional command-line argument
# overrides the pattern and looks in the current directory instead.
# folder = "/Users/artem/workspace/inasan/nurgushmpi/bin/data/"
folder = "/Volumes/Storage/workspace/inasan/nurgush/exp_grid/"
# folder = ""
pattern = "a5000.dat"
target = folder + "contour/"

if len(sys.argv) > 1:
    # Explicit argv check instead of the former silent try/except.
    folder = ""
    pattern = sys.argv[1]

if not os.path.exists(target):
    os.makedirs(target)

files = glob.glob(folder + pattern)
# initial_data = FileData(folder + "a0.dat")

totalFiles = float(len(files))


def calc_staff(i):
    """Plot density contours, the Roche lobe and the velocity field of files[i]."""
    file_name = files[i]
    print("Plot file (" + "{0:.2f}".format(i / totalFiles * 100) + "%) " + str(file_name))
    groups = re.search('(.*)a(.*).dat', file_name)
    file_data = TecData(file_name)
    figure, ax = plt.subplots()
    file_data.plot_contour(ax)
    file_data.plot_roche_lobe(ax)
    file_data.plot_velocity_filed(ax)
    figure.suptitle('T=7500, time = ' + "{:.2}".format(file_data['time']))
    # plt.savefig(target + "contour_" + groups.group(2) + '.png')
    plt.show()
    plt.close(figure)


if __name__ == "__main__":
    calc_staff(0)
    # import multiprocessing; pool = multiprocessing.Pool(4); pool.map(calc_staff, range(0, files.__len__()))
| {
"repo_name": "arakcheev/python-data-plotter",
"path": "plot_contours.py",
"copies": "1",
"size": "1424",
"license": "mit",
"hash": -2461633785335928300,
"line_mean": 21.9677419355,
"line_max": 105,
"alpha_frac": 0.6601123596,
"autogenerated": false,
"ratio": 3.0427350427350426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42028474023350426,
"avg_score": null,
"num_lines": null
} |
__author__ = 'artem'

import sys
import os
from TecData import TecData
from Parameters import Parameters
import glob
import matplotlib.pyplot as plt
import re
import multiprocessing

# Input snapshots and output directory for the slice plots.
folder = "/Volumes/Storage/workspace/inasan/nurgush/exp_grid/"
pattern = "*.dat"
target = folder + "slices/"

if not os.path.exists(target):
    os.makedirs(target)

files = glob.glob(folder + pattern)
# The first snapshot is the reference state for the slices.
initial_data = TecData(files.pop(0))

totalFiles = float(len(files))


def calc_staff(i):
    """Save a log-density slice plot for snapshot ``files[i]``."""
    file_name = files[i]
    print("Plot file (" + "{0:.2f}".format(i / totalFiles * 100) + "%) " + str(file_name))
    file_data = TecData(file_name)
    # Output files are named after the iteration number embedded in the input name.
    groups = re.search('(.*)a(.*).dat', file_name)
    figure, ax1 = plt.subplots()
    (_, y) = file_data.slice_y('log_rho', 0.1, ax1, initial_data=initial_data)
    ax1.set_ylabel(r"log rho")
    ax1.grid(which='both')
    figure.suptitle('T=7500, time = ' + "{:.2}".format(file_data['time']))
    plt.savefig(target + "slice_" + groups.group(2) + '.png')
    plt.close(figure)


if __name__ == "__main__":
    # Guard the pool: without this, worker processes that import the module
    # would try to create pools of their own (and 'spawn' platforms break).
    pool = multiprocessing.Pool(4)
    pool.map(calc_staff, range(0, len(files)))
| {
"repo_name": "arakcheev/python-data-plotter",
"path": "plot_srez_pho.py",
"copies": "1",
"size": "3491",
"license": "mit",
"hash": 118869366023374000,
"line_mean": 29.3565217391,
"line_max": 119,
"alpha_frac": 0.6147235749,
"autogenerated": false,
"ratio": 2.6032811334824757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8467084995234476,
"avg_score": 0.050183942629599874,
"num_lines": 115
} |
"""
This and other `proxy` modules implement the time-dependent mean-field procedure using the existing pyscf
implementations as a black box. The main purpose of these modules is to overcome the existing limitations in pyscf
(i.e. real-only orbitals, davidson diagonalizer, incomplete Bloch space, etc). The primary performance drawback is that,
unlike the original pyscf routines with an implicit construction of the eigenvalue problem, these modules construct TD
matrices explicitly by proxying to pyscf density response routines with a O(N^4) complexity scaling. As a result,
regular `numpy.linalg.eig` can be used to retrieve TD roots. Several variants of proxy-TD are available:
* `pyscf.tdscf.proxy`: the molecular implementation;
* `pyscf.pbc.tdscf.proxy`: PBC (periodic boundary condition) Gamma-point-only implementation;
* `pyscf.pbc.tdscf.kproxy_supercell`: PBC implementation constructing supercells. Works with an arbitrary number of
k-points but has an overhead due to ignoring the momentum conservation law. In addition, works only with
time reversal invariant (TRI) models: i.e. the k-point grid has to be aligned and contain at least one TRI momentum.
* `pyscf.pbc.tdscf.kproxy` (this module): same as the above but respects the momentum conservation and, thus, diagonalizes smaller
matrices (the performance gain is the total number of k-points in the model).
"""
# Convention for these modules:
# * PhysERI is the proxying class constructing time-dependent matrices
# * vector_to_amplitudes reshapes and normalizes the solution
# * TDProxy provides a container
from functools import reduce
from pyscf.pbc.tdscf import kproxy_supercell, krhf_slow
import numpy
def kov2ov(nocc, nmo, k):
    """
    Converts k point pairs into ov mask.
    Args:
        nocc (Iterable): occupation numbers per k-point;
        nmo (Iterable): numbers of orbitals per k-point;
        k (ndarray): k-point pairs;

    Returns:
        An ov-mask. Basis order: [k_o, o, k_v, v].
    """
    nocc = numpy.asanyarray(nocc)
    nmo = numpy.asanyarray(nmo)
    nvirt = nmo - nocc
    mask = numpy.zeros((sum(nocc), sum(nvirt)), dtype=bool)
    # Per-k-point end/start offsets of the occupied and virtual sub-blocks.
    # Start offsets are cumsum shifted by the block size itself; this stays
    # correct even when nocc/nmo differ between k-points (the previous
    # "cumsum - cumsum[0]" form was only valid for uniform occupations).
    o_e = numpy.cumsum(nocc)
    o_s = o_e - nocc
    v_e = numpy.cumsum(nvirt)
    v_s = v_e - nvirt
    for k1, k2 in enumerate(k):
        # Mark the occupied block of k1 against the virtual block of k2.
        mask[o_s[k1]:o_e[k1], v_s[k2]:v_e[k2]] = True
    return mask.reshape(-1)
class PhysERI(kproxy_supercell.PhysERI):
    # Proxy ERI that respects momentum conservation: TD matrices are built
    # per momentum-transfer block instead of for the entire supercell.

    def __init__(self, model, proxy, x, mf_constructor, frozen=None, **kwargs):
        """
        A proxy class for calculating the TD matrix blocks (k-point version).
        Args:
            model: the base model with a time reversal-invariant k-point grid;
            proxy: a pyscf proxy with TD response function, one of 'hf', 'dft';
            x (Iterable): the original k-grid dimensions (numbers of k-points per each axis);
            mf_constructor (Callable): a function constructing the mean-field object;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals;
            **kwargs: arguments to `k2s` function constructing supercells;
        """
        super(PhysERI, self).__init__(model, proxy, x, mf_constructor, frozen=frozen, **kwargs)

    def get_ov_space_mask(self):
        """
        Prepares the space mask in the ov form.
        Returns:
            The mask in the ov form.
        """
        return kproxy_supercell.orb2ov(numpy.concatenate(self.space), self.nocc_full, self.nmo_full)

    def kov2ov(self, k):
        """
        Converts k-ov mask into ov mask.
        Args:
            k (ndarray): k-point pairs;

        Returns:
            An ov-mask. Basis order: [k_o, o, k_v, v].
        """
        # Combine the active-space mask with the momentum-selection mask.
        mask = self.get_ov_space_mask()
        return numpy.logical_and(mask, kov2ov(self.nocc_full, self.nmo_full, k))

    def proxy_response_ov_batch(self, k_row, k_col):
        """
        A raw response submatrix corresponding to specific k-points.
        Args:
            k_row (ndarray): sets of k-point pairs (row index);
            k_col (ndarray): sets of k-point pairs (column index);

        Returns:
            A raw response matrix.
        """
        masks_row = tuple(self.kov2ov(i) for i in k_row)
        masks_col = tuple(self.kov2ov(i) for i in k_col)
        # Query the supercell response once for the union of all requested
        # blocks, then slice the individual sub-blocks out of it.
        full_mask_row = reduce(numpy.logical_or, masks_row)
        full_mask_col = reduce(numpy.logical_or, masks_col)
        big = kproxy_supercell.supercell_response_ov(
            self.proxy_vind,
            (full_mask_row, full_mask_col),
            self.nocc_full,
            self.nmo_full,
            self.proxy_is_double(),
            self.model_super.supercell_inv_rotation,
            self.model,
        )
        result = []
        for m_row, m_col in zip(masks_row, masks_col):
            # Re-index each requested mask relative to the union mask.
            m_row_red = m_row[full_mask_row]
            m_col_red = m_col[full_mask_col]
            result.append(tuple(i[m_row_red][:, m_col_red] for i in big))
        return tuple(result)

    # This is needed for krhf_slow.get_block_k_ix
    # NOTE: `im_func` is Python 2 only (unbound-method extraction).
    get_k_ix = krhf_slow.PhysERI.get_k_ix.im_func

    def tdhf_primary_form(self, k):
        """
        A primary form of TD matrices (full).
        Args:
            k (tuple, int): momentum transfer: either a pair of k-point indexes specifying the momentum transfer
            vector or a single integer with the second index assuming the first index being zero;

        Returns:
            Output type: "full", and the corresponding matrix.
        """
        # Momentum-conserving index sets for rows/columns of the A and B blocks.
        r1, r2, c1, c2 = krhf_slow.get_block_k_ix(self, k)
        (a, _), (_, b), (_, b_star), (a_star, _) = self.proxy_response_ov_batch((r1, r1, r2, r2), (c1, c2, c1, c2))
        # Assemble the full TDHF matrix [[A, B], [-B*, -A*]].
        return "full", numpy.block([[a, b], [-b_star.conj(), -a_star.conj()]])
# Re-use the k-space amplitude reshaping/normalization routine from krhf_slow.
vector_to_amplitudes = krhf_slow.vector_to_amplitudes
class TDProxy(kproxy_supercell.TDProxy):
    v2a = staticmethod(vector_to_amplitudes)
    proxy_eri = PhysERI

    def __init__(self, mf, proxy, x, mf_constructor, frozen=None, **kwargs):
        """
        Performs a TD calculation. Roots and eigenvectors are stored in `self.e`, `self.xy`.

        Args:
            mf: the base model with a time-reversal invariant k-point grid;
            proxy: a pyscf proxy with TD response function, one of 'hf', 'dft';
            x (Iterable): the original k-grid dimensions (numbers of k-points per each axis);
            mf_constructor (Callable): a function constructing the mean-field object;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals;
            **kwargs: arguments to `k2s` function constructing supercells;
        """
        super(TDProxy, self).__init__(mf, proxy, x, mf_constructor, frozen=frozen, **kwargs)
        # Results are dictionaries keyed by the momentum-transfer index.
        self.e = {}
        self.xy = {}

    def kernel(self, k=None):
        """
        Calculates eigenstates and eigenvalues of the TDHF problem.

        Args:
            k (tuple, int): momentum transfer: either an index specifying the momentum transfer or a list of such
            indexes;

        Returns:
            Positive eigenvalues and eigenvectors.
        """
        if k is None:
            # Default: solve for every available momentum transfer.
            k = numpy.arange(len(self._scf.kpts))
        elif isinstance(k, int):
            k = [k]
        for point in k:
            self.e[point], self.xy[point] = self.__kernel__(k=point)
        return self.e, self.xy
| {
"repo_name": "sunqm/pyscf",
"path": "pyscf/pbc/tdscf/kproxy.py",
"copies": "1",
"size": "7401",
"license": "apache-2.0",
"hash": -4146807235108862000,
"line_mean": 38.1587301587,
"line_max": 129,
"alpha_frac": 0.6293744089,
"autogenerated": false,
"ratio": 3.5026029342167533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4631977343116753,
"avg_score": null,
"num_lines": null
} |
"""
This and other `_slow` modules implement the time-dependent Hartree-Fock procedure. The primary performance drawback is
that, unlike other 'fast' routines with an implicit construction of the eigenvalue problem, these modules construct
TDHF matrices explicitly via an AO-MO transformation, i.e. with a O(N^5) complexity scaling. As a result, regular
`numpy.linalg.eig` can be used to retrieve TDHF roots in a reliable fashion without any issues related to the Davidson
procedure. Several variants of TDHF are available:
* `pyscf.tdscf.rhf_slow`: the molecular implementation;
* `pyscf.pbc.tdscf.rhf_slow`: PBC (periodic boundary condition) implementation for RHF objects of `pyscf.pbc.scf`
modules;
* `pyscf.pbc.tdscf.krhf_slow_supercell`: PBC implementation for KRHF objects of `pyscf.pbc.scf` modules. Works with
an arbitrary number of k-points but has an overhead due to an effective construction of a supercell.
* `pyscf.pbc.tdscf.krhf_slow_gamma`: A Gamma-point calculation resembling the original `pyscf.pbc.tdscf.krhf`
module. Despite its name, it accepts KRHF objects with an arbitrary number of k-points but finds only few TDHF roots
corresponding to collective oscillations without momentum transfer;
* (this module) `pyscf.pbc.tdscf.krhf_slow`: PBC implementation for KRHF objects of `pyscf.pbc.scf` modules. Works with
an arbitrary number of k-points and employs k-point conservation (diagonalizes matrix blocks separately).
"""
from pyscf.pbc.tdscf import krhf_slow_supercell as td
from pyscf.tdscf import rhf_slow
from pyscf.tdscf.common_slow import mknj2i
import numpy
# Convention for these modules:
# * PhysERI, PhysERI4, PhysERI8 are 2-electron integral routines computed directly (for debug purposes), with a 4-fold
# symmetry and with an 8-fold symmetry
# * vector_to_amplitudes reshapes and normalizes the solution
# * TDRHF provides a container
class PhysERI(td.PhysERI):
    # Diagonalize the full (non-Hermitian) TD matrix; see tdhf_primary_form.
    primary_driver = "full"

    def __init__(self, model, frozen=None):
        """
        The TDHF ERI implementation performing a full transformation of integrals to Bloch functions. No symmetries are
        employed in this class. The ERIs are returned in blocks of k-points.

        Args:
            model (KRHF): the base model;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals for all
            k-points or multiple lists of frozen orbitals for each k-point;
        """
        super(PhysERI, self).__init__(model, frozen=frozen)

    def get_k_ix(self, item, like):
        """
        Retrieves block indexes: row and column.

        Args:
            item (str): a string of 'mknj' letters;
            like (tuple): a 2-tuple with a sample pair of k-points;

        Returns:
            Row and column indexes of a sub-block with conserving momentum.
        """
        item_i = numpy.argsort(mknj2i(item))
        # Encode each index position as "+" or "-" according to its role.
        item_code = ''.join("++--"[i] for i in item_i)
        # Pick the axis permutation of the momentum-conservation table that
        # matches the sign pattern of the requested index order.
        if item_code[0] == item_code[1]:
            kc = self.kconserv  # ++-- --++
        elif item_code[0] == item_code[2]:
            kc = self.kconserv.swapaxes(1, 2)  # +-+- -+-+
        elif item_code[1] == item_code[2]:
            kc = self.kconserv.transpose(2, 0, 1)  # +--+ -++-
        else:
            raise RuntimeError("Unknown case: {}".format(item_code))
        y = kc[like]
        x = kc[0, y[0]]
        return x, y

    def tdhf_diag(self, block):
        """
        Retrieves the merged diagonal block only with specific pairs of k-indexes (k, block[k]).

        Args:
            block (Iterable): a k-point pair `k2 = pair[k1]` for each k1;

        Returns:
            The diagonal block.
        """
        return super(PhysERI, self).tdhf_diag(pairs=enumerate(block))

    def eri_mknj(self, item, pair_row, pair_column):
        """
        Retrieves the merged ERI block using 'mknj' notation with pairs of k-indexes (k1, k1, k2, k2).

        Args:
            item (str): a 4-character string of 'mknj' letters;
            pair_row (Iterable): a k-point pair `k2 = pair_row[k1]` for each k1 (row indexes in the final matrix);
            pair_column (Iterable): a k-point pair `k4 = pair_row[k3]` for each k3 (column indexes in the final matrix);

        Returns:
            The corresponding block of ERI (phys notation).
        """
        return super(PhysERI, self).eri_mknj(
            item,
            pairs_row=enumerate(pair_row),
            pairs_column=enumerate(pair_column),
        )

    def tdhf_primary_form(self, k):
        """
        A primary form of TDHF matrices (full).

        Args:
            k (tuple, int): momentum transfer: either a pair of k-point indexes specifying the momentum transfer
            vector or a single integer with the second index assuming the first index being zero;

        Returns:
            Output type: "full", and the corresponding matrix.
        """
        # Row/column k-index maps of the two momentum-conserving pairings.
        r1, r2, c1, c2 = get_block_k_ix(self, k)
        d1 = self.tdhf_diag(r1)
        d2 = self.tdhf_diag(r2)
        # Assemble A/B blocks as (factor 2) Coulomb-like minus exchange-like
        # contractions of the ERI blocks; the factor of 2 presumably stems
        # from the closed-shell spin summation (RHF) — confirm against td.PhysERI.
        a = d1 + 2 * self["knmj", r1, c1] - self["knjm", r1, c1]
        b = 2 * self["kjmn", r1, c2] - self["kjnm", r1, c2]
        a_ = d2 + 2 * self["mjkn", r2, c2] - self["mjnk", r2, c2]
        b_ = 2 * self["mnkj", r2, c1] - self["mnjk", r2, c1]
        return "full", numpy.block([[a, b], [-b_, -a_]])
class PhysERI4(PhysERI):
    def __init__(self, model, frozen=None):
        """
        The TDHF ERI implementation performing partial transformations of integrals to Bloch functions. A 4-fold
        symmetry of complex-valued functions is employed in this class. The ERIs are returned in blocks of k-points.

        Args:
            model (KRHF): the base model;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals for all
            k-points or multiple lists of frozen orbitals for each k-point;
        """
        # Deliberately bypass the immediate parent constructor and re-use the
        # supercell implementation's one. `im_func` exists on Python 2 unbound
        # methods only; on Python 3 the class attribute is a plain function.
        init = getattr(td.PhysERI4.__init__, "im_func", td.PhysERI4.__init__)
        init(self, model, frozen=frozen)

    # (permutation, conjugate?) pairs valid for complex-valued orbitals.
    symmetries = [
        ((0, 1, 2, 3), False),
        ((1, 0, 3, 2), False),
        ((2, 3, 0, 1), True),
        ((3, 2, 1, 0), True),
    ]

    def __calc_block__(self, item, k):
        # Only momentum-conserving blocks may be computed; everything else is
        # a programming error upstream.
        if self.kconserv[k[:3]] == k[3]:
            calc = getattr(td.PhysERI4.__calc_block__, "im_func", td.PhysERI4.__calc_block__)
            return calc(self, item, k)
        else:
            raise ValueError("K is not conserved: {}, expected {}".format(
                repr(k),
                k[:3] + (self.kconserv[k[:3]],),
            ))
class PhysERI8(PhysERI4):
    def __init__(self, model, frozen=None):
        """
        The TDHF ERI implementation performing partial transformations of integrals to Bloch functions. An 8-fold
        symmetry of real-valued functions is employed in this class. The ERIs are returned in blocks of k-points.

        Args:
            model (KRHF): the base model;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals for all
            k-points or multiple lists of frozen orbitals for each k-point;
        """
        super(PhysERI8, self).__init__(model, frozen=frozen)

    # All eight index permutations of a real-valued ERI; none of them
    # requires complex conjugation.
    symmetries = [(permutation, False) for permutation in (
        (0, 1, 2, 3),
        (1, 0, 3, 2),
        (2, 3, 0, 1),
        (3, 2, 1, 0),
        (2, 1, 0, 3),
        (3, 0, 1, 2),
        (0, 3, 2, 1),
        (1, 2, 3, 0),
    )]
def get_block_k_ix(eri, k):
    """
    Retrieves k indexes of the block with a specific momentum transfer.

    Args:
        eri (TDDFTMatrixBlocks): ERI of the problem;
        k (tuple, int): momentum transfer: either a pair of k-point indexes specifying the momentum transfer
        vector or a single integer with the second index assuming the first index being zero;

    Returns:
        4 arrays: r1, r2, c1, c2 specifying k-indexes of the ERI matrix block.

    +-----------------+-------------+-------------+-----+-----------------+-------------+-------------+-----+-----------------+
    |                 | k34=0,c1[0] | k34=1,c1[1] | ... | k34=nk-1,c1[-1] | k34=0,c2[0] | k34=1,c2[1] | ... | k34=nk-1,c2[-1] |
    +-----------------+-------------+-------------+-----+-----------------+-------------+-------------+-----+-----------------+
    | k12=0,r1[0]     |                                                   |                                                   |
    +-----------------+                                                   |                                                   |
    | k12=1,r1[1]     |                                                   |                                                   |
    +-----------------+ Block r1, c1                                      | Block r1, c2                                      |
    | ...             |                                                   |                                                   |
    +-----------------+                                                   |                                                   |
    | k12=nk-1,r1[-1] |                                                   |                                                   |
    +-----------------+---------------------------------------------------+---------------------------------------------------+
    | k12=0,r2[0]     |                                                   |                                                   |
    +-----------------+                                                   |                                                   |
    | k12=1,r2[1]     |                                                   |                                                   |
    +-----------------+ Block r2, c1                                      | Block r2, c2                                      |
    | ...             |                                                   |                                                   |
    +-----------------+                                                   |                                                   |
    | k12=nk-1,r2[-1] |                                                   |                                                   |
    +-----------------+---------------------------------------------------+---------------------------------------------------+
    """
    # All checks here are for debugging purposes
    if isinstance(k, int):
        # A bare integer means momentum transfer relative to k-point 0.
        k = (0, k)
    r1, c1 = eri.get_k_ix("knmj", k)
    assert r1[k[0]] == k[1]
    # knmj and kjmn share row indexes
    _, c2 = eri.get_k_ix("kjmn", (0, r1[0]))
    assert abs(r1 - _).max() == 0
    # knmj and mnkj share column indexes
    _, r2 = eri.get_k_ix("mnkj", (0, c1[0]))
    assert abs(c1 - _).max() == 0
    # Cross-check the mjkn pairing against both derived maps.
    _r, _c = eri.get_k_ix("mjkn", (0, r2[0]))
    assert abs(r2 - _r).max() == 0
    assert abs(c2 - _c).max() == 0
    _c, _r = eri.get_k_ix("mjkn", (0, c2[0]))
    assert abs(r2 - _r).max() == 0
    assert abs(c2 - _c).max() == 0
    assert abs(r1 - c1).max() == 0
    assert abs(r2 - c2).max() == 0
    # r2 is the inverse permutation of r1.
    assert abs(r1[r2] - numpy.arange(len(r1))).max() == 0
    # The output is, basically, r1, argsort(r1), r1, argsort(r1)
    return r1, r2, c1, c2
def vector_to_amplitudes(vectors, nocc, nmo):
    """
    Transforms (reshapes) and normalizes vectors into amplitudes.

    Args:
        vectors (numpy.ndarray): raw eigenvectors to transform;
        nocc (tuple): numbers of occupied orbitals per k-point;
        nmo (tuple): total numbers of orbitals per k-point;

    Returns:
        Amplitudes with the following shape: (# of roots, 2 (x or y), # of kpts, # of occupied orbitals,
        # of virtual orbitals).
    """
    # Only uniform occupations / orbital spaces are supported so far.
    if any(i != nocc[0] for i in nocc):
        raise NotImplementedError("Varying occupation numbers are not implemented yet")
    nk = len(nocc)
    n_occ = nocc[0]
    if any(i != nmo[0] for i in nmo):
        raise NotImplementedError("Varying AO spaces are not implemented yet")
    n_orb = nmo[0]
    amplitudes = numpy.asanyarray(vectors)
    n_roots = amplitudes.shape[1]
    # Split into (x, y) components, k-points and the o/v grid.
    amplitudes = amplitudes.reshape(2, nk, n_occ, n_orb - n_occ, n_roots)
    # Normalize so that |x|^2 - |y|^2 = 1/2 per root.
    weight = (abs(amplitudes) ** 2).sum(axis=(1, 2, 3))
    weight = 2 * (weight[0] - weight[1])
    amplitudes /= weight ** .5
    return amplitudes.transpose(4, 0, 1, 2, 3)
class TDRHF(rhf_slow.TDRHF):
    eri4 = PhysERI4
    eri8 = PhysERI8
    v2a = staticmethod(vector_to_amplitudes)

    def __init__(self, mf, frozen=None):
        """
        Performs a TDHF calculation. Roots and eigenvectors are stored in `self.e`, `self.xy`.

        Args:
            mf (RHF): the base restricted Hartree-Fock model;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals for all
            k-points or multiple lists of frozen orbitals for each k-point;
        """
        super(TDRHF, self).__init__(mf, frozen=frozen)
        # Results are dictionaries keyed by the momentum-transfer index.
        self.e = {}
        self.xy = {}

    def kernel(self, k=None):
        """
        Calculates eigenstates and eigenvalues of the TDHF problem.

        Args:
            k (tuple, int): momentum transfer: either an index specifying the momentum transfer or a list of such
            indexes;

        Returns:
            Positive eigenvalues and eigenvectors.
        """
        if k is None:
            # Default: solve for every available momentum transfer.
            k = numpy.arange(len(self._scf.kpts))
        elif isinstance(k, int):
            k = [k]
        for point in k:
            self.e[point], self.xy[point] = self.__kernel__(k=point)
        return self.e, self.xy
| {
"repo_name": "sunqm/pyscf",
"path": "pyscf/pbc/tdscf/krhf_slow.py",
"copies": "1",
"size": "13493",
"license": "apache-2.0",
"hash": 3204777457928965600,
"line_mean": 43.9766666667,
"line_max": 131,
"alpha_frac": 0.5011487438,
"autogenerated": false,
"ratio": 3.6977254042203342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46988741480203344,
"avg_score": null,
"num_lines": null
} |
"""
This and other `proxy` modules implement the time-dependent mean-field procedure using the existing pyscf
implementations as a black box. The main purpose of these modules is to overcome the existing limitations in pyscf
(i.e. real-only orbitals, davidson diagonalizer, incomplete Bloch space, etc). The primary performance drawback is that,
unlike the original pyscf routines with an implicit construction of the eigenvalue problem, these modules construct TD
matrices explicitly by proxying to pyscf density response routines with a O(N^4) complexity scaling. As a result,
regular `numpy.linalg.eig` can be used to retrieve TD roots. Several variants of proxy-TD are available:
* `pyscf.tdscf.proxy`: the molecular implementation;
* `pyscf.pbc.tdscf.proxy`: PBC (periodic boundary condition) Gamma-point-only implementation;
* `pyscf.pbc.tdscf.kproxy_supercell`: PBC implementation constructing supercells. Works with an arbitrary number of
k-points but has an overhead due to ignoring the momentum conservation law. In addition, works only with
time reversal invariant (TRI) models: i.e. the k-point grid has to be aligned and contain at least one TRI momentum.
* (this module) `pyscf.pbc.tdscf.kproxy`: same as the above but respects the momentum conservation and, thus, diagonalizes smaller
matrices (the performance gain is the total number of k-points in the model).
"""
# Convention for these modules:
# * PhysERI is the proxying class constructing time-dependent matrices
# * vector_to_amplitudes reshapes and normalizes the solution
# * TDProxy provides a container
from functools import reduce
from pyscf.pbc.tdscf import kproxy_supercell, krhf_slow
import numpy
def kov2ov(nocc, nmo, k):
    """
    Converts k-point pairs into an ov mask.

    Args:
        nocc (Iterable): occupation numbers per k-point;
        nmo (Iterable): numbers of orbitals per k-point;
        k (ndarray): k-point pairs: ``k2 = k[k1]``;

    Returns:
        A flattened ov-mask. Basis order: [k_o, o, k_v, v].
    """
    nocc = numpy.asanyarray(nocc)
    nmo = numpy.asanyarray(nmo)
    nvirt = nmo - nocc
    mask = numpy.zeros((sum(nocc), sum(nvirt)), dtype=bool)
    o_e = numpy.cumsum(nocc)
    # Start offsets are the end offsets minus the block sizes. (The previous
    # form `o_e - o_e[0]` was only correct when every k-point had the same
    # number of occupied orbitals.)
    o_s = o_e - nocc
    v_e = numpy.cumsum(nvirt)
    v_s = v_e - nvirt
    # Mark the (occupied block of k1) x (virtual block of k2) rectangle for
    # every momentum-conserving pair.
    for k1, k2 in enumerate(k):
        mask[o_s[k1]:o_e[k1], v_s[k2]:v_e[k2]] = True
    return mask.reshape(-1)
class PhysERI(kproxy_supercell.PhysERI):
    def __init__(self, model, proxy, x, mf_constructor, frozen=None, **kwargs):
        """
        A proxy class for calculating the TD matrix blocks (k-point version).

        Args:
            model: the base model with a time reversal-invariant k-point grid;
            proxy: a pyscf proxy with TD response function, one of 'hf', 'dft';
            x (Iterable): the original k-grid dimensions (numbers of k-points per each axis);
            mf_constructor (Callable): a function constructing the mean-field object;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals;
            **kwargs: arguments to `k2s` function constructing supercells;
        """
        super(PhysERI, self).__init__(model, proxy, x, mf_constructor, frozen=frozen, **kwargs)

    def get_ov_space_mask(self):
        """
        Prepares the active-space mask in the ov form.

        Returns:
            The mask in the ov form.
        """
        return kproxy_supercell.orb2ov(numpy.concatenate(self.space), self.nocc_full, self.nmo_full)

    def kov2ov(self, k):
        """
        Converts a k-ov mask into an ov mask, restricted to the active space.

        Args:
            k (ndarray): k-point pairs;

        Returns:
            An ov-mask. Basis order: [k_o, o, k_v, v].
        """
        mask = self.get_ov_space_mask()
        return numpy.logical_and(mask, kov2ov(self.nocc_full, self.nmo_full, k))

    def proxy_response_ov_batch(self, k_row, k_col):
        """
        A raw response submatrix corresponding to specific k-points.

        Args:
            k_row (ndarray): sets of k-point pairs (row index);
            k_col (ndarray): sets of k-point pairs (column index);

        Returns:
            A raw response matrix.
        """
        masks_row = tuple(self.kov2ov(i) for i in k_row)
        masks_col = tuple(self.kov2ov(i) for i in k_col)
        # Compute the response once for the union of all requested blocks ...
        full_mask_row = reduce(numpy.logical_or, masks_row)
        full_mask_col = reduce(numpy.logical_or, masks_col)
        big = kproxy_supercell.supercell_response_ov(
            self.proxy_vind,
            (full_mask_row, full_mask_col),
            self.nocc_full,
            self.nmo_full,
            self.proxy_is_double(),
            self.model_super.supercell_inv_rotation,
            self.model,
        )
        # ... then slice the individual requested blocks out of it.
        result = []
        for m_row, m_col in zip(masks_row, masks_col):
            m_row_red = m_row[full_mask_row]
            m_col_red = m_col[full_mask_col]
            result.append(tuple(i[m_row_red][:, m_col_red] for i in big))
        return tuple(result)

    # This is needed for krhf_slow.get_block_k_ix. `im_func` only exists on
    # Python 2 unbound methods; on Python 3 the class attribute is already a
    # plain function, so fall back to the attribute itself.
    get_k_ix = getattr(krhf_slow.PhysERI.get_k_ix, "im_func", krhf_slow.PhysERI.get_k_ix)

    def tdhf_primary_form(self, k):
        """
        A primary form of TD matrices (full).

        Args:
            k (tuple, int): momentum transfer: either a pair of k-point indexes specifying the momentum transfer
            vector or a single integer with the second index assuming the first index being zero;

        Returns:
            Output type: "full", and the corresponding matrix.
        """
        r1, r2, c1, c2 = krhf_slow.get_block_k_ix(self, k)
        (a, _), (_, b), (_, b_star), (a_star, _) = self.proxy_response_ov_batch((r1, r1, r2, r2), (c1, c2, c1, c2))
        return "full", numpy.block([[a, b], [-b_star.conj(), -a_star.conj()]])
# Re-use the k-space amplitude reshaping/normalization routine from krhf_slow.
vector_to_amplitudes = krhf_slow.vector_to_amplitudes
class TDProxy(kproxy_supercell.TDProxy):
    v2a = staticmethod(vector_to_amplitudes)
    proxy_eri = PhysERI

    def __init__(self, mf, proxy, x, mf_constructor, frozen=None, **kwargs):
        """
        Performs a TD calculation. Roots and eigenvectors are stored in `self.e`, `self.xy`.

        Args:
            mf: the base model with a time-reversal invariant k-point grid;
            proxy: a pyscf proxy with TD response function, one of 'hf', 'dft';
            x (Iterable): the original k-grid dimensions (numbers of k-points per each axis);
            mf_constructor (Callable): a function constructing the mean-field object;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals;
            **kwargs: arguments to `k2s` function constructing supercells;
        """
        super(TDProxy, self).__init__(mf, proxy, x, mf_constructor, frozen=frozen, **kwargs)
        # Results are dictionaries keyed by the momentum-transfer index.
        self.e = {}
        self.xy = {}

    def kernel(self, k=None):
        """
        Calculates eigenstates and eigenvalues of the TDHF problem.

        Args:
            k (tuple, int): momentum transfer: either an index specifying the momentum transfer or a list of such
            indexes;

        Returns:
            Positive eigenvalues and eigenvectors.
        """
        if k is None:
            # Default: solve for every available momentum transfer.
            k = numpy.arange(len(self._scf.kpts))
        elif isinstance(k, int):
            k = [k]
        for point in k:
            self.e[point], self.xy[point] = self.__kernel__(k=point)
        return self.e, self.xy
| {
"repo_name": "gkc1000/pyscf",
"path": "pyscf/pbc/tdscf/kproxy.py",
"copies": "1",
"size": "7386",
"license": "apache-2.0",
"hash": 8265019224992194000,
"line_mean": 38.2872340426,
"line_max": 129,
"alpha_frac": 0.6292986732,
"autogenerated": false,
"ratio": 3.5054579971523494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9627734098294511,
"avg_score": 0.0014045144115674276,
"num_lines": 188
} |
"""
This and other `_slow` modules implement the time-dependent Hartree-Fock procedure. The primary performance drawback is
that, unlike other 'fast' routines with an implicit construction of the eigenvalue problem, these modules construct
TDHF matrices explicitly via an AO-MO transformation, i.e. with a O(N^5) complexity scaling. As a result, regular
`numpy.linalg.eig` can be used to retrieve TDHF roots in a reliable fashion without any issues related to the Davidson
procedure. Several variants of TDHF are available:
* `pyscf.tdscf.rhf_slow`: the molecular implementation;
* `pyscf.pbc.tdscf.rhf_slow`: PBC (periodic boundary condition) implementation for RHF objects of `pyscf.pbc.scf`
modules;
* `pyscf.pbc.tdscf.krhf_slow_supercell`: PBC implementation for KRHF objects of `pyscf.pbc.scf` modules. Works with
an arbitrary number of k-points but has an overhead due to an effective construction of a supercell.
* `pyscf.pbc.tdscf.krhf_slow_gamma`: A Gamma-point calculation resembling the original `pyscf.pbc.tdscf.krhf`
module. Despite its name, it accepts KRHF objects with an arbitrary number of k-points but finds only few TDHF roots
corresponding to collective oscillations without momentum transfer;
* (this module) `pyscf.pbc.tdscf.krhf_slow`: PBC implementation for KRHF objects of `pyscf.pbc.scf` modules. Works with
an arbitrary number of k-points and employs k-point conservation (diagonalizes matrix blocks separately).
"""
from pyscf.pbc.tdscf import krhf_slow_supercell as td
from pyscf.tdscf import rhf_slow
from pyscf.tdscf.common_slow import mknj2i
import numpy
# Convention for these modules:
# * PhysERI, PhysERI4, PhysERI8 are 2-electron integral routines computed directly (for debug purposes), with a 4-fold
# symmetry and with an 8-fold symmetry
# * vector_to_amplitudes reshapes and normalizes the solution
# * TDRHF provides a container
class PhysERI(td.PhysERI):
    # Diagonalize the full (non-Hermitian) TD matrix; see tdhf_primary_form.
    primary_driver = "full"

    def __init__(self, model, frozen=None):
        """
        The TDHF ERI implementation performing a full transformation of integrals to Bloch functions. No symmetries are
        employed in this class. The ERIs are returned in blocks of k-points.

        Args:
            model (KRHF): the base model;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals for all
            k-points or multiple lists of frozen orbitals for each k-point;
        """
        super(PhysERI, self).__init__(model, frozen=frozen)

    def get_k_ix(self, item, like):
        """
        Retrieves block indexes: row and column.

        Args:
            item (str): a string of 'mknj' letters;
            like (tuple): a 2-tuple with a sample pair of k-points;

        Returns:
            Row and column indexes of a sub-block with conserving momentum.
        """
        item_i = numpy.argsort(mknj2i(item))
        # Encode each index position as "+" or "-" according to its role.
        item_code = ''.join("++--"[i] for i in item_i)
        # Pick the axis permutation of the momentum-conservation table that
        # matches the sign pattern of the requested index order.
        if item_code[0] == item_code[1]:
            kc = self.kconserv  # ++-- --++
        elif item_code[0] == item_code[2]:
            kc = self.kconserv.swapaxes(1, 2)  # +-+- -+-+
        elif item_code[1] == item_code[2]:
            kc = self.kconserv.transpose(2, 0, 1)  # +--+ -++-
        else:
            raise RuntimeError("Unknown case: {}".format(item_code))
        y = kc[like]
        x = kc[0, y[0]]
        return x, y

    def tdhf_diag(self, block):
        """
        Retrieves the merged diagonal block only with specific pairs of k-indexes (k, block[k]).

        Args:
            block (Iterable): a k-point pair `k2 = pair[k1]` for each k1;

        Returns:
            The diagonal block.
        """
        return super(PhysERI, self).tdhf_diag(pairs=enumerate(block))

    def eri_mknj(self, item, pair_row, pair_column):
        """
        Retrieves the merged ERI block using 'mknj' notation with pairs of k-indexes (k1, k1, k2, k2).

        Args:
            item (str): a 4-character string of 'mknj' letters;
            pair_row (Iterable): a k-point pair `k2 = pair_row[k1]` for each k1 (row indexes in the final matrix);
            pair_column (Iterable): a k-point pair `k4 = pair_row[k3]` for each k3 (column indexes in the final matrix);

        Returns:
            The corresponding block of ERI (phys notation).
        """
        return super(PhysERI, self).eri_mknj(
            item,
            pairs_row=enumerate(pair_row),
            pairs_column=enumerate(pair_column),
        )

    def tdhf_primary_form(self, k):
        """
        A primary form of TDHF matrices (full).

        Args:
            k (tuple, int): momentum transfer: either a pair of k-point indexes specifying the momentum transfer
            vector or a single integer with the second index assuming the first index being zero;

        Returns:
            Output type: "full", and the corresponding matrix.
        """
        # Row/column k-index maps of the two momentum-conserving pairings.
        r1, r2, c1, c2 = get_block_k_ix(self, k)
        d1 = self.tdhf_diag(r1)
        d2 = self.tdhf_diag(r2)
        # Assemble A/B blocks as (factor 2) Coulomb-like minus exchange-like
        # contractions of the ERI blocks; the factor of 2 presumably stems
        # from the closed-shell spin summation (RHF) — confirm against td.PhysERI.
        a = d1 + 2 * self["knmj", r1, c1] - self["knjm", r1, c1]
        b = 2 * self["kjmn", r1, c2] - self["kjnm", r1, c2]
        a_ = d2 + 2 * self["mjkn", r2, c2] - self["mjnk", r2, c2]
        b_ = 2 * self["mnkj", r2, c1] - self["mnjk", r2, c1]
        return "full", numpy.block([[a, b], [-b_, -a_]])
class PhysERI4(PhysERI):
    def __init__(self, model, frozen=None):
        """
        The TDHF ERI implementation performing partial transformations of integrals to Bloch functions. A 4-fold
        symmetry of complex-valued functions is employed in this class. The ERIs are returned in blocks of k-points.

        Args:
            model (KRHF): the base model;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals for all
            k-points or multiple lists of frozen orbitals for each k-point;
        """
        # Deliberately bypass the immediate parent constructor and re-use the
        # supercell implementation's one. `im_func` exists on Python 2 unbound
        # methods only; on Python 3 the class attribute is a plain function.
        init = getattr(td.PhysERI4.__init__, "im_func", td.PhysERI4.__init__)
        init(self, model, frozen=frozen)

    # (permutation, conjugate?) pairs valid for complex-valued orbitals.
    symmetries = [
        ((0, 1, 2, 3), False),
        ((1, 0, 3, 2), False),
        ((2, 3, 0, 1), True),
        ((3, 2, 1, 0), True),
    ]

    def __calc_block__(self, item, k):
        # Only momentum-conserving blocks may be computed; everything else is
        # a programming error upstream.
        if self.kconserv[k[:3]] == k[3]:
            calc = getattr(td.PhysERI4.__calc_block__, "im_func", td.PhysERI4.__calc_block__)
            return calc(self, item, k)
        else:
            raise ValueError("K is not conserved: {}, expected {}".format(
                repr(k),
                k[:3] + (self.kconserv[k[:3]],),
            ))
class PhysERI8(PhysERI4):
    def __init__(self, model, frozen=None):
        """
        The TDHF ERI implementation performing partial transformations of integrals to Bloch functions. An 8-fold
        symmetry of real-valued functions is employed in this class. The ERIs are returned in blocks of k-points.

        Args:
            model (KRHF): the base model;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals for all
            k-points or multiple lists of frozen orbitals for each k-point;
        """
        super(PhysERI8, self).__init__(model, frozen=frozen)

    # All eight index permutations of a real-valued ERI; none of them
    # requires complex conjugation.
    symmetries = [(permutation, False) for permutation in (
        (0, 1, 2, 3),
        (1, 0, 3, 2),
        (2, 3, 0, 1),
        (3, 2, 1, 0),
        (2, 1, 0, 3),
        (3, 0, 1, 2),
        (0, 3, 2, 1),
        (1, 2, 3, 0),
    )]
def get_block_k_ix(eri, k):
    """
    Retrieves k indexes of the block with a specific momentum transfer.

    Args:
        eri (TDDFTMatrixBlocks): ERI of the problem;
        k (tuple, int): momentum transfer: either a pair of k-point indexes specifying the momentum transfer
        vector or a single integer with the second index assuming the first index being zero;

    Returns:
        4 arrays: r1, r2, c1, c2 specifying k-indexes of the ERI matrix block.

    +-----------------+-------------+-------------+-----+-----------------+-------------+-------------+-----+-----------------+
    |                 | k34=0,c1[0] | k34=1,c1[1] | ... | k34=nk-1,c1[-1] | k34=0,c2[0] | k34=1,c2[1] | ... | k34=nk-1,c2[-1] |
    +-----------------+-------------+-------------+-----+-----------------+-------------+-------------+-----+-----------------+
    | k12=0,r1[0]     |                                                   |                                                   |
    +-----------------+                                                   |                                                   |
    | k12=1,r1[1]     |                                                   |                                                   |
    +-----------------+ Block r1, c1                                      | Block r1, c2                                      |
    | ...             |                                                   |                                                   |
    +-----------------+                                                   |                                                   |
    | k12=nk-1,r1[-1] |                                                   |                                                   |
    +-----------------+---------------------------------------------------+---------------------------------------------------+
    | k12=0,r2[0]     |                                                   |                                                   |
    +-----------------+                                                   |                                                   |
    | k12=1,r2[1]     |                                                   |                                                   |
    +-----------------+ Block r2, c1                                      | Block r2, c2                                      |
    | ...             |                                                   |                                                   |
    +-----------------+                                                   |                                                   |
    | k12=nk-1,r2[-1] |                                                   |                                                   |
    +-----------------+---------------------------------------------------+---------------------------------------------------+
    """
    # All checks here are for debugging purposes
    if isinstance(k, int):
        # A bare integer means momentum transfer relative to k-point 0.
        k = (0, k)
    r1, c1 = eri.get_k_ix("knmj", k)
    assert r1[k[0]] == k[1]
    # knmj and kjmn share row indexes
    _, c2 = eri.get_k_ix("kjmn", (0, r1[0]))
    assert abs(r1 - _).max() == 0
    # knmj and mnkj share column indexes
    _, r2 = eri.get_k_ix("mnkj", (0, c1[0]))
    assert abs(c1 - _).max() == 0
    # Cross-check the mjkn pairing against both derived maps.
    _r, _c = eri.get_k_ix("mjkn", (0, r2[0]))
    assert abs(r2 - _r).max() == 0
    assert abs(c2 - _c).max() == 0
    _c, _r = eri.get_k_ix("mjkn", (0, c2[0]))
    assert abs(r2 - _r).max() == 0
    assert abs(c2 - _c).max() == 0
    assert abs(r1 - c1).max() == 0
    assert abs(r2 - c2).max() == 0
    # r2 is the inverse permutation of r1.
    assert abs(r1[r2] - numpy.arange(len(r1))).max() == 0
    # The output is, basically, r1, argsort(r1), r1, argsort(r1)
    return r1, r2, c1, c2
def vector_to_amplitudes(vectors, nocc, nmo):
    """
    Transforms (reshapes) and normalizes vectors into amplitudes.

    Args:
        vectors (numpy.ndarray): raw eigenvectors to transform;
        nocc (tuple): numbers of occupied orbitals per k-point;
        nmo (tuple): total numbers of orbitals per k-point;

    Returns:
        Amplitudes with the following shape: (# of roots, 2 (x or y), # of kpts, # of occupied orbitals,
        # of virtual orbitals).
    """
    # Only uniform occupations / orbital spaces are supported so far.
    if any(i != nocc[0] for i in nocc):
        raise NotImplementedError("Varying occupation numbers are not implemented yet")
    nk = len(nocc)
    n_occ = nocc[0]
    if any(i != nmo[0] for i in nmo):
        raise NotImplementedError("Varying AO spaces are not implemented yet")
    n_orb = nmo[0]
    amplitudes = numpy.asanyarray(vectors)
    n_roots = amplitudes.shape[1]
    # Split into (x, y) components, k-points and the o/v grid.
    amplitudes = amplitudes.reshape(2, nk, n_occ, n_orb - n_occ, n_roots)
    # Normalize so that |x|^2 - |y|^2 = 1/2 per root.
    weight = (abs(amplitudes) ** 2).sum(axis=(1, 2, 3))
    weight = 2 * (weight[0] - weight[1])
    amplitudes /= weight ** .5
    return amplitudes.transpose(4, 0, 1, 2, 3)
class TDRHF(rhf_slow.TDRHF):
    eri4 = PhysERI4
    eri8 = PhysERI8
    v2a = staticmethod(vector_to_amplitudes)

    def __init__(self, mf, frozen=None):
        """
        Performs a TDHF calculation. Roots and eigenvectors are stored in `self.e`, `self.xy`.

        Args:
            mf (RHF): the base restricted Hartree-Fock model;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals for all
            k-points or multiple lists of frozen orbitals for each k-point;
        """
        super(TDRHF, self).__init__(mf, frozen=frozen)
        # Results are dictionaries keyed by the momentum-transfer index.
        self.e = {}
        self.xy = {}

    def kernel(self, k=None):
        """
        Calculates eigenstates and eigenvalues of the TDHF problem.

        Args:
            k (tuple, int): momentum transfer: either an index specifying the momentum transfer or a list of such
            indexes;

        Returns:
            Positive eigenvalues and eigenvectors.
        """
        if k is None:
            # Default: solve for every available momentum transfer.
            k = numpy.arange(len(self._scf.kpts))
        elif isinstance(k, int):
            k = [k]
        for point in k:
            self.e[point], self.xy[point] = self.__kernel__(k=point)
        return self.e, self.xy
| {
"repo_name": "gkc1000/pyscf",
"path": "pyscf/pbc/tdscf/krhf_slow.py",
"copies": "1",
"size": "13478",
"license": "apache-2.0",
"hash": 8428601316981648000,
"line_mean": 44.0769230769,
"line_max": 131,
"alpha_frac": 0.5009645348,
"autogenerated": false,
"ratio": 3.699698051056821,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47006625858568213,
"avg_score": null,
"num_lines": null
} |
"""
This and other `_slow` modules implement the time-dependent procedure. The primary performance drawback is
that, unlike other 'fast' routines with an implicit construction of the eigenvalue problem, these modules construct
TDHF matrices explicitly. As a result, regular `numpy.linalg.eig` can be used to retrieve TDHF roots in a reliable
fashion without any issues related to the Davidson procedure.
This is a helper module defining basic interfaces.
"""
import sys
from pyscf.lib import logger
from pyscf.pbc.tools import get_kconserv
import numpy
from scipy.linalg import solve
from itertools import count, groupby
# Py2/Py3 compatibility: Python 3 has no `unicode` builtin, so alias it to
# `str` for the isinstance checks performed below.
if sys.version_info >= (3,):
    unicode = str
def msize(m):
    """
    Checks whether the matrix is square and returns its size.
    Args:
        m (numpy.ndarray): the matrix to measure;
    Returns:
        An integer with the size.
    """
    n_rows = m.shape[0]
    expected = (n_rows, n_rows)
    if m.shape != expected:
        raise ValueError("Do not recognize the shape (must be a square matrix): {}".format(m.shape))
    return n_rows
def full2ab(full, tolerance=1e-12):
    """
    Transforms a full TD matrix into A and B parts.

    Args:
        full (numpy.ndarray): the full TD matrix;
        tolerance (float): a tolerance for checking whether the full matrix is in the ABBA-form;

    Returns:
        A and B submatrices.

    Raises:
        ValueError: if the matrix is not square, its size is odd, or it is not
            in the ABBA form within `tolerance`.
    """
    s = msize(full)
    if s % 2 != 0:
        raise ValueError("Not an even matrix size: {:d}".format(s))
    s2 = s // 2
    # The ABBA form is [[A, B], [-conj(B), -conj(A)]]: verify the conjugated
    # lower blocks against the upper ones.
    a, b = full[:s2, :s2], full[:s2, s2:]
    b_, a_ = full[s2:, :s2].conj(), full[s2:, s2:].conj()
    delta = max(abs(a + a_).max(), abs(b + b_).max())
    if delta > tolerance:
        raise ValueError("The full matrix is not in the ABBA-form, delta: {:.3e}".format(delta))
    # Fix: the slices were already extracted above; return them instead of
    # re-slicing `full` (the previous code left `a` and `b` as dead stores).
    return a, b
def ab2full(a, b):
    """
    Transforms A and B TD matrices into a full matrix.
    Args:
        a (numpy.ndarray): TD A-matrix;
        b (numpy.ndarray): TD B-matrix;
    Returns:
        The full TD matrix.
    """
    size_a = msize(a)
    size_b = msize(b)
    if size_a != size_b:
        raise ValueError("Input matrix dimensions do not match: {:d} vs {:d}".format(size_a, size_b))
    # Assemble the ABBA structure [[A, B], [-conj(B), -conj(A)]].
    return numpy.block([[a, b], [-b.conj(), -a.conj()]])
def ab2mkk(a, b, tolerance=1e-12):
    """
    Transforms A and B TD matrices into MK and K matrices.
    Args:
        a (numpy.ndarray): TD A-matrix;
        b (numpy.ndarray): TD B-matrix;
        tolerance (float): a tolerance for checking whether the input matrices are real;
    Returns:
        MK and K submatrices.
    """
    # The half-sized MK/K representation only exists for real-valued A and B.
    largest_imag = max(abs(a.imag).max(), abs(b.imag).max())
    if largest_imag > tolerance:
        raise ValueError("A- and/or B-matrixes are complex-valued: no transform is possible")
    a_real = a.real
    b_real = b.real
    tdhf_k = a_real - b_real
    tdhf_m = a_real + b_real
    return tdhf_m.dot(tdhf_k), tdhf_k
def mkk2ab(mk, k):
    """
    Transforms MK and K TD matrices into A and B matrices.
    Args:
        mk (numpy.ndarray): TD MK-matrix;
        k (numpy.ndarray): TD K-matrix;
    Returns:
        A and B submatrices.
    """
    if numpy.iscomplexobj(mk) or numpy.iscomplexobj(k):
        raise ValueError("MK- and/or K-matrixes are complex-valued: no transform is possible")
    # Recover M = MK @ K^{-1} by solving the transposed linear system.
    m = solve(k.T, mk.T).T
    return 0.5 * (m + k), 0.5 * (m - k)
def full2mkk(full):
    """
    Transforms a full TD matrix into MK and K parts.
    Args:
        full (numpy.ndarray): the full TD matrix;
    Returns:
        MK and K submatrices.
    """
    # Chain the two existing transforms: full -> (A, B) -> (MK, K).
    a, b = full2ab(full)
    return ab2mkk(a, b)
def mkk2full(mk, k):
    """
    Transforms MK and K TD matrices into a full TD matrix.
    Args:
        mk (numpy.ndarray): TD MK-matrix;
        k (numpy.ndarray): TD K-matrix;
    Returns:
        The full TD matrix.
    """
    # Chain the two existing transforms: (MK, K) -> (A, B) -> full.
    a, b = mkk2ab(mk, k)
    return ab2full(a, b)
class TDMatrixBlocks(object):
    def tdhf_primary_form(self, *args, **kwargs):
        """
        A primary form of TDHF matrixes.
        Returns:
            Output type: "full", "ab", or "mk" and the corresponding matrix(es).
        """
        raise NotImplementedError

    @staticmethod
    def __check_primary_form__(m):
        # Validates the (form-name, matrices...) tuple returned by subclasses.
        if not isinstance(m, tuple):
            raise ValueError("The value returned by `tdhf_primary_form` is not a tuple")
        if len(m) < 1:
            raise ValueError("Empty tuple returned by `tdhf_primary_form`")
        if not isinstance(m[0], (str, unicode)):
            raise ValueError("The first item returned by `tdhf_primary_form` must be a string")
        forms = dict(ab=3, mk=3, full=2)
        if m[0] not in forms:
            raise ValueError("Unknown form specification returned by `tdhf_primary_form`: {}".format(m[0]))
        if len(m) != forms[m[0]]:
            raise ValueError("The {} form returned by `tdhf_primary_form` must contain {:d} values".format(
                m[0].upper(), forms[m[0]],
            ))

    def tdhf_ab_form(self, *args, **kwargs):
        """
        The A-B form of the TD problem.
        Returns:
            A and B TD matrices.
        """
        form = self.tdhf_primary_form(*args, **kwargs)
        self.__check_primary_form__(form)
        key, payload = form[0], form[1:]
        if key == "ab":
            return payload
        if key == "full":
            return full2ab(*payload)
        # The checker guarantees the only remaining possibility is "mk".
        return mkk2ab(*payload)

    def tdhf_full_form(self, *args, **kwargs):
        """
        The full form of the TD problem.
        Returns:
            The full TD matrix.
        """
        form = self.tdhf_primary_form(*args, **kwargs)
        self.__check_primary_form__(form)
        key, payload = form[0], form[1:]
        if key == "ab":
            return ab2full(*payload)
        if key == "full":
            return payload[0]
        return mkk2full(*payload)

    def tdhf_mk_form(self, *args, **kwargs):
        """
        The MK form of the TD problem.
        Returns:
            MK and K TD matrixes.
        """
        form = self.tdhf_primary_form(*args, **kwargs)
        self.__check_primary_form__(form)
        key, payload = form[0], form[1:]
        if key == "ab":
            return ab2mkk(*payload)
        if key == "full":
            return full2mkk(*payload)
        return payload
def mknj2i(item):
    """
    Transforms "mknj" notation into tensor index order for the ERI.
    Args:
        item (str): an arbitrary transpose of "mknj" letters;
    Returns:
        4 indexes.
    """
    # Map every letter to its canonical position: m->0, k->1, n->2, j->3.
    positions = {letter: pos for pos, letter in enumerate("mknj")}
    return tuple(positions[letter] for letter in item)
class TDERIMatrixBlocks(TDMatrixBlocks):
    # ERI permutation symmetries as (index permutation, conjugate?) pairs.
    # Every computed block is cached under each symmetry-equivalent key.
    # The default holds only the identity; subclasses (4-fold/8-fold) extend it.
    symmetries = [
        ((0, 1, 2, 3), False),
    ]

    def __init__(self):
        """
        This a prototype class for TD calculations based on ERI (TD-HF). It handles integral blocks and
        the diagonal part, see Eq. 7.5 of RevModPhys.36.844.
        """
        # Caching
        self.__eri__ = {}

    def __get_mo_energies__(self, *args, **kwargs):
        """This routine collects occupied and virtual MO energies."""
        raise NotImplementedError

    def __calc_block__(self, item, *args):
        # Computes one ERI block for the given occupied/virtual pattern;
        # subclass responsibility.
        raise NotImplementedError

    def tdhf_diag(self, *args):
        """
        Retrieves the diagonal block.
        Args:
            *args: args passed to `__get_mo_energies__`;
        Returns:
            The diagonal block.
        """
        e_occ, e_virt = self.__get_mo_energies__(*args)
        # Orbital energy differences (e_virt - e_occ) for every (occ, virt) pair.
        diag = (- e_occ[:, numpy.newaxis] + e_virt[numpy.newaxis, :]).reshape(-1)
        return numpy.diag(diag).reshape((len(e_occ) * len(e_virt), len(e_occ) * len(e_virt)))

    def eri_ov(self, item, *args):
        """
        Retrieves ERI block using 'ov' notation.
        Args:
            item (str): a 4-character string of 'o' and 'v' letters;
            *args: other args passed to `__calc_block__`;
        Returns:
            The corresponding block of ERI (4-tensor, phys notation).
        """
        if len(item) != 4 or not isinstance(item, str) or not set(item).issubset('ov'):
            raise ValueError("Unknown item: {}".format(repr(item)))
        args = (tuple(item), ) + args
        if args in self.__eri__:
            return self.__eri__[args]
        result = self.__calc_block__(*args)
        # Store the block under every symmetry-equivalent key so each unique
        # integral tensor is computed only once.
        for permutation, conjugation in self.symmetries:
            permuted_args = tuple(
                tuple(arg[_i] for _i in permutation)
                for arg in args
            )
            if conjugation:
                self.__eri__[permuted_args] = result.transpose(*permutation).conj()
            else:
                self.__eri__[permuted_args] = result.transpose(*permutation)
        return result

    def eri_mknj(self, item, *args):
        """
        Retrieves ERI block using 'mknj' notation.
        Args:
            item (str): a 4-character string of 'mknj' letters;
            *args: other arguments passed to `get_block_ov_notation`;
        Returns:
            The corresponding block of ERI (matrix with paired dimensions).
        """
        if len(item) != 4 or not isinstance(item, str) or set(item) != set('mknj'):
            raise ValueError("Unknown item: {}".format(repr(item)))
        item = mknj2i(item)
        # Even canonical positions ('m', 'n') map to occupied, odd ('k', 'j')
        # to virtual orbitals.
        n_ov = ''.join('o' if i % 2 == 0 else 'v' for i in item)
        args = tuple(
            tuple(arg[i] for i in item)
            for arg in args
        )
        result = self.eri_ov(n_ov, *args).transpose(*numpy.argsort(item))
        i, j, k, l = result.shape
        # Pair the tensor dimensions into a matrix: rows (i, j), columns (k, l).
        result = result.reshape((i * j, k * l))
        return result

    def __getitem__(self, item):
        # Dispatches self["mknj..."] / self["ov...", extra args] to the getters.
        if isinstance(item, str):
            spec, args = item, tuple()
        else:
            spec, args = item[0], item[1:]
        if set(spec) == set("mknj"):
            return self.eri_mknj(spec, *args)
        elif set(spec).issubset("ov"):
            return self.eri_ov(spec, *args)
        else:
            raise ValueError("Unknown item: {}".format(repr(item)))

    def tdhf_primary_form(self, *args, **kwargs):
        """
        A primary form of TDHF matrixes (AB).
        Returns:
            Output type: "ab", and the corresponding matrixes.
        """
        d = self.tdhf_diag(*args, **kwargs)
        # NOTE(review): these are the standard TDHF A/B blocks assembled from
        # 'mknj' transposes of the ERI -- confirm against Eq. 7.5 of
        # RevModPhys.36.844 referenced in the class docstring.
        a = d + 2 * self["knmj"] - self["knjm"]
        b = 2 * self["kjmn"] - self["kjnm"]
        return "ab", a, b
class TDProxyMatrixBlocks(TDMatrixBlocks):
    def __init__(self, model):
        """
        This a prototype class for TD calculations based on proxying pyscf classes such as TDDFT. It is a work-around
        class. It accepts a `pyscf.tdscf.*` class and uses its matvec to construct a full-sized TD matrix.
        Args:
            model: a pyscf base model to extract TD matrix from;
        """
        super(TDProxyMatrixBlocks, self).__init__()
        self.proxy_model = model
        # Wrap the proxied matvec so every call is recorded for statistics.
        vind, diag = self.proxy_model.gen_vind(self.proxy_model._scf)
        self.proxy_vind = VindTracker(vind)
        self.proxy_diag = diag

    def tdhf_primary_form(self, *args, **kwargs):
        raise NotImplementedError
def format_frozen_mol(frozen, nmo):
    """
    Formats the argument into a mask array of bools where False values correspond to frozen molecular orbitals.
    Args:
        frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals;
        nmo (int): the total number of molecular orbitals;
    Returns:
        The mask array.
    """
    mask = numpy.ones(nmo, dtype=bool)
    if frozen is None:
        return mask
    if isinstance(frozen, int):
        # Freeze the lowest `frozen` orbitals.
        mask[:frozen] = False
    elif isinstance(frozen, (tuple, list, numpy.ndarray)):
        # Freeze an explicit list of orbital indexes.
        mask[frozen] = False
    else:
        raise ValueError("Cannot recognize the 'frozen' argument: expected None, int or Iterable")
    return mask
class MolecularMFMixin(object):
    def __init__(self, model, frozen=None):
        """
        A mixin to support custom slices of mean-field attributes: `mo_coeff`, `mo_energy`, ...
        Molecular version. Also supports single k-point inputs.
        Args:
            model: the base model;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals;
        """
        # A periodic (PBC) model is detected by the presence of `kpts`;
        # only the single-k-point case is accepted here.
        self.__is_k__ = False
        if "kpts" in dir(model):
            self.__is_k__ = True
            if len(model.kpts) != 1:
                raise ValueError("Only a single k-point supported, found: model.kpts = {}".format(model.kpts))
        self.model = model
        # Boolean mask over MOs; False marks frozen orbitals.
        self.space = format_frozen_mol(frozen, len(self.squeeze(model.mo_energy)))

    def squeeze(self, x):
        """Squeezes quantities in the case of a PBC model."""
        # PBC quantities carry a leading k-point axis; drop it (single k-point).
        return x[0] if self.__is_k__ else x

    @property
    def mo_coeff(self):
        """MO coefficients (frozen orbitals excluded)."""
        return self.squeeze(self.model.mo_coeff)[:, self.space]

    @property
    def mo_energy(self):
        """MO energies (frozen orbitals excluded)."""
        return self.squeeze(self.model.mo_energy)[self.space]

    @property
    def mo_occ(self):
        """MO occupation numbers (frozen orbitals excluded)."""
        return self.squeeze(self.model.mo_occ)[self.space]

    @property
    def nocc(self):
        """The number of occupied orbitals."""
        # Occupations sum to 2 per occupied orbital (restricted reference).
        return int(self.squeeze(self.model.mo_occ)[self.space].sum() // 2)

    @property
    def nmo(self):
        """The total number of molecular orbitals."""
        return self.space.sum()

    @property
    def mo_coeff_full(self):
        """MO coefficients (including frozen orbitals)."""
        return self.squeeze(self.model.mo_coeff)

    @property
    def nocc_full(self):
        """The true (including frozen degrees of freedom) number of occupied orbitals."""
        return int(self.squeeze(self.model.mo_occ).sum() // 2)

    @property
    def nmo_full(self):
        """The true (including frozen degrees of freedom) total number of molecular orbitals."""
        return len(self.space)
def format_frozen_k(frozen, nmo, nk):
    """
    Formats the argument into a mask array of bools where False values correspond to frozen orbitals for each k-point.
    Args:
        frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals for all k-points or
        multiple lists of frozen orbitals for each k-point;
        nmo (int): the total number of molecular orbitals;
        nk (int): the total number of k-points;
    Returns:
        The mask array.
    """
    mask = numpy.ones((nk, nmo), dtype=bool)
    if frozen is None:
        return mask
    if isinstance(frozen, int):
        # Freeze the lowest `frozen` orbitals at every k-point.
        mask[:, :frozen] = False
    elif isinstance(frozen, (tuple, list, numpy.ndarray)):
        if len(frozen) > 0:
            if isinstance(frozen[0], int):
                # One shared list of frozen orbitals for all k-points.
                mask[:, frozen] = False
            else:
                # A separate list of frozen orbitals per k-point.
                for k_index in range(nk):
                    mask[k_index, frozen[k_index]] = False
    else:
        raise ValueError("Cannot recognize the 'frozen' argument: expected None, int or Iterable")
    return mask
def k_nocc(model):
    """
    Retrieves occupation numbers.
    Args:
        model (RHF): the model;
    Returns:
        Numbers of occupied orbitals in the model.
    """
    # Each k-point contributes sum(occupations) / 2 doubly-occupied orbitals.
    return tuple(int(occ.sum() // 2) for occ in model.mo_occ)
def k_nmo(model):
    """
    Retrieves number of AOs per k-point.
    Args:
        model (RHF): the model;
    Returns:
        Numbers of AOs in the model.
    """
    # The second axis of each coefficient matrix counts molecular orbitals.
    return tuple(coeff.shape[1] for coeff in model.mo_coeff)
class PeriodicMFMixin(object):
    def __init__(self, model, frozen=None):
        """
        A mixin to support custom slices of mean-field attributes: `mo_coeff`, `mo_energy`, ...
        PBC version.
        Args:
            model: the base model;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals;
        """
        self.model = model
        # Boolean mask of shape (nk, nmo); False marks frozen orbitals.
        self.space = format_frozen_k(frozen, len(model.mo_energy[0]), len(model.kpts))
        # NOTE(review): axes 1 and 2 of pyscf's momentum-conservation table
        # are swapped here -- presumably to match this module's k-index
        # convention; confirm before relying on the axis order.
        self.kconserv = get_kconserv(self.model.cell, self.model.kpts).swapaxes(1, 2)

    @property
    def mo_coeff(self):
        """MO coefficients per k-point (frozen orbitals excluded)."""
        return tuple(i[:, j] for i, j in zip(self.model.mo_coeff, self.space))

    @property
    def mo_energy(self):
        """MO energies per k-point (frozen orbitals excluded)."""
        return tuple(i[j] for i, j in zip(self.model.mo_energy, self.space))

    @property
    def mo_occ(self):
        """MO occupation numbers per k-point (frozen orbitals excluded)."""
        return tuple(i[j] for i, j in zip(self.model.mo_occ, self.space))

    @property
    def nocc(self):
        """The number of occupied orbitals."""
        # Note: counts occupations of this (sliced) object, not the raw model.
        return k_nocc(self)

    @property
    def nmo(self):
        """The total number of molecular orbitals."""
        return k_nmo(self)

    @property
    def mo_coeff_full(self):
        """MO coefficients (including frozen orbitals)."""
        return self.model.mo_coeff

    @property
    def nocc_full(self):
        """The true (including frozen degrees of freedom) number of occupied orbitals."""
        return k_nocc(self.model)

    @property
    def nmo_full(self):
        """The true (including frozen degrees of freedom) total number of molecular orbitals."""
        return k_nmo(self.model)
class VindTracker(object):
def __init__(self, vind):
"""
Tracks calls to `vind` (a matrix-vector multiplication density response routine).
Args:
vind (Callable): a matvec product routine;
"""
self.vind = vind
self.args = self.results = self.errors = None
self.reset()
def reset(self):
"""
Resets statistics.
"""
self.args = []
self.results = []
self.errors = []
def __call__(self, v):
if not isinstance(v, numpy.ndarray):
raise ValueError("The input is not an array")
self.args.append(v.shape)
try:
r = self.vind(v)
except Exception as e:
self.results.append(None)
self.errors.append(e)
raise
r = numpy.array(r)
self.results.append(r.shape)
self.errors.append(None)
return r
def __iter__(self):
for i, o, e in zip(self.args, self.results, self.errors):
yield i, o, e
@property
def ncalls(self):
return len(self.args)
@property
def msize(self):
for i in self.results:
if i is not None:
return i[1]
return None
@property
def elements_total(self):
return self.msize ** 2
@property
def elements_calc(self):
return sum(map(lambda i: i[0] * i[1] if i is not None else 0, self.results))
@property
def ratio(self):
return 1.0 * self.elements_calc / self.elements_total
def text_stats(self):
return "--------------------\nVind call statistics\n--------------------\n" \
" calls: {total_calls:d}\n" \
" elements total: {total_elems:d} ({size})\n" \
" elements calculated: {total_calc:d}\n" \
" ratio: {ratio:.3f}".format(
total_calls=self.ncalls,
total_elems=self.elements_total,
size="x".join((str(self.msize),) * 2),
total_calc=self.elements_calc,
ratio=self.ratio,
)
def eig(m, driver=None, nroots=None, half=True):
    """
    Eigenvalue problem solver.

    Args:
        m (numpy.ndarray): the matrix to diagonalize;
        driver (str): one of the drivers ('eig' is the only one supported);
        nroots (int): the number of roots ot calculate (all by default);
        half (bool): if True, implies spectrum symmetry and takes only a half of eigenvalues;

    Returns:
        Eigenvalues and eigenvectors, sorted in ascending eigenvalue order.

    Raises:
        ValueError: on an unknown `driver`.
    """
    if driver is None:
        driver = 'eig'
    if driver == 'eig':
        vals, vecs = numpy.linalg.eig(m)
        # Sort roots in ascending order; columns of `vecs` follow the values.
        order = numpy.argsort(vals)
        vals, vecs = vals[order], vecs[:, order]
        if half:
            # For a symmetric +/- spectrum, keep the upper (positive) half.
            vals, vecs = vals[len(vals) // 2:], vecs[:, vecs.shape[1] // 2:]
        # Fix: removed the dead no-op statement `vecs = vecs[:, ]`.
        vals, vecs = vals[:nroots], vecs[:, :nroots]
    else:
        raise ValueError("Unknown driver: {}".format(driver))
    return vals, vecs
def kernel(eri, driver=None, fast=True, nroots=None, **kwargs):
    """
    Calculates eigenstates and eigenvalues of the TDHF problem.
    Args:
        eri (TDDFTMatrixBlocks): ERI;
        driver (str): one of the eigenvalue problem drivers;
        fast (bool): whether to run diagonalization on smaller matrixes;
        nroots (int): the number of roots to calculate;
        **kwargs: arguments to `eri.tdhf_matrix`;
    Returns:
        Positive eigenvalues and eigenvectors.
    """
    if not isinstance(eri, TDMatrixBlocks):
        raise ValueError("The argument must be ERI object")

    driver_label = "'{}'".format(driver) if driver is not None else "a default method"

    if fast:
        # Diagonalize the half-sized (MK, K) problem, then rebuild the
        # X +/- Y eigenvectors of the full problem from its solutions.
        logger.debug1(eri.model, "Preparing TDHF matrix (fast) ...")
        tdhf_mk, tdhf_k = eri.tdhf_mk_form(**kwargs)
        logger.debug1(eri.model, "Diagonalizing a {} matrix with {} ...".format(
            'x'.join(map(str, tdhf_mk.shape)), driver_label,
        ))
        squared_vals, vecs_x = eig(tdhf_mk, driver=driver, nroots=nroots, half=False)
        vals = squared_vals ** .5
        vecs_y = (1. / vals)[numpy.newaxis, :] * tdhf_k.dot(vecs_x)
        vecs_u, vecs_v = vecs_y + vecs_x, vecs_y - vecs_x
        return vals, numpy.concatenate((vecs_u, vecs_v), axis=0)

    logger.debug1(eri.model, "Preparing TDHF matrix ...")
    full_matrix = eri.tdhf_full_form(**kwargs)
    logger.debug1(eri.model, "Diagonalizing a {} matrix with {} ...".format(
        'x'.join(map(str, full_matrix.shape)), driver_label,
    ))
    return eig(full_matrix, driver=driver, nroots=nroots)
class TDBase(object):
    # Vector-to-amplitudes converter; subclasses bind a staticmethod here.
    v2a = None

    def __init__(self, mf, frozen=None):
        """
        Performs TD calculation. Roots and eigenvectors are stored in `self.e`, `self.xy`.
        Args:
            mf: the mean-field model;
            frozen (int, Iterable): the number of frozen valence orbitals or the list of frozen orbitals;
        """
        self._scf = mf
        self.driver = None
        self.nroots = None
        self.eri = None
        self.xy = None
        self.e = None
        self.frozen = frozen
        # The half-sized (MK, K) fast path only applies to real MO coefficients.
        self.fast = not numpy.iscomplexobj(numpy.asanyarray(mf.mo_coeff))

    def __kernel__(self, **kwargs):
        """Silent implementation of kernel which does not change attributes."""
        # Lazily build and cache the ERI object on first use.
        if self.eri is None:
            self.eri = self.ao2mo()
        e, xy = kernel(
            self.eri,
            driver=self.driver,
            nroots=self.nroots,
            fast=self.fast,
            **kwargs
        )
        xy = self.vector_to_amplitudes(xy)
        return e, xy

    def kernel(self):
        """
        Calculates eigenstates and eigenvalues of the TDHF problem.
        Returns:
            Positive eigenvalues and eigenvectors.
        """
        self.e, self.xy = self.__kernel__()
        return self.e, self.xy

    def ao2mo(self):
        """
        Picks ERI: either 4-fold or 8-fold symmetric.
        Returns:
            A suitable ERI.
        """
        raise NotImplementedError

    def vector_to_amplitudes(self, vectors):
        """
        Transforms (reshapes) and normalizes vectors into amplitudes.
        Args:
            vectors (numpy.ndarray): raw eigenvectors to transform;
        Returns:
            Amplitudes with the following shape: (# of roots, 2 (x or y), # of occupied orbitals, # of virtual orbitals).
        """
        return self.v2a(vectors, self.eri.nocc, self.eri.nmo)
def format_mask(x):
    """
    Formats a mask into a readable string.
    Args:
        x (ndarray): an array with the mask;
    Returns:
        A readable string with the mask.
    """
    x = numpy.asanyarray(x)
    if len(x) == 0:
        return "(empty)"
    if x.dtype == bool:
        # Convert a boolean mask into the list of True positions.
        x = numpy.argwhere(x)[:, 0]
    # Group consecutive indexes: within one run, value minus a running
    # counter stays constant.
    runs = tuple(list(g) for _, g in groupby(x, lambda n, c=count(): n - next(c)))
    pieces = []
    for run in runs:
        if len(run) > 1:
            pieces.append("{:d}-{:d}".format(run[0], run[-1]))
        else:
            pieces.append("{:d}".format(run[0]))
    return ",".join(pieces)
| {
"repo_name": "gkc1000/pyscf",
"path": "pyscf/tdscf/common_slow.py",
"copies": "1",
"size": "24029",
"license": "apache-2.0",
"hash": 1366597248966445800,
"line_mean": 29.8064102564,
"line_max": 121,
"alpha_frac": 0.567647426,
"autogenerated": false,
"ratio": 3.6139269063016997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46815743323017,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Artem Sliusar'

# Suit indexes.
heart = 0
diamond = 1
club = 2
spade = 3

# Rank indexes, ascending from deuce to ace.
Two = 0
Three = 1
Four = 2
Five = 3
Six = 4
Seven = 5
Eight = 6
Nine = 7
Ten = 8
Jack = 9
Queen = 10
King = 11
Ace = 12

# Hand-combination strength indexes (higher is stronger).
Royal_F = 9
Straight_F = 8
Four_of_K = 7
Full_House = 6
Flash = 5
Straight = 4
Three_of_a_Kind = 3
Two_pair = 2
One_pair = 1

# Server rank string -> rank index.
# Fix: this dictionary was previously defined twice with identical content;
# the redundant duplicate definition has been removed.
convertDictRanks = {
    "2": Two,
    "3": Three,
    "4": Four,
    "5": Five,
    "6": Six,
    "7": Seven,
    "8": Eight,
    "9": Nine,
    "10": Ten,
    "J": Jack,
    "Q": Queen,
    "K": King,
    "A": Ace,
}

# Server suit string -> suit index.
converDictSuit = {
    "hearts": heart,
    "spades": spade,
    "clubs": club,
    "diamonds": diamond,
}
def server_to_propobility_gen(card_list):
    """
    Converts server-side card dicts into (suit_index, rank_index) tuples.

    Args:
        card_list: iterable of dicts with "suit" and "rank" string keys;

    Returns:
        A list of (suit_index, rank_index) tuples.
    """
    # Fix: removed the dead statement `a = tuple` (it bound the builtin type
    # and was immediately rebound); the loop is now a comprehension.
    return [
        (converDictSuit[card["suit"]], convertDictRanks[card["rank"]])
        for card in card_list
    ]
def server_to_table(hand):
    """
    Converts the first two server-side cards into standard hand notation.

    IN: list of dicts with "rank" and "suit" string keys
    OUT: e.g. "ATs" (suited), "98o" (offsuit), "KK" (pocket pair)
    """
    ranks = []
    suits = []
    # Fix: `xrange` is Python-2-only; `range` behaves identically here and
    # keeps the module importable on Python 3.
    for i in range(2):
        rank = hand[i]["rank"]
        suit = hand[i]["suit"]
        if rank == "10":
            rank = "T"  # tens are written as "T" in hand notation
        ranks.append(rank)
        suits.append(suit)
    hand_converted = ranks[0] + ranks[1]
    if ranks[0] == ranks[1]:
        # A pocket pair carries no suitedness suffix.
        return hand_converted
    if suits[0] == suits[1]:
        hand_converted += "s"
    else:
        hand_converted += "o"
    return hand_converted
| {
"repo_name": "Yarmorgun/poker-player-kraken",
"path": "converters.py",
"copies": "1",
"size": "2421",
"license": "mit",
"hash": 1803984931747145500,
"line_mean": 20.8108108108,
"line_max": 73,
"alpha_frac": 0.3453118546,
"autogenerated": false,
"ratio": 3.513788098693759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43590999532937585,
"avg_score": null,
"num_lines": null
} |
import os
from os.path import join
import matplotlib.pyplot as plt
from sacred import Experiment
from sacred.observers import FileStorageObserver
from modl.datasets.image import load_image
from modl.decomposition.image import ImageDictFact, DictionaryScorer
from modl.feature_extraction.image import LazyCleanPatchExtractor
from modl.plotting.image import plot_patches
from modl.utils.system import get_output_dir
# Sacred experiment setup: every run stores its results and artifacts in a
# per-run subdirectory under `<output dir>/decompose_images`.
exp = Experiment('decompose_images')
base_artifact_dir = join(get_output_dir(), 'decompose_images')
exp.observers.append(FileStorageObserver.create(basedir=base_artifact_dir))
@exp.config
def config():
    # Sacred config function: each local assignment below is captured by
    # name and injected into `decompose_images` -- do not rename variables.
    batch_size = 400
    learning_rate = 0.92
    reduction = 10
    alpha = 0.08
    n_epochs = 12
    n_components = 100
    test_size = 4000
    max_patches = 10000
    patch_size = (32, 32)
    n_threads = 2
    verbose = 10
    method = 'sgd'
    step_size = 10
    setting = 'dictionary learning'
    source = 'lisboa'
    gray = False
    scale = 1
@exp.automain
def decompose_images(batch_size,
                     learning_rate,
                     reduction,
                     alpha,
                     n_epochs,
                     n_components,
                     test_size,
                     max_patches,
                     patch_size,
                     n_threads,
                     verbose,
                     method,
                     step_size,
                     setting,
                     source,
                     gray,
                     scale,
                     _run):
    """Learn a patch dictionary from one image and store artifacts.

    All arguments except `_run` are injected by sacred from the `config`
    function above; `_run` is the sacred run object.
    """
    # Per-run artifact directory under the file observer's base directory.
    basedir = join(_run.observers[0].basedir, str(_run._id))
    artifact_dir = join(basedir, 'artifacts')
    if not os.path.exists(artifact_dir):
        os.makedirs(artifact_dir)
    print('Loading data')
    image = load_image(source, scale=scale, gray=gray)
    print('Done')
    width, height, n_channel = image.shape
    # Test patches are sampled from one half of the image; training uses the
    # other half, so held-out patches never overlap the training region.
    patch_extractor = LazyCleanPatchExtractor(patch_size=patch_size,
                                              max_patches=test_size,
                                              random_state=1)
    test_data = patch_extractor.transform(image[:, :height // 2, :])
    # Callback that scores the dictionary on `test_data` during fitting and
    # records the trace into the sacred run info.
    cb = DictionaryScorer(test_data, info=_run.info)
    dict_fact = ImageDictFact(method=method,
                              setting=setting,
                              alpha=alpha,
                              step_size=step_size,
                              n_epochs=n_epochs,
                              random_state=1,
                              n_components=n_components,
                              learning_rate=learning_rate,
                              max_patches=max_patches,
                              batch_size=batch_size,
                              patch_size=patch_size,
                              reduction=reduction,
                              callback=cb,
                              verbose=verbose,
                              n_threads=n_threads,
                              )
    dict_fact.fit(image[:, height // 2:, :])
    # Artifact 1: the learned dictionary atoms.
    fig = plt.figure()
    patches = dict_fact.components_
    plot_patches(fig, patches)
    fig.suptitle('Dictionary')
    fig.savefig(join(artifact_dir, 'dictionary.png'))
    # Artifact 2: test objective versus wall-clock time (log scale).
    fig, ax = plt.subplots(1, 1)
    ax.plot(cb.time, cb.score, marker='o')
    ax.legend()
    ax.set_xscale('log')
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Test objective value')
    fig.savefig(join(artifact_dir, 'score.png'))
| {
"repo_name": "arthurmensch/modl",
"path": "exps/exp_decompose_images.py",
"copies": "1",
"size": "3480",
"license": "bsd-2-clause",
"hash": 1594253594662026800,
"line_mean": 32.1428571429,
"line_max": 75,
"alpha_frac": 0.5232758621,
"autogenerated": false,
"ratio": 4.306930693069307,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5330206555169308,
"avg_score": null,
"num_lines": null
} |
import warnings
from nilearn.input_data import NiftiMasker
warnings.filterwarnings("ignore", category=DeprecationWarning)
import os
from os.path import expanduser, join
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from joblib import Memory, dump
from joblib import Parallel, delayed
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from modl.datasets import fetch_adhd
from modl.decomposition.fmri import fMRIDictFact
from modl.decomposition.stability import mean_amari_discrepency
from modl.plotting.fmri import display_maps
from nilearn.datasets import fetch_atlas_smith_2009
from modl.utils.system import get_cache_dirs
# ---- Hyper-parameters of the online dictionary-learning runs ----
batch_size = 200
learning_rate = .92
method = 'masked'
step_size = 0.01
reduction_ = 8
alpha = 1e-3
n_epochs = 4
verbose = 15
n_jobs = 70
smoothing_fwhm = 6
# Numbers of components to scan, and random restarts per component count.
components_list = [20, 40, 80, 120, 200, 300, 500]
n_runs = 20

# NOTE(review): `dict_init` is assigned but never used below -- presumably
# intended as a dictionary initialization; confirm.
dict_init = fetch_atlas_smith_2009().rsn20

# ADHD resting-state data; 2 subjects are held out for scoring.
dataset = fetch_adhd(n_subjects=40)
data = dataset.rest.values
train_data, test_data = train_test_split(data, test_size=2, random_state=0)
train_imgs, train_confounds = zip(*train_data)
test_imgs, test_confounds = zip(*test_data)
mask = dataset.mask
# On-disk memoization of the expensive fits; the masker maps voxels <-> arrays.
mem = Memory(location=get_cache_dirs()[0])
masker = NiftiMasker(mask_img=mask).fit()
def fit_single(train_imgs, test_imgs, n_components, random_state):
    """Fit one fMRI dictionary and score it on held-out images.

    Relies on the module-level hyper-parameters (`smoothing_fwhm`, `alpha`,
    ...) and on `train_confounds` for the training confounds.

    Args:
        train_imgs: training images;
        test_imgs: held-out images used for scoring;
        n_components (int): dictionary size;
        random_state (int): seed controlling the stochastic optimization;

    Returns:
        (components, score): learned spatial components and the test objective.
    """
    dict_fact = fMRIDictFact(smoothing_fwhm=smoothing_fwhm,
                             method=method,
                             step_size=step_size,
                             mask=mask,
                             memory=mem,
                             memory_level=2,
                             verbose=verbose,
                             n_epochs=n_epochs,
                             n_jobs=1,
                             random_state=random_state,
                             n_components=n_components,
                             positive=True,
                             learning_rate=learning_rate,
                             batch_size=batch_size,
                             reduction=reduction_,
                             alpha=alpha,
                             callback=None,
                             )
    dict_fact.fit(train_imgs, confounds=train_confounds)
    score = dict_fact.score(test_imgs)
    return dict_fact.components_, score
def fit_many_runs(train_imgs, test_imgs, components_list, n_runs=10, n_jobs=1):
    """Fit dictionaries over a grid of component counts and random restarts.

    Args:
        train_imgs: training images forwarded to `fit_single`;
        test_imgs: held-out images used for scoring;
        components_list: numbers of components to try;
        n_runs (int): number of random restarts per component count;
        n_jobs (int): number of parallel jobs;

    Returns:
        (mean Amari discrepencies per component count, their variances,
         the best-scoring components of the most stable component count).
    """
    random_states = check_random_state(0).randint(0, int(1e7), size=n_runs)
    cached_fit = mem.cache(fit_single)
    res = Parallel(n_jobs=n_jobs)(delayed(cached_fit)(
        train_imgs, test_imgs, n_components, random_state)
        for n_components in components_list
        for random_state in random_states
    )
    components, scores = zip(*res)
    # Regroup the flat result list into a (component count, restart) grid.
    shape = (len(components_list), len(random_states))
    components = np.array(components).reshape(shape).tolist()
    scores = np.array(scores).reshape(shape).tolist()
    discrepencies = []
    var_discrepencies = []
    best_components = []
    for n_components, these_components, these_scores in zip(components_list,
                                                            components,
                                                            scores):
        discrepency, var_discrepency = mean_amari_discrepency(
            these_components)
        best_estimator = these_components[np.argmin(these_scores)]
        # Fix: the mean discrepency was appended as `var_discrepency`,
        # which made the mean and variance lists identical.
        discrepencies.append(discrepency)
        var_discrepencies.append(var_discrepency)
        best_components.append(best_estimator)
    discrepencies = np.array(discrepencies)
    var_discrepencies = np.array(var_discrepencies)
    best_components = np.array(best_components)
    # Keep the best restart of the most stable (lowest-discrepency) setting.
    components = best_components[np.argmin(discrepencies)]
    return discrepencies, var_discrepencies, components
# Run the scan and persist everything needed to reproduce the figures.
output_dir = expanduser('~/output_drago4/modl/fmri_stability2')
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
discrepencies, var_discrepencies, components = fit_many_runs(
    train_imgs, test_imgs,
    components_list,
    n_jobs=n_jobs,
    n_runs=n_runs)
# Selected components mapped back to a Nifti image and saved to disk.
components_img = masker.inverse_transform(components)
components_img.to_filename(
    join(output_dir, 'components.nii.gz'))
dump((components_list, discrepencies, var_discrepencies),
     join(output_dir, 'discrepencies.pkl'))
# Figure 1: the selected spatial components.
fig = plt.figure()
display_maps(fig, components_img)
plt.savefig(join(output_dir, 'components.pdf'))
# Figure 2: mean Amari discrepency vs number of components, with a
# +/- variance band.
fig, ax = plt.subplots(1, 1)
ax.fill_between(components_list, discrepencies - var_discrepencies,
                discrepencies + var_discrepencies, alpha=0.5)
ax.plot(components_list, discrepencies, marker='o')
ax.set_xlabel('Number of components')
ax.set_ylabel('Mean Amari discrepency')
sns.despine(fig)
fig.suptitle('Stability selection using DL')
plt.savefig(join(output_dir, 'discrepencies.pdf'))
| {
"repo_name": "arthurmensch/modl",
"path": "examples/decompose_fmri_stability.py",
"copies": "1",
"size": "4960",
"license": "bsd-2-clause",
"hash": -6497502614826837000,
"line_mean": 34.1773049645,
"line_max": 79,
"alpha_frac": 0.633266129,
"autogenerated": false,
"ratio": 3.5053003533568905,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46385664823568906,
"avg_score": null,
"num_lines": null
} |
"""Author: Arthur Mensch
Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain
in using multinomial logistic regression in term of learning time.
"""
import json
import time
from os.path import expanduser
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_rcv1, load_iris, load_digits, \
fetch_20newsgroups_vectorized
from sklearn.utils import delayed, Parallel, Memory
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax
def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
               max_iter=10, skip_slow=False):
    """Fit one (solver, penalty) configuration, tracing scores over iterations.

    Args:
        solver (str): 'saga', 'liblinear' or 'lightning';
        X, y: the full dataset (split into train/test internally);
        penalty (str): 'l1' or 'l2';
        single_target (bool): binary problem if True, multinomial otherwise;
        C (float): inverse regularization strength;
        max_iter (int): maximal number of outer iterations;
        skip_slow (bool): skip the slow lightning + l1 combination;

    Returns:
        (estimator, times, train_scores, test_scores, accuracies), the lists
        aligned per iteration with one initial dummy entry, or None when the
        configuration was skipped.
    """
    if skip_slow and solver == 'lightning' and penalty == 'l1':
        # Fix: a rename of `skip` -> `skip_slow` had mangled this message
        # into "skip_slowping ...".
        print('Skipping l1 logistic regression with solver lightning.')
        return
    print('Solving %s logistic regression with penalty %s, solver %s.'
          % ('binary' if single_target else 'multinomial',
             penalty, solver))

    if solver == 'lightning':
        from lightning.classification import SAGAClassifier

    if single_target or solver not in ['sag', 'saga']:
        multi_class = 'ovr'
    else:
        multi_class = 'multinomial'

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
                                                        stratify=y)
    n_samples = X_train.shape[0]
    n_classes = np.unique(y_train).shape[0]
    # Seed the traces with a dummy point: unit objective, chance accuracy.
    test_scores = [1]
    train_scores = [1]
    accuracies = [1 / n_classes]
    times = [0]

    # Translate C into the (alpha, beta) parametrization used by lightning.
    if penalty == 'l2':
        alpha = 1. / (C * n_samples)
        beta = 0
        lightning_penalty = None
    else:
        alpha = 0.
        beta = 1. / (C * n_samples)
        lightning_penalty = 'l1'

    for this_max_iter in range(1, max_iter + 1, 2):
        print('[%s, %s, %s] Max iter: %s' %
              ('binary' if single_target else 'multinomial',
               penalty, solver, this_max_iter))
        if solver == 'lightning':
            lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
                                penalty=lightning_penalty,
                                tol=-1, max_iter=this_max_iter)
        else:
            lr = LogisticRegression(solver=solver,
                                    multi_class=multi_class,
                                    C=C,
                                    penalty=penalty,
                                    fit_intercept=False, tol=1e-24,
                                    max_iter=this_max_iter,
                                    random_state=42,
                                    )
        # Fix: `time.clock` was deprecated in Python 3.3 and removed in 3.8;
        # `time.perf_counter` is the documented replacement for measuring
        # elapsed durations.
        t0 = time.perf_counter()
        lr.fit(X_train, y_train)
        train_time = time.perf_counter() - t0

        scores = []
        for (X, y) in [(X_train, y_train), (X_test, y_test)]:
            try:
                y_pred = lr.predict_proba(X)
            except NotImplementedError:
                # Lightning predict_proba is not implemented for n_classes > 2
                y_pred = _predict_proba(lr, X)
            # Regularized mean log-loss, matching the solver's objective.
            score = log_loss(y, y_pred, normalize=False) / n_samples
            score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
                      beta * np.sum(np.abs(lr.coef_)))
            scores.append(score)
        train_score, test_score = tuple(scores)

        y_pred = lr.predict(X_test)
        accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
        test_scores.append(test_score)
        train_scores.append(train_score)
        accuracies.append(accuracy)
        times.append(train_time)
    return lr, times, train_scores, test_scores, accuracies
def _predict_proba(lr, X):
    """Probabilities via the softmax of decision scores.

    Fallback for estimators (lightning) that do not implement
    `predict_proba` in the multiclass case.
    """
    scores = safe_sparse_dot(X, lr.coef_.T)
    if hasattr(lr, "intercept_"):
        scores = scores + lr.intercept_
    return softmax(scores)
def exp(solvers, penalties, single_target, n_samples=30000, max_iter=20,
        dataset='rcv1', n_jobs=1, skip_slow=False):
    """Run the (solver x penalty) benchmark grid on one dataset.

    Results for every configuration are written to `bench_saga.json`.

    Args:
        solvers: solver names forwarded to `fit_single`;
        penalties: iterable among 'l1' / 'l2';
        single_target (bool): reduce the problem to binary if True;
        n_samples (int or None): truncate the dataset (None keeps everything);
        max_iter (int): maximal number of outer iterations per fit;
        dataset (str): 'rcv1', 'digits', 'iris' or '20newspaper';
        n_jobs (int): number of parallel fits;
        skip_slow (bool): skip the slow lightning + l1 configuration;
    """
    mem = Memory(cachedir=expanduser('~/cache'), verbose=0)

    if dataset == 'rcv1':
        rcv1 = fetch_rcv1()
        lbin = LabelBinarizer()
        lbin.fit(rcv1.target_names)
        X = rcv1.data
        y = rcv1.target
        y = lbin.inverse_transform(y)
        le = LabelEncoder()
        y = le.fit_transform(y)
        if single_target:
            # Binarize: classes above 16 vs the rest.
            y_n = y.copy()
            y_n[y > 16] = 1
            y_n[y <= 16] = 0
            y = y_n
    elif dataset == 'digits':
        digits = load_digits()
        X, y = digits.data, digits.target
        if single_target:
            # Binarize: digits 0-4 vs 5-9.
            y_n = y.copy()
            y_n[y < 5] = 1
            y_n[y >= 5] = 0
            y = y_n
    elif dataset == 'iris':
        iris = load_iris()
        X, y = iris.data, iris.target
    elif dataset == '20newspaper':
        ng = fetch_20newsgroups_vectorized()
        X = ng.data
        y = ng.target
        if single_target:
            y_n = y.copy()
            y_n[y > 4] = 1
            # Fix: this threshold was `y <= 16` (copied from the rcv1
            # branch), which zeroed out most labels just set to 1 above;
            # the binary split intended here is classes > 4 vs <= 4.
            y_n[y <= 4] = 0
            y = y_n
    X = X[:n_samples]
    y = y[:n_samples]

    # Memoize individual fits so re-runs only compute missing configurations.
    cached_fit = mem.cache(fit_single)
    out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
        delayed(cached_fit)(solver, X, y,
                            penalty=penalty, single_target=single_target,
                            C=1, max_iter=max_iter, skip_slow=skip_slow)
        for solver in solvers
        for penalty in penalties)

    res = []
    idx = 0
    for solver in solvers:
        for penalty in penalties:
            if not (skip_slow and solver == 'lightning' and penalty == 'l1'):
                lr, times, train_scores, test_scores, accuracies = out[idx]
                this_res = dict(solver=solver, penalty=penalty,
                                single_target=single_target,
                                times=times, train_scores=train_scores,
                                test_scores=test_scores,
                                accuracies=accuracies)
                res.append(this_res)
            # `out` has one entry per pair (skipped ones are None), so the
            # index advances on every iteration.
            idx += 1

    with open('bench_saga.json', 'w+') as f:
        json.dump(res, f)
def plot():
    """Render timing curves from 'bench_saga.json' into per-group PNG files.

    Produces one figure per (single_target, penalty) group with three
    panels: training objective and test objective (both relative to the
    best observed value, log scale) and raw test accuracy, each plotted
    against wall-clock time per solver.
    """
    import pandas as pd
    with open('bench_saga.json', 'r') as f:
        f = json.load(f)  # NOTE: rebinds `f` to the parsed list of records
    res = pd.DataFrame(f)
    res.set_index(['single_target', 'penalty'], inplace=True)
    grouped = res.groupby(level=['single_target', 'penalty'])
    # Fixed color per solver so panels are comparable across figures.
    colors = {'saga': 'blue', 'liblinear': 'orange', 'lightning': 'green'}
    for idx, group in grouped:
        single_target, penalty = idx
        fig = plt.figure(figsize=(12, 4))
        # Panel 1: training objective relative to the best value seen.
        ax = fig.add_subplot(131)
        train_scores = group['train_scores'].values
        # The 0.999 factor keeps the ratio strictly positive for log scale.
        ref = np.min(np.concatenate(train_scores)) * 0.999
        for scores, times, solver in zip(group['train_scores'], group['times'],
                                         group['solver']):
            scores = scores / ref - 1
            ax.plot(times, scores, label=solver, color=colors[solver])
            ax.set_xlabel('Time (s)')
            ax.set_ylabel('Training objective (relative to min)')
            ax.set_yscale('log')
        # Panel 2: test objective, same normalization.
        ax = fig.add_subplot(132)
        test_scores = group['test_scores'].values
        ref = np.min(np.concatenate(test_scores)) * 0.999
        for scores, times, solver in zip(group['test_scores'], group['times'],
                                         group['solver']):
            scores = scores / ref - 1
            ax.plot(times, scores, label=solver, color=colors[solver])
            ax.set_xlabel('Time (s)')
            ax.set_ylabel('Test objective (relative to min)')
            ax.set_yscale('log')
        # Panel 3: raw test accuracy over time.
        ax = fig.add_subplot(133)
        for accuracy, times, solver in zip(group['accuracies'], group['times'],
                                           group['solver']):
            ax.plot(times, accuracy, label=solver, color=colors[solver])
            ax.set_xlabel('Time (s)')
            ax.set_ylabel('Test accuracy')
            ax.legend()
        # File name encodes the group, e.g. 'single_target_l1.png'.
        name = 'single_target' if single_target else 'multi_target'
        name += '_%s' % penalty
        plt.suptitle(name)
        name += '.png'
        fig.tight_layout()
        fig.subplots_adjust(top=0.9)
        plt.savefig(name)
        plt.close(fig)
if __name__ == '__main__':
    # Benchmark configuration: compare all three solvers under both
    # penalties on the binarized 20newsgroups task (full dataset),
    # then render the timing/score plots from the JSON results.
    solvers = ['saga', 'liblinear', 'lightning']
    penalties = ['l1', 'l2']
    single_target = True
    exp(solvers, penalties, single_target, n_samples=None, n_jobs=1,
        dataset='20newspaper', max_iter=20)
    plot()
| {
"repo_name": "vortex-ape/scikit-learn",
"path": "benchmarks/bench_saga.py",
"copies": "7",
"size": "8463",
"license": "bsd-3-clause",
"hash": -6474303244771855000,
"line_mean": 33.6844262295,
"line_max": 79,
"alpha_frac": 0.5358619875,
"autogenerated": false,
"ratio": 3.6588845654993514,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7694746552999351,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arthur'
import os
import sys
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtWebKitWidgets import *
from warehouse import WareHouse
class Window(QWidget):
    """Main application window: a WebView filling the window that renders
    the bundled static/index.html UI and bridges it to a WareHouse backend."""

    def __init__(self):
        super(Window, self).__init__()
        self.setGeometry(300, 300, 1024, 800)
        self.setWindowTitle('WAREHOUSE')
        # Let the web view occupy the entire window (no margins).
        box = QVBoxLayout(self)
        box.setContentsMargins(0, 0, 0, 0)
        self.web_view = QWebView(self)
        box.addWidget(self.web_view)
        # Resolve the bundled HTML page relative to this module's location.
        base_dir = os.path.abspath(os.path.dirname(__file__))
        start_url = QUrl.fromLocalFile(os.path.join(base_dir, "static/index.html"))
        self.web_view.setUrl(start_url)
        self.web_view.urlChanged.connect(self.urlChanged)

    def urlChanged(self):
        """Expose a fresh WareHouse object to the page's JavaScript on navigation."""
        self.warehouse = WareHouse()
        self.web_view.page().mainFrame().addToJavaScriptWindowObject("warehouseObject", self.warehouse)
def main():
    """Bootstrap Qt, show the warehouse window, and enter the event loop."""
    qt_app = QApplication(sys.argv)
    window = Window()
    window.show()
    qt_app.exec_()


if __name__ == "__main__":
    main()
| {
"repo_name": "Arthraim/warehouse",
"path": "app.py",
"copies": "1",
"size": "1058",
"license": "mit",
"hash": 6612306280471325000,
"line_mean": 23.6279069767,
"line_max": 103,
"alpha_frac": 0.640831758,
"autogenerated": false,
"ratio": 3.4688524590163934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9531374447730891,
"avg_score": 0.01566195385710047,
"num_lines": 43
} |
__author__ = 'Arthur'
"""
This class is used as a data structure for passenger/driver.
"""
class Member(object):
    """A carpool member (passenger or driver) with an address and coordinates.

    Drivers additionally carry a list of assigned passengers; for plain
    passengers ``psg_list`` stays None.
    """

    # Class-level defaults; real values are assigned in __init__.
    name = None       # member's display name
    address = None    # free-form street address
    coord = None      # dict with "lat"/"lng" once geocoded, else None
    isDriver = None   # True when this member drives a car
    psg_list = None   # passengers assigned to this driver (drivers only)

    def __init__(self, name, addr, is_driver=False):
        # Best-effort validation: report and bail out, leaving the
        # class-level defaults in place (preserves original behavior of
        # printing rather than raising).
        if not name or not addr:
            print("Error: Member input must has name, address")
            return
        self.name = name
        self.address = addr
        self.isDriver = is_driver
        if self.isDriver:
            self.psg_list = list()

    # getters
    def get_name(self):
        """Return the member's name."""
        return self.name

    def get_address(self):
        """Return the member's current address string."""
        return self.address

    def get_coord(self):
        """Return the geocoded coordinates, or None (with a message) if unset."""
        if self.coord:
            return self.coord
        else:
            print("Error: Coord not set yet")
            return None

    # NOTE(review): the original defined a method named ``isDriver`` that was
    # shadowed by the instance attribute of the same name assigned in
    # __init__, making the method unreachable on instances.  It has been
    # removed; read the ``isDriver`` attribute directly.

    def get_psg_number(self):
        """Return how many passengers are assigned; 0 for non-drivers."""
        # Guard: the passenger list only exists for drivers (original
        # crashed with len(None) when called on a passenger).
        return len(self.psg_list) if self.psg_list is not None else 0

    # setter for coord
    def set_coord(self, coord):
        """Store geocoded coordinates (expects a dict with "lat"/"lng" keys)."""
        self.coord = coord

    def set_address(self, addr):
        """Replace the address; empty/None values are ignored."""
        if addr:
            self.address = addr

    def add_psg(self, member):
        """Assign a passenger to this driver; reports an error for non-drivers."""
        if self.psg_list is None:
            # Non-drivers have no passenger list; report instead of crashing.
            print("Error: cannot add passenger to a non-driver")
            return
        self.psg_list.append(member)

    def show_member(self):
        """Print a one-line summary; coordinates are included when available."""
        if self.coord:
            # str() guards against numeric lat/lng (the original crashed
            # concatenating non-string values).
            print("Member: " + self.name +
                  "; Address: " + self.address +
                  "- " + str(self.coord.get("lat")) +
                  ", " + str(self.coord.get("lng")))
        else:
            # Original crashed with AttributeError here when coord was unset.
            print("Member: " + self.name + "; Address: " + self.address)
if __name__ == '__main__':
arthur = Member("Arthur", "GTA")
arthur.show_member() | {
"repo_name": "GesusK/multi-driver_carpool",
"path": "MDC/Member.py",
"copies": "1",
"size": "1461",
"license": "mit",
"hash": 1967275530004801000,
"line_mean": 21.4923076923,
"line_max": 63,
"alpha_frac": 0.5386721424,
"autogenerated": false,
"ratio": 3.7270408163265305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9757168464005566,
"avg_score": 0.001708898944193062,
"num_lines": 65
} |
__author__ = 'Arthur'
"""
This module is used to cluster the passengers into different group,
each of which is led by a driver.
"""
import googlemaps
from datetime import datetime
from APIkey import APIkey
import data_input
import constants
def psgr_cluster(dvr_list, psg_list, car_capacity):
    """Greedily assign every passenger to the best available driver.

    For each passenger, queries the Google Maps Directions API for the
    driving distance/duration from each driver with a free seat and picks
    the driver that improves on either metric.  Assignments are recorded
    on the driver objects via ``add_psg``.  Returns None on failure (and
    implicitly on success -- callers read results off the drivers).
    """
    myname = "psgr_cluster"
    key = APIkey()
    gmap = googlemaps.Client(key=key.get_key())
    # Feasibility: total seats must be able to hold every passenger.
    if len(dvr_list) * car_capacity < len(psg_list):
        # BUG FIX: pass myname so the error is attributed to this function
        # (the original computed myname but never used it).
        message("Error: Not enough driver, Failed to do optimization.", myname)
        return None
    # Canonicalize all addresses (region/country suffix) before querying.
    for psg in psg_list:
        restrict_area(psg)
    for dr in dvr_list:
        restrict_area(dr)
    for psg in psg_list:
        best_dr = None
        best_DandT = (0, 0)
        for dr in dvr_list:
            if dr.get_psg_number() == car_capacity:
                continue  # this car is already full
            tmp = gmap.directions(dr.get_address(), psg.get_address(), mode="driving",
                                  departure_time=datetime.now(), avoid="tolls", units="metric")
            if len(tmp) == 0:
                message("Distance between " + dr.get_address() + " and " + psg.get_address() + " is not calculable",
                        myname)
                return None
            tmp_dis = tmp[0].get("legs")[0].get("distance").get("value")
            tmp_time = tmp[0].get("legs")[0].get("duration_in_traffic").get("value")
            # First candidate wins outright; afterwards a driver replaces
            # the incumbent if EITHER distance or duration is smaller.
            if best_dr is None or tmp_dis < best_DandT[0] or tmp_time < best_DandT[1]:
                best_dr = dr
                best_DandT = (tmp_dis, tmp_time)
        best_dr.add_psg(psg)
def set_member_coord(member_list, gmap):
    """Geocode each member's address and store the resulting lat/lng on it.

    A member with no address is reported via ``message`` and processing
    stops (preserving the original early-return behavior).
    """
    for member in member_list:
        if member.get_address():
            # BUG FIX: the original condition was inverted
            # (``if not member.get_address()``), so it geocoded only the
            # members that had NO address and reported the rest as missing.
            coord = gmap.geocode(member.get_address())
            # Take the north-east corner of the bounding box as the point.
            coord = coord.get("geometry").get("bounds").get("northeast")
            member.set_coord(coord)
        else:
            message("Member " + member.get_name() + " address is missing")
            return
def restrict_area(member, region=constants.REGION, country=constants.COUNTRY):
    """Normalize a member's address so it carries the canonical region/country.

    ``region`` and ``country`` appear to be (canonical name, abbreviation)
    pairs -- presumably e.g. ("ontario", "on"); TODO confirm against the
    ``constants`` module.  If the abbreviation occurs as a standalone token
    the canonical name is substituted in place; if it is absent entirely the
    canonical name is appended.  The whole address is lowercased as a side
    effect of the matching (original behavior, preserved).
    """
    import re
    reg_match = re.compile("[a-zA-Z]")  # used to reject matches embedded in words
    no_region = False
    no_country = False
    origin_addr = member.get_address().lower()
    region_index = origin_addr.rfind(region[1].lower())
    region_str_len = len(region[1])
    # NOTE(review): `> 0` also rejects a match at index 0; preserved as-is.
    if region_index > 0 and not reg_match.match(origin_addr[region_index - 1]):
        if region_index + region_str_len == len(origin_addr):
            # Abbreviation at the very end: replace it with the full name.
            member.set_address(origin_addr[:region_index] + region[0])
        elif not reg_match.match(origin_addr[region_index + region_str_len]):
            # Abbreviation mid-string, followed by a non-letter: splice in
            # the full name and keep the rest of the address.
            member.set_address(origin_addr[:region_index] + region[0] +
                               origin_addr[region_index + region_str_len:])
    else:
        no_region = True
    # Re-read the (possibly updated) address before the country pass.
    origin_addr = member.get_address().lower()
    ctr_index = origin_addr.rfind(country[1].lower())
    ctr_str_len = len(country[1])
    if ctr_index > 0 and not reg_match.match(origin_addr[ctr_index - 1]):
        if ctr_index + ctr_str_len == len(origin_addr):
            member.set_address(origin_addr[:ctr_index] + country[0])
        elif not reg_match.match(origin_addr[ctr_index + ctr_str_len]):
            # BUG FIX: the original indexed a single character here
            # (missing slice colon), silently dropping the tail of the
            # address after the country abbreviation.
            member.set_address(origin_addr[:ctr_index] + country[0] +
                               origin_addr[ctr_index + ctr_str_len:])
    else:
        no_country = True
    if no_region:
        member.set_address(origin_addr + ", " + region[0])
    if no_country:
        member.set_address(member.get_address() + ", " + country[0])
def message(msg, func_name="default"):
    """Print an error line tagged with the name of the reporting function."""
    print(f"Error - {func_name}: {msg}")
if __name__ == '__main__':
    # Ad-hoc smoke test: load drivers/passengers from a CSV fixture and
    # cluster them assuming 4 seats per car.
    [d_list, p_list] = data_input.read_from_csv("test2.csv")
    psgr_cluster(d_list, p_list, 4)
    print("End")
| {
"repo_name": "GesusK/multi-driver_carpool",
"path": "MDC/psgr_cluster.py",
"copies": "1",
"size": "3805",
"license": "mit",
"hash": -2952060516911233500,
"line_mean": 31.5213675214,
"line_max": 116,
"alpha_frac": 0.5844940867,
"autogenerated": false,
"ratio": 3.343585237258348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9419557559902931,
"avg_score": 0.0017043528110833595,
"num_lines": 117
} |
__author__ = 'arthurvandermerwe'
# Authorization response codes mapped to their human-readable descriptions.
# Built as a single literal instead of thirty item assignments.
AUTH_CODE_MAP = {
    "000": "Transaction Approved",
    "001": "Expired Card",
    "002": "Unauthorized Usage",
    "003": "Pin Error",
    "004": "Invalid Pin",
    "005": "Bank Unavailable",
    "006": "Card Not Supported",
    "007": "Insufficient Funds",
    "008": "Ineligible Transaction",
    "009": "Ineligible Account",
    "010": "Number of Daily Withdrawals Exceeded",
    "011": "Cannot Process Transaction",
    "012": "Amount to Large",
    "013": "Account Closed",
    "014": "PIN Tries Exceeded",
    "015": "Database Problem",
    "016": "Withdrawal Limit Already Reached",
    "017": "Invalid Amount",
    "018": "External Decline",
    "019": "System Error",
    "020": "Contact Card Issuer",
    "021": "Routing Lookup Problem",
    "022": "Message Edit Error",
    "023": "Transaction Not Supported",
    "024": "Insufficient Funds",
    "025": "Western Union sender data Error",
    "026": "Western Union receiver data Error",
    "027": "CRC Error",
    "028": "Pre-pay Transaction Failed",
    "029": "Pre-pay Transaction rejected",
}
| {
"repo_name": "sabit/ATM-Transaction-Trace",
"path": "MiddlewareServer/src/AuthCodeMapping.py",
"copies": "2",
"size": "1423",
"license": "mit",
"hash": -2993238271053437400,
"line_mean": 42.1212121212,
"line_max": 61,
"alpha_frac": 0.6964160225,
"autogenerated": false,
"ratio": 2.857429718875502,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.45538457413755024,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.