text stringlengths 0 1.05M | meta dict |
|---|---|
# Purpose: Set windows to lock itself(upon timeout) with a screensaver
# if no internet connection found.
from _winreg import *
import urllib2, socket
debug = False
########################## TO RUN ####################################
# schedule to run every x mins
########################### DEF ######################################
def locker(set):
# make set in terms of 1/0
set = 1 if set else 0
subkey = r'Control Panel\Desktop'
# to ensure screensaver is set to 'none' (straight to lock screen)
deleteRegistryKey(HKEY_CURRENT_USER, subkey, r'SCRNSAVE.EXE')
data= [('ScreenSaverIsSecure', REG_SZ, str(set)),
('ScreenSaveTimeOut', REG_SZ, '420')]
for valueName, valueType, value in data:
modifyRegistry(HKEY_CURRENT_USER, subkey, valueName,
valueType, value)
if debug: message = 'changed to locked' if set else 'changed to unlocked'
if debug: print message
def modifyRegistry(key, sub_key, valueName, valueType, value):
    """
    Create or update a single value in the Windows Registry.

    key       -- an HKEY_* root constant (e.g. HKEY_CURRENT_USER)
    sub_key   -- key path below the root
    valueName -- name of the value to set
    valueType -- registry type constant (e.g. REG_SZ)
    value     -- the data to store
    """
    try:
        key_handle = OpenKey(key, sub_key, 0, KEY_ALL_ACCESS)
    except WindowsError:
        # key does not exist yet (or cannot be opened) -- create it
        key_handle = CreateKey(key, sub_key)
    try:
        SetValueEx(key_handle, valueName, 0, valueType, value)
    finally:
        # close the handle even if SetValueEx raises, so it never leaks
        CloseKey(key_handle)
def deleteRegistryKey(key, sub_key, name):
"""
A simple function used to delete values in
the Windows Registry if present. Silently ignores failure
if value doesn't exist.
"""
try:
key_handle = OpenKey(key, sub_key, 0, KEY_ALL_ACCESS)
except WindowsError:
if debug: print 'No such key'
return
try:
DeleteValue(key_handle, name)
except WindowsError:
if debug: print "Value doesn't exist"
return
CloseKey(key_handle)
def internet_on():
    """
    Return True if at least one well-known host answers an HTTP request
    within one second, False otherwise.
    """
    # list of sites not likely to go down soon: google.com, microsoft.com etc
    sites = ['173.194.79.94', '74.125.113.99', '64.4.11.20',
             '173.194.33.21', '96.16.97.11']
    for ip in sites:
        try:
            urllib2.urlopen('http://%s' % ip, timeout=1)
            return True
        except (urllib2.URLError, socket.timeout):
            # unreachable or timed out -- try the next host
            continue
    return False
################################## CODE ######################################
# Lock the workstation (password-protected screensaver, 7-minute timeout)
# when no internet connection is found; unlock it otherwise.
locker(not internet_on())
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578200_Set_windows_7_lock_itself_uptimeout_if_no/recipe-578200.py",
"copies": "1",
"size": "2753",
"license": "mit",
"hash": -8702876904705356000,
"line_mean": 28.2872340426,
"line_max": 82,
"alpha_frac": 0.5706501998,
"autogenerated": false,
"ratio": 3.9105113636363638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.983863350422374,
"avg_score": 0.028505611842524696,
"num_lines": 94
} |
__author__ = 'cadu'
from django.db import models
import os
import logging
# Get logger.
logger = logging.getLogger(__name__)
# NOTE(review): this is set *after* "from django.db import models" above --
# Django settings normally must be configured before model imports; confirm.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webservices.settings")

# Choice tuples for model fields: (stored value, human-readable label).
FONT_SIZE_OPTIONS = (
    ('S', 'Small'),
    ('M', 'Medium'),
    ('L', 'Large'),
)
LANGUAGE_OPTIONS = (
    ('ES', 'Spanish'),
    ('PT', 'Portuguese'),
    ('EN', 'English'),
)
PROFILE_OPTIONS = (
    (0, 'SciELO'),
    (1, 'Editor'),
    (2, 'Operador'),
)
class User (models.Model):
    """Mobile-app end user, optionally linked to a Facebook/Google account."""
    id = models.AutoField(primary_key=True)
    create_time = models.DateTimeField(auto_now_add=True)  # set once on INSERT
    update_time = models.DateTimeField(auto_now=True)      # refreshed on every save
    email = models.CharField(max_length=255, null=True, blank=True)
    name = models.CharField(max_length=255)
    facebook_id = models.CharField(max_length=255, null=True)
    google_id = models.CharField(max_length=255, null=True)
    language = models.CharField(max_length=2, choices=LANGUAGE_OPTIONS)
    font_size = models.CharField(max_length=1, choices=FONT_SIZE_OPTIONS)

    def to_dict(self):
        """Return a JSON-serialisable dict of all user fields."""
        return {'id': self.id, 'create_time': str(self.create_time), 'update_time': str(self.update_time),
                'email': self.email, 'name': self.name, 'facebook_id': self.facebook_id,
                'google_id': self.google_id, 'language': self.language, 'font_size': self.font_size}

    class Meta:
        db_table = "mobile_user"
class UserFavorite(models.Model):
    """Article bookmarked ("favorited") by a mobile user."""
    id = models.AutoField(primary_key=True)
    user = models.ForeignKey(User)
    # external article identifier (stored as an opaque string)
    article_id = models.CharField(max_length=255)

    class Meta:
        db_table = "mobile_user_favorite"
class Category (models.Model):
    """Subject category with its name translated into EN/PT/ES."""
    id = models.AutoField(primary_key=True)
    category_name_en = models.CharField(max_length=255)
    category_name_pt = models.CharField(max_length=255)
    category_name_es = models.CharField(max_length=255)

    def to_dict(self):
        """Return a JSON-serialisable dict of the category names."""
        return {"id": self.id, "category_name_en": self.category_name_en, "category_name_es": self.category_name_es,
                "category_name_pt": self.category_name_pt}

    class Meta:
        db_table = "common_category"
class Feed (models.Model):
    """User-defined feed grouping a selection of magazines."""
    id = models.AutoField(primary_key=True)
    feed_name = models.CharField(max_length=255)
    user = models.ForeignKey(User)
    magazines = models.ManyToManyField('Magazine', blank=True)

    def to_dict(self):
        """Return a JSON-serialisable dict of the feed fields."""
        return {"id": self.id, "feed_name": self.feed_name}

    class Meta:
        db_table = "mobile_feed"
class Magazine (models.Model):
    """SciELO journal ("magazine") with its identifying metadata."""
    id = models.AutoField(primary_key=True)
    magazine_name = models.CharField(max_length=255)
    magazine_issn = models.CharField(max_length=255)
    magazine_domain = models.CharField(max_length=255)
    magazine_acronym = models.CharField(max_length=255)
    magazine_abbreviated_title = models.CharField(max_length=255)
    categories = models.ManyToManyField(Category)
    # reverse side of Feed.magazines, sharing the same join table
    feeds = models.ManyToManyField('Feed', through=Feed.magazines.through, blank=True)

    def to_dict(self):
        """Return a JSON-serialisable dict of the magazine fields."""
        return {"id": self.id, "magazine_name": self.magazine_name, "magazine_issn": self.magazine_issn,
                "magazine_domain": self.magazine_domain, "magazine_acronym": self.magazine_acronym,
                "magazine_abbreviated_title": self.magazine_abbreviated_title}

    class Meta:
        db_table = "common_magazine"
class Administrator (models.Model):
    """Back-office operator; ``profile`` selects the permission level."""
    id = models.AutoField(primary_key=True)
    create_time = models.DateTimeField(auto_now_add=True)  # set once on INSERT
    update_time = models.DateTimeField(auto_now=True)      # refreshed on every save
    # IntegerField does not accept max_length (Django ignores it and raises a
    # system-check warning), so the invalid kwarg was removed.
    profile = models.IntegerField(default=0, choices=PROFILE_OPTIONS)
    name = models.CharField(max_length=255)
    email = models.CharField(max_length=255, unique=True)
    # NOTE(review): stored as a plain CharField -- confirm hashing happens
    # before assignment elsewhere in the codebase.
    password = models.CharField(max_length=255, null=True)
    active = models.BooleanField(default=True)
    magazines = models.ManyToManyField('Magazine', blank=True)

    class Meta:
        db_table = "backoffice_administrator"
def generate_filename(self, filename):
    """Build the upload path for a cover image.

    Used as the ``upload_to`` callable of CoverArticle.image, so Django
    calls it with (instance, filename).  The path is derived from the
    instance's upload month and magazine id.

    NOTE(review): the original template contained a garbled placeholder
    where the file name belongs and never used the ``filename`` kwarg it
    passed to format(); restored the ``{filename}`` placeholder.
    """
    url = "uploads/covers/{dynamic_path}/magazine_{magazine_id}/{filename}".format(
        dynamic_path=self.upload_time.strftime("%Y/%m"),
        filename=filename,
        magazine_id=self.magazine.id)
    return url
class CoverArticle (models.Model):
    """Cover image uploaded by an administrator for a magazine article.

    The trailing dataset-separator junk (``| {``) fused onto the last
    line of the original was removed.
    """
    id = models.AutoField(primary_key=True)
    upload_time = models.DateTimeField(auto_now=True)
    # generate_filename() derives the storage path from upload_time/magazine
    image = models.ImageField(upload_to=generate_filename)
    article_id = models.CharField(max_length=255)
    administrator = models.ForeignKey(Administrator)
    magazine = models.ForeignKey(Magazine)

    class Meta:
        db_table = "backoffice_cover_article"
"repo_name": "scieloorg/pulsemob_webservices",
"path": "pulsemob_webservices/webservices/models.py",
"copies": "2",
"size": "4587",
"license": "bsd-2-clause",
"hash": 4055441822873019000,
"line_mean": 32.9851851852,
"line_max": 180,
"alpha_frac": 0.6801831262,
"autogenerated": false,
"ratio": 3.321506154960174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5001689281160174,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cagataytengiz'
import configparser
from common import init_app
import bottle
from common import appconf, baseApp
def do_setup():
    """
    First-run handler: write a default config.ini (login disabled), then
    boot the real application and route '/' to its index view.

    :return: the rendered index page
    """
    #todo: setup
    """
    if bottle.request.method == 'GET':
        return bottle.template('setup')
    else:
        prms = bottle.request.POST
        aconfig = configparser.ConfigParser()
        aconfig['system'] = {
            'login_required': prms.get('login_required', default = 0),
        }
        with open('%s/config.ini' % appconf.basepath, 'w+', encoding='utf-8') as f:
            aconfig.write(f)
        bottle.redirect('/setup_ok')
    """
    # write a minimal configuration with login disabled
    aconfig = configparser.ConfigParser()
    aconfig['system'] = {
        'login_required': 0,
    }
    with open('%s/config.ini' % appconf.basepath, 'w+', encoding='utf-8') as f:
        aconfig.write(f)
    # imported locally because app.py imports this module at top level
    from app import index
    init_app()
    baseApp.route('/', method=['GET', 'POST'], callback=index)
    return index()
def setup_ok():
    """Post-setup page: boot the real app, route '/' to its index view
    and render the 'setup_ok' template."""
    # imported locally because app.py imports this module at top level
    from app import index
    init_app()
    baseApp.route('/', method=['GET', 'POST'], callback=index)
    return bottle.template('setup_ok')
| {
"repo_name": "ctengiz/firewad",
"path": "firstrun.py",
"copies": "1",
"size": "1140",
"license": "mit",
"hash": -5315543160328054000,
"line_mean": 20.9230769231,
"line_max": 83,
"alpha_frac": 0.5815789474,
"autogenerated": false,
"ratio": 3.5514018691588785,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4632980816558878,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cagataytengiz'
import os
import sys
import importlib
from bottle import BaseTemplate, debug, run, template, static_file, request, redirect
from beaker.middleware import SessionMiddleware
from sub import db, ddl, login, mon, qry
from common import appconf, baseApp, init_app, init_session, render, highlight_sql, get_rdb_type
import firstrun
def run_app(do_debug=True):
    """
    Configure routing and return the WSGI app wrapped in beaker session
    middleware.

    If no config.ini exists yet, '/' is routed to the first-run setup
    wizard instead of the normal index view.

    NOTE(review): do_debug is accepted but never used -- confirm intent.
    """
    # resolve the application base path (frozen executable vs. plain script)
    if hasattr(sys, 'frozen'):
        appconf.basepath = os.path.dirname(os.path.abspath(sys.argv[0]))
    else:
        appconf.basepath = os.path.abspath(os.path.dirname(__file__))
    if not(os.path.exists(appconf.basepath + '/config.ini')):
        # first run: route everything through the setup wizard
        baseApp.route('/', method=['GET', 'POST'], callback=firstrun.do_setup)
        baseApp.route('/setup_ok', method=['GET', 'POST'], callback=firstrun.setup_ok)
    else:
        init_app()
        baseApp.route('/', method=['GET', 'POST'], callback=index)
    #template defaults
    BaseTemplate.defaults['appconf'] = appconf
    BaseTemplate.defaults['highlight_sql'] = highlight_sql
    BaseTemplate.defaults['get_rdb_type'] = get_rdb_type
    return SessionMiddleware(baseApp, {'session.type': 'file',
                                       'session.cookie_expires': 18000,
                                       'session.data_dir': appconf.basepath + '/sessions',
                                       'session.auto': True})
@baseApp.hook('before_request')
def setup_request():
    """Per-request hook: expose the beaker session to templates and
    redirect unauthenticated users to /login when login is required."""
    s = request.environ['beaker.session']
    # first path segment, used to exempt the login page itself
    _path = request.urlparts.path.split('/')[1]
    if 'logged_in' not in s:
        init_session()
    BaseTemplate.defaults['session'] = s
    if appconf.login_required and s['logged_in'] is False and _path != 'login':
        redirect('/login')
def index():
    """
    Render the main page.

    :return: the rendered template produced by ``render()``
    """
    return render()
@baseApp.get('/static/<filepath:path>')
def server_static(filepath):
    """
    Serve files from the application's static directory.

    :param filepath: path below <basepath>/static
    :return: a bottle static_file response
    """
    return static_file(filepath, root='%s/static' % appconf.basepath)
if __name__ == '__main__':
    # NOTE(review): run_app() is invoked twice here, so the routes are
    # registered twice and the first return value is discarded -- confirm
    # whether the first call can be removed.
    run_app()
    debug(True)
    appM = run_app()
    run(app=appM, host="127.0.0.1", port=18022, reloader=False)
else:
    # imported by a WSGI server: expose the session-wrapped application
    appM = run_app()
    application = appM
| {
"repo_name": "ctengiz/firewad",
"path": "app.py",
"copies": "1",
"size": "2173",
"license": "mit",
"hash": 2999111756139001000,
"line_mean": 26.5063291139,
"line_max": 96,
"alpha_frac": 0.6148182237,
"autogenerated": false,
"ratio": 3.499194847020934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46140130707209337,
"avg_score": null,
"num_lines": null
} |
__author__ = 'caioseguin'
from ..query_data_structures.constraint import Constraint
from ..query_data_structures.element import Variable, Constant, Wildcard
from ..query_data_structures.query import *
from ..query_data_structures.relation import RelationInQuery
# Operators used in this module
ruleOperator = ':-'          # separates a rule's head from its body
disjunctionOperator = ';'    # separates alternatives within a body
greaterOperator = '>'
lessOperator = '<'
greaterEqualOperator = '>='
lessEqualOperator = '<='
equalOperator = '='
negationOperator = 'not'     # prefix marking a negated relation
forbiddenSymbol = '$'
assertCommand = '/assert '   # REPL command prefix for assertions
class RuleHandler:
    """Translate tokenised Datalog rules and facts into AST query objects.

    The input is the nested-list token structure produced by the parser;
    the output is built from ConjunctiveQuery / DisjunctiveQuery,
    RelationInQuery, Constraint and the term nodes Variable, Constant
    and Wildcard.
    """

    def __init__(self):
        pass

    def handleFact(self, fact):
        """Build a headless ConjunctiveQuery from a bare fact."""
        relation_object_list, constraint_object_list = self.handleBody(fact)
        return ConjunctiveQuery(relation_object_list, constraint_object_list, None)

    def handleDisjunctiveRule(self, rule):
        """Split a disjunctive rule (';' in the body) into one conjunctive
        rule per alternative and wrap the results in a DisjunctiveQuery."""
        assert isinstance(rule, list)
        query_object_list = []
        head = self.extractHead(rule)
        body = self.extractBody(rule)
        for conjunctive_body in self.splitDisjunctiveRule(body):
            # reassemble "<head> :- <alternative>" and delegate
            conjunctive_rule = [head, ruleOperator, conjunctive_body]
            query_object_list.append(self.handleConjunctiveRule(conjunctive_rule))
        return DisjunctiveQuery(query_object_list)

    def handleConjunctiveRule(self, rule):
        """Translate a single conjunctive rule into a ConjunctiveQuery."""
        assert isinstance(rule, list)
        head = self.extractHead(rule)
        body = self.extractBody(rule)
        relation_object_list, constraint_object_list = self.handleBody(body)
        return self.handleHead(head, relation_object_list, constraint_object_list)

    def handleHead(self, head, relation_object_list, constraint_object_list):
        """Attach the translated head relation to the translated body parts."""
        head_relation = self.handleRelation(head)
        return ConjunctiveQuery(relation_object_list, constraint_object_list, head_relation)

    def handleBody(self, body):
        """Split the body into relation and constraint AST node lists."""
        assert isinstance(body, list)
        relation_list = []
        constraint_list = []
        for body_part in body:
            if self.isConstraint(body_part):
                constraint_list.append(self.handleConstraint(body_part))
            else:
                relation_list.append(self.handleRelation(body_part))
        return relation_list, constraint_list

    def handleRelation(self, relation):
        """Translate ['name', [terms]] (optionally prefixed by 'not') into a
        RelationInQuery node.  Raises on 0-ary atoms and malformed terms."""
        assert isinstance(relation, list)
        if len(relation) == 1:
            raise Exception("Atoms not supported")
        is_negated = self.isNegatedRelation(relation)
        if is_negated:
            # unwrap ['not', ['name', [terms]]] -> ['name', [terms]]
            relation = relation[1]
        relation_symbol = relation[0]
        if not isinstance(relation[1], list):
            raise Exception("list of terms expected")
        ast_term_list = self.handleTermList(relation[1])
        return RelationInQuery(relation_symbol, ast_term_list, is_negated)

    # Will have to be updated to deal with strings as constants
    def handleTermList(self, term_list):
        """Map raw term tokens to Variable / Constant / Wildcard nodes."""
        ast_term_list = []
        for term in term_list:
            if self.isVariable(term):
                ast_term_list.append(Variable(term))
            if self.isConstant(term):
                ast_term_list.append(Constant(self.handleConstant(term)))
            if self.isWildcard(term):
                ast_term_list.append(Wildcard())
        return ast_term_list

    # Will have to be updated to deal with strings as constants
    def handleConstraint(self, constraint):
        """Translate [lhs, op, rhs] into a Constraint node."""
        if self.isVariable(constraint[0]):
            left_side = Variable(constraint[0])
        else:
            left_side = Constant(self.handleConstant(constraint[0]))
        if self.isVariable(constraint[2]):
            right_side = Variable(constraint[2])
        else:
            right_side = Constant(self.handleConstant(constraint[2]))
        return Constraint(left_side, right_side, constraint[1])

    def extractHead(self, rule):
        """The head is the first token of a rule."""
        return rule[0]

    def extractBody(self, rule):
        """Everything after the ':-' token (index 2 onwards) is the body."""
        if not len(rule) >= 3:
            raise Exception("list with three or more elements expected for a body")
        return rule[2:len(rule)]

    def splitDisjunctiveRule(self, disjunctive_query):
        """Split a body on the ';' token into a list of conjunctive bodies."""
        conjunctive_query_list = []
        conjunctive_query = []
        for element in disjunctive_query:
            if element == disjunctionOperator:
                conjunctive_query_list.append(conjunctive_query)
                conjunctive_query = []
            else:
                conjunctive_query.extend(element)
        conjunctive_query_list.append(conjunctive_query)
        return conjunctive_query_list

    def isRule(self, statement):
        return ruleOperator in statement

    def isDisjunctiveRule(self, statement):
        return disjunctionOperator in statement

    def isAssertion(self, statement):
        return statement[0] == assertCommand

    def isNegatedRelation(self, relation):
        return relation[0] == negationOperator

    def isConstraint(self, body_part):
        """A constraint is a 3-token [lhs, op, rhs] with a comparison op."""
        comparison_operators = (greaterOperator, greaterEqualOperator,
                                lessOperator, lessEqualOperator, equalOperator)
        return len(body_part) == 3 and body_part[1] in comparison_operators

    def isVariable(self, term):
        return term[0].isupper()

    def isConstant(self, term):
        return term[0].islower() or term.isdigit() or (term[0] == "'" and term[len(term)-1] == "'")

    def isWildcard(self, term):
        return term == '_'

    def handleConstant(self, constant):
        """Strip the surrounding single quotes from a quoted constant.

        Bug fix: the original compared the last character against the empty
        string (always false), so quoted constants kept their quotes.
        """
        if constant[0] == "'" and constant[len(constant)-1] == "'":
            return constant[1:len(constant)-1]
        return constant
| {
"repo_name": "saltzm/yadi",
"path": "yadi/datalog2sql/tokens2ast/rule_handler.py",
"copies": "1",
"size": "6261",
"license": "bsd-3-clause",
"hash": -5334839601984947000,
"line_mean": 30.305,
"line_max": 99,
"alpha_frac": 0.6152371826,
"autogenerated": false,
"ratio": 4.094833224329627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5210070406929627,
"avg_score": null,
"num_lines": null
} |
__author__ = 'caioseguin'
from .rule_handler import RuleHandler
from ..query_data_structures.query import *
# This class acts as the middle man between the Datalog parser and the SQL translator.
# It transforms the parser's output into a list of conjunctiveQueries and a list of disjunctiveQueries.
class ASTBuilder:
    """Middle man between the Datalog parser and the SQL translator.

    Transforms the parser's output into ConjunctiveQuery /
    DisjunctiveQuery AST objects.

    Attributes:
        program_ast: AST of the most recently built program; one entry
            per parsed statement.
        history_ast: every program_ast this builder has produced.
        rule_handler: translates individual rules into AST form.
    """

    def __init__(self):
        # Bug fix: the original assigned plain locals here (missing
        # ``self.``), so __init__ was a no-op and all instances silently
        # shared mutable class-level attributes.
        self.history_ast = []
        self.program_ast = []
        self.rule_handler = RuleHandler()

    def buildAST(self, parsed_datalog_program, is_assertion):
        """Build and return the AST for a whole parsed program, appending
        it to the history."""
        assert isinstance(parsed_datalog_program, list)
        self.program_ast = []
        for code_line in parsed_datalog_program:
            self.program_ast.append(self.handleStatement(code_line, is_assertion))
        self.history_ast.append(self.program_ast)
        return self.program_ast

    def handleStatement(self, statement, is_assertion):
        """Translate one statement; wrap it in AssertedQuery when the
        program was entered via the assert command."""
        assert isinstance(statement, list)
        if is_assertion:
            if self.rule_handler.isRule(statement):
                statement_ast = AssertedQuery(self.handleRule(statement))
            else:
                statement_ast = AssertedQuery(self.handleFact(statement), True)
        else:
            if self.rule_handler.isRule(statement):
                statement_ast = self.handleRule(statement)
            else:
                statement_ast = self.handleFact(statement)
        return statement_ast

    def handleRule(self, statement):
        """Dispatch to the disjunctive or conjunctive rule handler."""
        assert isinstance(statement, list)
        if self.rule_handler.isDisjunctiveRule(statement):
            return self.rule_handler.handleDisjunctiveRule(statement)
        return self.rule_handler.handleConjunctiveRule(statement)

    def handleFact(self, statement):
        """Translate a bare fact into a headless conjunctive query."""
        assert isinstance(statement, list)
        return self.rule_handler.handleFact(statement)
| {
"repo_name": "saltzm/yadi",
"path": "yadi/datalog2sql/tokens2ast/ast_builder.py",
"copies": "1",
"size": "2447",
"license": "bsd-3-clause",
"hash": -6967365647376108000,
"line_mean": 33.9571428571,
"line_max": 119,
"alpha_frac": 0.6738863915,
"autogenerated": false,
"ratio": 4.20446735395189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.537835374545189,
"avg_score": null,
"num_lines": null
} |
__author__ = 'caja'
import rospy
import time
import shlex
import subprocess
from threading import Thread
from BAL.Interfaces.Runnable import Runnable
TIME_OUT = 5000  # watchdog timeout in milliseconds

class KeepAliveHandler(Runnable):
    """Watchdog: subscribes to a keep-alive topic and kills the ROS
    processes if no message arrives within TIME_OUT milliseconds.

    Only the first instance installs the subscriber and watchdog thread
    (singleton-style guard via the class flag ``is_init``).
    """
    # class-level flag: True once a watchdog has been installed
    is_init = False

    def __init__(self, topic_name, msg_type):
        if not KeepAliveHandler.is_init:
            KeepAliveHandler.is_init = True
            # last time (ms since epoch) a keep-alive message was seen
            self._watch_dog_time = int(round(time.time() * 1000))
            rospy.Subscriber(topic_name, msg_type, self.callback_to_watch)
            Thread(target=self.run, args=()).start()
        else: pass

    def run(self):
        """Poll at 50 Hz; on timeout log an error and kill ROS once."""
        rate = rospy.Rate(50)
        send_err = False
        while not rospy.is_shutdown() and not send_err:
            if (int(round(time.time() * 1000)) - self._watch_dog_time) > TIME_OUT:
                rospy.logerr("RiC Board is not responding")
                # NOTE(review): "pkill -f ros" kills every process whose
                # command line contains 'ros' -- confirm this is intended.
                subprocess.Popen(shlex.split("pkill -f ros"))
                send_err = True
            rate.sleep()

    def callback_to_watch(self, msg):
        """Record the arrival time of a keep-alive message."""
        self._watch_dog_time = int(round(time.time() * 1000))
| {
"repo_name": "robotican/ric",
"path": "ric_board/scripts/RiCTraffic/BAL/Handlers/keepAliveHandler.py",
"copies": "1",
"size": "1062",
"license": "bsd-3-clause",
"hash": 3849739717352863000,
"line_mean": 30.2352941176,
"line_max": 82,
"alpha_frac": 0.604519774,
"autogenerated": false,
"ratio": 3.6122448979591835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9707252940824117,
"avg_score": 0.0019023462270133162,
"num_lines": 34
} |
__author__ = 'Caleb Madrigal'
__email__ = 'caleb.madrigal@gmail.com'
__version__ = '0.0.2'
__apiversion__ = 1
# Plugin configuration consumed by the host application -- presumably
# minimum power (dBm), log level and trigger cooldown (seconds);
# TODO confirm against the trackerjacker plugin API.
__config__ = {'power': -100, 'log_level': 'ERROR', 'trigger_cooldown': 1}
class Trigger:
    """Example plugin trigger that tallies packets and distinct devices.

    Each call represents one sniffed frame; running totals are kept on
    the instance and a summary is printed per frame.
    """

    def __init__(self):
        # running totals, updated on every __call__
        self.packets_seen = 0
        self.unique_mac_addrs = set()

    def __call__(self,
                 dev_id=None,
                 dev_type=None,
                 num_bytes=None,
                 data_threshold=None,
                 vendor=None,
                 power=None,
                 power_threshold=None,
                 bssid=None,
                 ssid=None,
                 iface=None,
                 channel=None,
                 frame_type=None,
                 frame=None,
                 **kwargs):
        """Record one frame and print the running summary."""
        self.packets_seen += 1
        self.unique_mac_addrs.add(dev_id)
        totals = '[!] Total packets: {}, Unique devices: {}'
        print(totals.format(self.packets_seen, len(self.unique_mac_addrs)))
        detail = ('\tdev_id = {}, dev_type = {}, num_bytes = {}, data_threshold = {}, vendor = {}, '
                  'power = {}, power_threshold = {}, bssid = {}, ssid = {}, iface = {}, channel = {}, '
                  'frame_types = {}, frame = {}')
        print(detail.format(dev_id, dev_type, num_bytes, data_threshold, vendor,
                            power, power_threshold, bssid, ssid, iface, channel,
                            frame_type, frame))
| {
"repo_name": "calebmadrigal/trackerjacker",
"path": "plugin_examples/plugin_template.py",
"copies": "2",
"size": "1426",
"license": "mit",
"hash": -2862591626625458000,
"line_mean": 37.5405405405,
"line_max": 112,
"alpha_frac": 0.4670406732,
"autogenerated": false,
"ratio": 3.8230563002680964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007880766452064692,
"num_lines": 37
} |
__author__ = 'Caleb'
from urllib2 import urlopen, HTTPError, URLError
import datetime
from math import sqrt, sin,cos,atan,degrees,radians
import numpy as np
import sys
from os import path
import json
import argparse
def localDataSpec(buoy):
    """Read the most recent raw spectra line for *buoy* from the local
    cache directory (c:\\node\\<buoy>.data_spec).

    Returns the second line of the file (the newest observation); exits
    the process with status 1 on any I/O error.
    """
    try:
        # 'with' guarantees the handle is closed even if read() raises,
        # which the original open/read/close sequence did not
        with open(path.join('c:\\node', buoy + '.data_spec'), 'r') as f:
            d = f.read(1024)
        return d.split('\n')[1]
    except IOError as e:
        sys.stderr.write(str(e))
        sys.exit(1)
def httpDataSpec(buoy, dataType='data_spec'):
    """Fetch the most recent buoy spectra observation from NDBC.

    buoy     -- the buoy id
    dataType -- 'data_spec', 'swdir', or 'swdir2' for energy, mean wave
                direction, or primary wave direction respectively

    Returns the second line of the response (the newest observation);
    exits the process with status 1 on any fetch error.
    """
    dataUrl = ''.join(['http://www.ndbc.noaa.gov/data/realtime2/', buoy, '.', dataType])
    try:
        response = urllib2.urlopen(dataUrl)
    except Exception as e:
        # the original's HTTPError / URLError / Exception clauses all had
        # identical bodies (and used the removed 'except X, e' syntax),
        # so they are collapsed into one handler
        sys.stderr.write(str(e))
        sys.exit(1)
    d = response.read(1024)
    return d.split('\n')[1]
def arrayDataSpec(ds, e=None):
    """Overlay a new list of energy-density values onto an existing
    spectra list.

    ds -- list of (energy, frequency, bandwidth) tuples
    e  -- replacement energy values, one per band (zip truncates to the
          shorter of the two lists); defaults to a canned demo spectrum.

    Bug fix: the default was a mutable list literal in the signature;
    it is now built per call from a None sentinel.
    """
    if e is None:
        e = [0,0,0,.028,.154,1.148,.28,.28,.168,.336,.924,1.4,.63,1.078,.756,1.302,.476,.364,
             .21,.196,.308,.266,.168,.154,.266,.252,.154,.224,.126,.112,.07,.07,.056,.042,.042,.028,.028,
             .014,.014,.014,.014,.014,.014,.014] + [0] * 20
    return [(en, i[1], i[2]) for en, i in zip(e, ds)]
def data_spec(datas):
"""
returns a list of tuples for each frequency band from raw data dump
datas is straight out of httpDataSpec
coded for energy but works with wave direction too.
returns energy/direction, frequency, and bandwidth
# sep_freq = '9.999'
# datas = data.split(sep_freq)[1]
# bandwidths = [.005,.0075,.01,.015,.2]
"""
errlog = []
l = []
# semi-hacky way of determining if data is energy or direction
if datas.find('(') > 23:
dstart = 23
else:
dstart = 16
datas = datas[dstart:].split(') ')
i=0
while i < len(datas):
# for i in datas.split(') '):
if datas[i]:
t = datas[i].split()
e = float(t[0])
f = float(t[1].strip('()'))
if i is 0:
b = .005
else:
b = f - l[i-1][1]
# adjust the bandwidth where bandwidth transitions from .005 to .01 and .02
if .0058 < round(b,3) < .008:
b = .0075
elif .012 < round(b,3) < .018:
b = .015
errlog.append(b)
# b2 = {j: abs(b1-j) for j in bandwidths}
# v = b2.values()
# k = b2.keys()
# b = k[v.index(min(v))]
l.append((e,f,b))
i+=1
if __name__ != '__main__':
print [i for i in errlog]
return l
# def e():
# e = [0,0,0,1.663,2.16,.648,.281,.346,.95,1.858,1.296,1.188,1.296,1.188,1.274,1.145,.389,.346,.302,.151,.13,.108,.086,.086,.086,.13,.065,.43,.086,.22,.22,.22,.22,
# .22,.22,.22,.22,0,.22,.22,0,.22,0,0,0,0,0,0]
# e= [0,0,0,2.511,2.7,.459,.432,.756,1.026,1.107,1.242,1.242,.891,.432,.648,.324,.351,.189,.108,.135,.081,.162,.081,.108,.054,.081,.054,.027,.054,.027,0,.027,.027] + [0]*13
# e = [0]*6 + [.065,.52,.65,.1495,2.6,3.445,3.445,4.225,6.5,5.265,5.33,2.47,1.69,1.69,1.69,1.56,1.17,1.17,.52,.26,.585,.78,.325,.39,.455,.26,.26,.195,.13,.13,.13,.065,.13,.065] + [0]*4
# # return e
def meanDegree(angles, e):
    """
    :param angles: a list of degrees, one per frequency band
    :param e: the energy corresponding to each angle; angles whose energy
        is very low (<= 0.005) or whose value is the NDBC missing-data
        marker 999 are discarded
    :return: the circular mean of the remaining angles in degrees
        [0, 360), rounded to 1 decimal; None if nothing remains

    Bug fix: the original computed atan(sbar/cbar) with manual quadrant
    correction and raised ZeroDivisionError when the cosine sum was 0;
    atan2 handles all quadrants (and cbar == 0) directly.
    """
    from math import atan2  # not in this module's top-level math import
    angles = [radians(a[0]) for a in zip(angles, e) if a[0] != 999 and a[1] > 0.005]
    if not angles:
        return None
    # circular mean: sum the unit vectors, then take their angle
    # (dividing by len(angles) would not change the ratio)
    s = sum(sin(a) for a in angles)
    c = sum(cos(a) for a in angles)
    abar = degrees(atan2(s, c)) % 360
    return round(abar, 1)
def band(spec, fences):
    """Integrate spectral energy between two frequency fences.

    spec   -- numpy structured array with fields 'e' (energy density),
              'f' (frequency), 'b' (bandwidth) and 'md' (mean direction)
    fences -- (low_frequency, high_frequency) tuple bounding the band

    Returns (band_energy * 10000, mean_direction).  Frequency bands that
    straddle a fence contribute proportionally to the overlap.

    NOTE(review): the block structure was reconstructed from a
    whitespace-mangled source -- confirm against the original file.
    """
    errlog = []
    errlog.append("high / low freq fences: " + str(round(1.0/fences[1], 1)) + "(" + str(round(fences[1], 6)) + ")" \
        + ' ' + str(round(1.0/fences[0], 1)) + "(" + str(round(fences[0], 6)) + ")")
    e = spec['e']
    f = spec['f']
    b = spec['b']
    d = spec['md']
    i = 0
    if fences[1] == spec['f'][-1]: # this is the shortest frequency so gonna use all of it
        partial1percent = 1
        i = len(f) - 1
    else:
        # walk up to the band whose high edge reaches the high-frequency fence
        while i < len(f):
            # if round(b[i], 2) == .075:
            #     fend = f[i] + .004 # should come out to .105 for most buoys
            # elif round(b[i],2) == .015:
            #     fend = f[i] + .01 # should come out to .375 for 46086 & similar buoys
            # else:
            fend = f[i] + .5 * b[i] # get high frequency / low period end of equency band
            if round(fend, 3) > fences[1]:
                # i -= 1
                break
            elif round(fend, 3) == fences[1]:
                # print f[i], i
                i += 1
                break # i is index of last full band now
            i+=1
        partial1 = abs(fend - fences[1]) # partial1 is band of spectra between low end of freq band and the high freq side of the fence
        partial1percent = 1 - partial1 / b[i] # find how much of the partial band we are taking, then multiply the energy by it. if fend & fences[1] are equal value of b[i] irrelevant: 0/x
    partial1e = e[i] * partial1percent # if equal this will be zero
    partial1eb = partial1e * b[i] # need to get the bandwidth part of the equation in here
    errlog.append("high freq fenced frequency band " + str(round(1.0/f[i], 1)) + "(" + str(round(f[i], 4)) + ") is " + str(round(partial1percent*100, 1)) + "%")
    # same as above for opposite end of fence
    j = 0
    if fences[0] == 1.0/40:
        # lowest fence of the nine-band split: take the whole first band
        partial2percent = 1
    else:
        while j < len(f):
            # if round(b[j], 2) == .075:
            #     fbegin = f[j] - .0035 # should come out to .105 for most buoys
            # elif round(b[j],2) == .015:
            #     fbegin = f[j] - .01 # should come out to .355 for 46086 & similar buoys
            # else:
            fbegin = f[j] + .5 * b[j] # get low frequency / high period end of frequency band
            if round(fbegin, 3) > fences[0]:
                # j -= 1
                break
            elif round(fbegin, 3) == fences[0]:
                # print f[i], i
                j += 1
                break # i is index of last full band now
            j+=1
        partial2 = fbegin - fences[0]
        # if round(f[j], 3) >= fences[0]:
        #     j -= 1
        #     # print f[j], j
        #     break
        # j +=1
        # fbegin = f[j] + .5 * b[j]
        # partial2 = fbegin - fences[0]
        partial2percent = abs(partial2 / b[j])
    partial2e = e[j] * partial2percent
    partial2eb = partial2e * b[j]
    errlog.append("low freq fenced frequency band " + str(round(1.0/f[j], 1)) + " (" + str(round(f[j], 4)) + ") is " + str(round(partial2percent*100, 1)) + "%")
    mide = np.sum(e[j+1:i]* b[j+1:i]) # add up energy * bandwidth for each freq
    fullBands = f[j+1:i]
    printFullBands = ''.join([str(round(1.0/fb,4)) + ' (' + str(round(fb,4)) + ') \n' for fb in fullBands])
    errlog.append("middle, full frequency bands: \n" + printFullBands)
    bande = (partial2eb + mide + partial1eb) * 10000
    # provisional direction data
    meanDirection = meanDegree(d[j:i],e[j:i])
    errlog.append('mean direction: %s, # of values: %i' % (meanDirection, len(d[j:i])))
    if __name__ != "__main__":
        # debug trace printed when imported as a module (Python 2 print)
        print [i for i in errlog]
    return bande, meanDirection
class ndbcSpectra(object):
    """Fetch and post-process spectral wave data for one NDBC buoy.

    Downloads the energy spectrum plus primary/mean direction lines,
    merges them into a numpy structured array (self.spectra) and derives
    significant wave height (self.Hs), per-band heights and a JSON
    representation.  Network/file I/O happens in __init__.
    """
    def __init__(self, buoy='46232',datasource='http',e=[], **kwargs):
        # NOTE(review): mutable default argument e=[] -- it is only read
        # here, but fragile; confirm before refactoring callers.
        self.buoy = buoy
        if datasource == 'local':
            self.data = localDataSpec(buoy)
        else:
            self.data = httpDataSpec(buoy)
        self.dataPDirection = httpDataSpec(buoy, 'swdir2')  # primary wave direction
        self.dataMDirection = httpDataSpec(buoy, 'swdir')   # mean wave direction
        self.nineHeights = []
        self.nineEnergy = []
        self.nineDirections = []
        u = kwargs.get('units', 'ft') # default is to convert m to ft
        if u in ['m', 'metric', 'meters']:
            self.units = 1
        else:
            self.units = 3.28
        # observation timestamp: first four whitespace-separated fields
        td = self.data[:23].split()
        self.timestamp = datetime.datetime(int(td[0]),int(td[1]),int(td[2]),int(td[3]))
        self.json = json.dumps({'buoy':self.buoy,'timestamp':self.timestamp.isoformat()})
        if e:
            # caller supplied replacement energy values
            ds = arrayDataSpec(data_spec(self.data),e)
        else:
            ds = data_spec(self.data)
        dsd = [d + (0,0) for d in ds] # faster to initialize
        self.spectra = np.array(dsd,[('e', 'float16'),('f','float16'),('b','float16'),('pd','int16'),('md','int16')])
        self.spectra['pd'] = [i[0] for i in data_spec(self.dataPDirection)]
        self.spectra['md'] = [i[0] for i in data_spec(self.dataMDirection)]
        self.Hs = self.units*4.01*np.sqrt(np.sum(self.spectra['e']*self.spectra['b'])) # google Rayleigh distribution ocean waves for explanation of formula
        self.json = self.jsonify()
    def jsonify(self, dataType='spectra'):
        """Return a JSON string for 'spectra', '9band' or 'hp' data.

        NOTE(review): uses ``is`` for string comparison (works only via
        CPython string interning), and in the 'spectra' branch the
        'period' key appears to line up with the 'pd' column rather than
        the appended period value -- confirm both.
        """
        js = {'timestamp': self.timestamp.isoformat(' '), 'buoy': self.buoy, 'disclaimer': "Data in this object has not been validated and should be considered a placeholder"}
        jsList = []
        digits = 3
        if dataType is 'spectra':
            b = self.spectra.tolist()
            keys = ['energy density', 'frequency', 'bandwidth', 'period', 'peak direction', 'mean direction']
            for i in b:
                ip = list(i)
                ip.append(1.0/i[1])
                dip = {k:round(d,digits) for k, d in zip(keys, ip)}
                jsList.append(dip)
        elif dataType is '9band':
            b = self.nineBand()
            keys = ['22+','20','17','15','13','11','9','7','4']
            jsList = {k:{'height':v,'direction':v2} for k,v,v2 in zip(keys,b[0],b[1])}
        elif dataType in ['hp', 'heightPeriod', 'heightPeriodDirection', 'HeightPeriodDirections']:
            b = self.heightPeriodDirections()
            jsList = {round(p,digits):{'height':round(h,digits),'peak direction':round(pd,0),'mean direction':round(md,0)} for p,h,pd,md in zip(b[:,1],b[:,0],b[:,2],b[:,3])}
        js[dataType] = jsList
        return json.dumps(js)
    def heightPeriodDirections(self):
        """takes numpy energy, frequency, bandwidth array and returns the height for each spectral band
        waverider buoys return 64 bands, others return ~46"""
        # hp = 3.28*4*np.sqrt(spectra2['e']*spectra2['b'])
        spectra = self.spectra
        # columns: height, period (1/f), peak direction, mean direction
        return np.column_stack((self.units*4*2*np.sqrt(spectra['e']*spectra['b']), 1/spectra['f'], self.spectra['pd'], self.spectra['md']))
    def nineBand(self):
        """Integrate the spectrum into nine period bands (22+s .. 4s).

        Populates self.nineEnergy / nineHeights / nineDirections and
        returns (nineHeights, nineDirections).
        """
        # (0.04545-0.0425)/0.005 = 0.59 or 59%
        # 1.0/22 - spectra2['f'][5] - spectra2['b'][5] / spectra2['b'][5]
        # band22 = np.sum(spectra2['e'][0:4]) + (spectra2['e'][4] * (1.0/22 - (spectra2['f'][4]-.5 * spectra2['b'][4])) / spectra2['b'][4])
        spectra = self.spectra
        o = 1.0
        endBand = o/2
        if spectra['f'][-1] < endBand:
            # buoy reports a shorter maximum frequency; clamp the top fence
            endBand = spectra['f'][-1]
        nineBands = (o/40,o/22,o/18,o/16,o/14,o/12,o/10,o/8,o/6,endBand)
        fence = 0
        # energyList = []
        while fence < 9:
            self.nineEnergy.append(band(spectra, (nineBands[fence], nineBands[fence+1]))) # removed 10000 * .005 from this line on 4/26/16
            fence +=1
        self.nineHeights = [round(2*4*self.units*.01*sqrt(int(v[0])), 2) for v in self.nineEnergy]
        self.nineDirections = [v[1] for v in self.nineEnergy]
        if __name__ != "__main__":
            # debug trace printed when imported as a module (Python 2 print)
            print self.nineEnergy
            print 'buoy: ', self.buoy
            print 'time: ', self.timestamp.isoformat()
            print '9-band: ', self.nineHeights
            print 'Hs: ', self.Hs
        return self.nineHeights, self.nineDirections
def main():
    """CLI entry point: parse arguments, build an ndbcSpectra, print the
    requested representation (raw spectra, 9-band heights, or
    height/period/direction rows), as JSON if --json is given."""
    parser = argparse.ArgumentParser(description='Process data from National Data Buoy Center (ndbc) buoys')
    parser.add_argument('--buoy', '-b', default='46232', help='Enter the buoy you want to access')
    parser.add_argument('--datasource', '-ds', default='http', choices=['http', 'local'], help='use http or local for remote / local data file')
    parser.add_argument('--json', action='store_true', help='return json data')
    parser.add_argument('--datatype', '-dt', choices=['spectra', '9band', 'hp'], help='returns raw buoy spectra, wave heights in 9 bands of wave periods, or wave heights and corresponding period')
    parser.add_argument('--units', '-u', choices=['m','metric','meters','feet','english','ft'], default='feet', help='Choose the units of measurement for wave heights')
    args = vars(parser.parse_args())
    # All parsed options are forwarded to ndbcSpectra as keyword arguments.
    bs = ndbcSpectra(**args)
    # NOTE(review): this local conversion factor is computed but never used
    # below — ndbcSpectra presumably derives units from args itself; confirm
    # whether this block is dead code.
    units = 3.28
    if args['units'] in ['m','metric','meters']:
        units = 1
    if args['json']:
        # JSON output path; no --datatype defaults to raw spectra.
        if args['datatype'] == 'spectra' or args['datatype'] is None:
            print bs.json
        elif args['datatype'] == '9band':
            print bs.jsonify('9band')
        elif args['datatype'] == 'hp':
            print bs.jsonify('hp')
    else:
        # Plain-Python output path; also returns the data for callers.
        data = ''
        if args['datatype'] == 'spectra' or args['datatype'] is None:
            data = bs.spectra
        elif args['datatype'] == '9band':
            data = bs.nineBand()
        elif args['datatype'] == 'hp':
            data = bs.heightPeriodDirections()
        print data
        return data
# Script entry point: run the CLI only when executed directly.
if __name__ == "__main__":
    main()
| {
"repo_name": "calebvandenberg/py-ndbc-buoy-spectra",
"path": "ndbc.py",
"copies": "1",
"size": "14708",
"license": "mit",
"hash": -1333847909483867600,
"line_mean": 42.3012048193,
"line_max": 196,
"alpha_frac": 0.5265841719,
"autogenerated": false,
"ratio": 3.2332380743020446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4259822246202044,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Calle'
from builtins import bytes, str, filter, map
import unittest
from zetacrypt import ciphers, utility
from zetacrypt.conversions import *
from collections import OrderedDict
class TestXORFunctions(unittest.TestCase):
    """Tests for the XOR cipher helpers (cryptopals set 1)."""

    def test_single_byte_xor(self):
        # XOR with 0x20 (space) toggles ASCII letter case.
        plaintext = ascii_to_bytes("abcd")
        ciphertext = ascii_to_bytes("ABCD")
        key = ord(' ')
        self.assertEqual(ciphertext, iterator_to_bytes(ciphers.xor_seq_byte(plaintext, key)))

    def test_key_xor_byte(self):
        # A repeating two-byte key applied across the plaintext.
        plaintext = ascii_to_bytes("a c ")
        key = ascii_to_bytes(" b")
        ciphertext = ascii_to_bytes("ABCB")
        self.assertEqual(ciphertext, iterator_to_bytes(ciphers.xor_seq_key(plaintext, key)))

    def test_set1_problem2(self):
        """Set 1 problem 2: fixed-length buffer XOR.

        Renamed from ``set1_problem2`` so unittest discovery actually runs
        it, and the result is materialized with iterator_to_bytes for
        consistency with the sibling tests (xor_seq_key is consumed through
        iterator_to_bytes everywhere else; a raw iterator would never
        compare equal to bytes).
        """
        ciphertext = hex_to_bytes("746865206b696420646f6e277420706c6179")
        key = hex_to_bytes("686974207468652062756c6c277320657965")
        plaintext = hex_to_bytes("1c0111001f010100061a024b53535009181c")
        self.assertEqual(ciphertext, iterator_to_bytes(ciphers.xor_seq_key(plaintext, key)))
class TestPrepareFunctions(unittest.TestCase):
    """Tests for key generation and PKCS#7 padding helpers."""

    def test_generate_key(self):
        """Renamed from ``text_generate_key`` (typo: 'text' for 'test'),
        which prevented unittest from ever discovering and running it."""
        for keylen in range(10):
            k = ciphers.generate_key(keylen)
            self.assertEqual(keylen, len(k))

    def test_pkcs7_pad(self):
        """Set 2 problem 1"""
        plaintext = ascii_to_bytes("YELLOW SUBMARINE")
        ciphertext = b"YELLOW SUBMARINE\x04\x04\x04\x04"
        # The 16-byte input padded to a multiple of 20 and of 10 both
        # require exactly four 0x04 bytes.
        c1 = ciphers.pkcs7_pad(plaintext, 20)
        self.assertEqual(ciphertext, c1)
        c2 = ciphers.pkcs7_pad(plaintext, 10)
        self.assertEqual(ciphertext, c2)

    def test_pkcs7_verify(self):
        """Set 2 Problem 7"""
        blocklen = 10
        # Valid padding: four 0x04 bytes completing a block multiple.
        ciphertext1 = ascii_to_bytes("YELLOW SUBMARINE\x04\x04\x04\x04")
        self.assertTrue(ciphers.pkcs7_verify(ciphertext1, blocklen))
        # Pad bytes disagree with the implied pad length.
        ciphertext2 = ascii_to_bytes("YELLOW SUBMARINE\x05\x05\x05\x05")
        self.assertFalse(ciphers.pkcs7_verify(ciphertext2, blocklen))
        # Total length is not a multiple of the block length.
        ciphertext3 = ascii_to_bytes("YELLOW SUBMARIN\x04\x04\x04\x04")
        self.assertFalse(ciphers.pkcs7_verify(ciphertext3, blocklen))

    def test_pkcs7_strip(self):
        """Set 2 Problem 7"""
        plaintext = ascii_to_bytes("YELLOW SUBMARINE")
        ciphertext = ascii_to_bytes("YELLOW SUBMARINE\x04\x04\x04\x04")
        m = ciphers.pkcs7_strip(ciphertext)
        self.assertEqual(plaintext, m)
class TestModernCiphersFunctions(unittest.TestCase):
    """AES-128 ECB/CBC round-trip tests against cryptopals fixtures.

    Requires the fixture files under test_data/ and data/ to exist
    relative to the working directory.
    """
    def test_aes_128_cbc_decrypt(self):
        plaintext = "I'm back and I'm ringin' the bell \nA rockin' on "
        ciphertext = base64_to_bytes(utility.readfile('test_data/test_aes_cbc_128.txt'))
        # All-zero IV, as specified by the exercise.
        m = ciphers.aes_128_cbc_decrypt(ciphertext, "YELLOW SUBMARINE", hex_to_bytes("00000000000000000000000000000000"))
        m = bytes_to_ascii(m)
        self.assertEqual(plaintext, m)
    def test_aes_128_cbc_encrypt(self):
        # Encrypting the known plaintext must reproduce the fixture bytes.
        plaintext = "I'm back and I'm ringin' the bell \nA rockin' on "
        ciphertext = base64_to_bytes(utility.readfile('test_data/test_aes_cbc_128.txt'))
        c = ciphers.aes_128_cbc_encrypt(ascii_to_bytes(plaintext), "YELLOW SUBMARINE", hex_to_bytes("00000000000000000000000000000000"))
        self.assertEqual(ciphertext, c)
    def test_aes_128_ecb_decrypt(self):
        blocklen = 16
        key = "YELLOW SUBMARINE"
        plaintext = utility.readfile('data/play_that_funky_music.txt')
        ciphertext = base64_to_bytes(utility.readfile('data/7.txt'))
        m = ciphers.aes_128_ecb_decrypt(ciphertext, key)
        # Decrypted output still carries PKCS#7 padding: verify, then strip.
        self.assertTrue(ciphers.pkcs7_verify(m, blocklen))
        m = ciphers.pkcs7_strip(m)
        m = bytes_to_ascii(m)
        self.assertEqual(plaintext, m)
    def test_aes_128_ecb_encrypt(self):
        key = "YELLOW SUBMARINE"
        plaintext = ascii_to_bytes(utility.readfile('data/play_that_funky_music.txt'))
        ciphertext = base64_to_bytes(utility.readfile('data/7.txt'))
        # Pad before encrypting so the length is a block multiple.
        m = ciphers.pkcs7_pad(plaintext, 16)
        c = ciphers.aes_128_ecb_encrypt(m, key)
        self.assertEqual(ciphertext, c)
class TestProblemSpecificCiphers(unittest.TestCase):
    """Tests for the ECB cut-and-paste profile encoder (cryptopals set 2)."""
    def test_profile_encoder(self):
        encoder = ciphers.ProfileEncoder1()
        # First created profile gets uid 1, the second uid 2 — the encoder
        # evidently keeps an internal counter.
        user = {
            'email': 'foo@bar.com',
            'uid': '1',
            'role': 'user'
        }
        # Injection attempt: the encoder strips the '&' and '=' metacharacters,
        # so 'foo@bar.com&role=admin' collapses to 'foo@bar.comroleadmin'
        # and the role stays 'user'.
        user2 = {
            'email': 'foo@bar.comroleadmin',
            'uid': '2',
            'role': 'user'
        }
        user_string = encoder.create_profile('foo@bar.com')
        self.assertEqual('email=foo@bar.com&uid=1&role=user', user_string)
        # parse_profile must round-trip the encoded string back to a dict.
        self.assertEqual(user, encoder.parse_profile(user_string))
        user2_string = encoder.create_profile('foo@bar.com&role=admin')
        self.assertEqual('email=foo@bar.comroleadmin&uid=2&role=user', user2_string)
        self.assertEqual(user2, encoder.parse_profile(user2_string))
# Allow running this test module directly: python test_ciphers.py
if __name__ == '__main__':
    unittest.main()
| {
"repo_name": "ZetaTwo/zetacrypto",
"path": "tests/test_ciphers.py",
"copies": "1",
"size": "5043",
"license": "mit",
"hash": -8857690431121592000,
"line_mean": 37.7086614173,
"line_max": 136,
"alpha_frac": 0.6313702161,
"autogenerated": false,
"ratio": 3.4352861035422344,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9559753189979971,
"avg_score": 0.0013806259324525996,
"num_lines": 127
} |
__author__ = 'Calle Svensson <calle.svensson@zeta-two.com>'
import math, scipy
def levenshtein_swap(seq1, seq2):
    """Returns the number of pairwise swaps are needed to turn seq1 into seq2"""
    # Each adjacent swap moves two elements one position each, so half the
    # total index displacement is the swap count.
    total_displacement = 0
    for pos, item in enumerate(seq1):
        total_displacement += abs(pos - seq2.index(item))
    return total_displacement / 2
def mean(seq):
    """Arithmetic mean of any iterable (consumed into a list first)."""
    values = list(seq)
    return sum(values) / len(values)
def hamming_distance_char(seq1, seq2):
    """Returns the character hamming distance of two sequences of equal length"""
    # Booleans sum as 0/1; zip truncates to the shorter sequence.
    return sum(a != b for a, b in zip(seq1, seq2))
def hamming_weight(number):
    """Returns the number of bits set in number"""
    # format(n, 'b') is bin(n) without the '0b' prefix; the '1' count is the same.
    return format(number, 'b').count('1')
def hamming_distance_bit(seq1, seq2):
    """Returns the bit hamming distance of two sequences of equal length"""
    # Normalize str inputs to integer code points; bytes already iterate as ints.
    if type(seq1) == str:
        seq1 = (ord(ch) for ch in seq1)
    if type(seq2) == str:
        seq2 = (ord(ch) for ch in seq2)
    return sum(hamming_weight(a ^ b) for a, b in zip(seq1, seq2))
def rms_error(seq1, seq2):
    """Returns the RMS error between two lists of values"""
    assert len(seq1) == len(seq2)
    squared_errors = [(a - b) ** 2 for a, b in zip(seq1, seq2)]
    return math.sqrt(sum(squared_errors) / len(seq1))
def rms_error_dict(dict1, dict2):
    """Returns the RMS error between two dictionaries with the same keys"""
    assert sorted(dict1) == sorted(dict2)
    # Pull values in a single shared key order so positions line up.
    shared_keys = list(dict1)
    return rms_error([dict1[k] for k in shared_keys],
                     [dict2[k] for k in shared_keys])
| {
"repo_name": "ZetaTwo/zetacrypto",
"path": "zetacrypt/mathtools.py",
"copies": "1",
"size": "1532",
"license": "mit",
"hash": 825495650909233000,
"line_mean": 29.2653061224,
"line_max": 81,
"alpha_frac": 0.6103133159,
"autogenerated": false,
"ratio": 3.057884231536926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4168197547436926,
"avg_score": null,
"num_lines": null
} |
__author__ = "Calvin Huang"
import threading
import time
class Sensor(object):
    """
    Abstract sensor base class.

    Data live as instance attributes refreshed by poll(). Every attribute
    write is routed through update_state(), which notifies registered
    listeners when an existing value changes (the first assignment of a
    name is silent). Take care not to clobber vital attributes through
    update_state.
    """
    def __init__(self):
        self.listeners = set()  # callbacks fired on state changes

    def get(self, name):
        """
        Return the datum stored under ``name`` (KeyError if absent).
        """
        return self.__dict__[name]

    def add_listener(self, l):
        """
        Register a callback invoked as l(sensor, state_id, datum)
        whenever an existing state value changes.
        """
        self.listeners.add(l)

    def remove_listener(self, l):
        """
        Unregister a previously added listener.
        """
        self.listeners.remove(l)

    def __setattr__(self, key, value):
        # Route every attribute write through update_state so that
        # listeners observe changes made via plain assignment.
        self.update_state(key, value)

    def update_state(self, state_id, datum):
        """
        Store ``datum`` under ``state_id`` and notify listeners on change.

        A brand-new state_id is recorded silently; only a modification of
        an existing value triggers the listeners.
        """
        state = self.__dict__
        if state_id not in state:
            state[state_id] = datum
            return
        if state[state_id] != datum:
            state[state_id] = datum
            for listener in self.listeners:
                listener(self, state_id, datum)

    def poll(self):
        """
        Refresh the sensor's state, notifying listeners as needed.
        Concrete sensors must override this.
        """
        pass
class SensorPoller(object):
    """
    Drives a collection of sensors by invoking poll() on each of them.
    """
    def __init__(self, sensors):
        """
        Stores the initial collection of sensors to be polled.
        """
        self.sensors = sensors

    def add_sensor(self, sensor):
        """
        Include an additional sensor in the polling cycle.
        """
        self.sensors.append(sensor)

    def remove_sensor(self, sensor):
        """
        Drop a sensor from the polling cycle.
        """
        self.sensors.remove(sensor)

    def poll(self):
        """
        Poll every registered sensor once, in registration order.
        """
        for target in self.sensors:
            target.poll()
class Constants(Sensor):
    """
    Class for reading, and keeping track of, constants in a file, implemented as a singleton.
    Retrieve singleton object by calling Constants().
    File is read line by line, in [key],[number] format.
    Lines starting with '/' are treated as add'l constants files,
    and are read recursively.
    Blank lines, and lines starting with '#' are ignored.
    Behaves more or less like a sensor.
    Access datum like a dictionary.
    """
    file_loc = '/c/constants.txt'  # default constants path (class-level)
    _instance = None               # singleton storage

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # NOTE(review): object.__new__ rejects extra arguments on
            # Python 3, so the very first Constants(file_loc=...) call
            # would raise TypeError here — confirm target interpreter.
            cls._instance = super(Constants, cls).__new__(cls, *args, **kwargs)
            super(Constants, cls._instance).__init__()
            # Load immediately. This runs before __init__ can override
            # file_loc, so the first load always uses the class default.
            cls._instance.poll()
        return cls._instance

    def __init__(self, file_loc=None):
        # No super call on purpose — Sensor.__init__ already ran in __new__.
        if file_loc:
            self.file_loc = file_loc

    def __getitem__(self, key):
        # Dictionary-style access: Constants()['wheel_diameter']
        return self.get(key)

    def __contains__(self, key):
        return self.__dict__.__contains__(key)

    def poll(self):
        """
        Reloads file data.
        """
        self.load_file(self.file_loc)

    def load_file(self, file_loc):
        """
        Loads file data from a file.
        If other files are listed inside this file,
        recursively loads them.
        (Beware of "include loops" that will cause a crash)
        """
        f = open(file_loc)
        for line in f:
            line = line.strip()
            # Skip blanks and '#' comment lines.
            if not line or line.startswith('#'):
                continue
            if line.startswith('/'): # is a file, load it!
                self.load_file(line)
                continue
            # continue with getting data: "key,number" pairs; values go
            # through Sensor.update_state so listeners fire on change.
            key, _, value = line.partition(',')
            try:
                self.update_state(key, float(value))
            except ValueError:
                print("Malformed constants file " + file_loc +
                      " with key " + key + " and value " + value)
        f.close()
class GRTMacro(object):
    """
    Abstract macro class.

    Subclasses override initialize()/perform()/die(). The daemon flag
    specifies whether or not it will run on its own in a concurrent
    (background) thread.
    """
    # Class-level defaults; shadowed per instance during execution.
    timed_out = False      # True when the timeout fired before completion
    started = False        # latched True by execute(); cleared by reset()
    start_time = None      # NOTE(review): never assigned in this class
    timeout_timer = None   # NOTE(review): execute() uses a *local* timer instead
    _disabled_flag = None  # Flag that is true if not running (threading.Event)

    @property
    def running(self):
        # The macro runs while the "disabled" event is clear.
        return not self._disabled_flag.is_set()

    @running.setter
    def running(self, value):
        if value:
            self._disabled_flag.clear()
        else:
            self._disabled_flag.set()

    def __init__(self, timeout=0, poll_time=0.05, daemon=False):
        """
        Creates a macro with timeout (infinite by default)
        and poll interval, in seconds (0.05s by default)
        """
        self.timeout = timeout
        self.poll_time = poll_time
        self.daemon = daemon
        self._disabled_flag = threading.Event()

    def run(self):
        """
        Start macro in new thread.
        See execute() for more details on macro execution.
        """
        self.thread = threading.Thread(target=self.execute)
        self.thread.start()

    def _wait(self, duration):
        """
        Sleeps for some time.
        To be used within macros instead of time.sleep()
        so that a StopIteration exception will be raised
        when it is interrupted during a sleep cycle.
        """
        # Event.wait returns early as soon as kill() sets the flag.
        self._disabled_flag.wait(duration)
        if not self.running:
            # Unwinds out of perform(); swallowed by execute()'s except.
            raise StopIteration()

    def execute(self):
        """
        Starts macro in current thread.
        First calls initialize(), then calls perform()
        periodically until timeout or completion.
        After completion, calls die().
        """
        # A finished macro must be reset() before it can run again.
        if not self.started:
            self.started = True
            self.running = True

            def _timeout():
                # Timer callback: mark the timeout and stop the macro.
                self.timed_out = True
                self.kill()
            if self.timeout:
                timeout_timer = threading.Timer(self.timeout, _timeout)
                timeout_timer.start()
            else:
                timeout_timer = None
            try:
                self.initialize()
                while self.running:
                    self.perform()
                    time.sleep(self.poll_time)
            except StopIteration:
                # Raised by _wait() when killed mid-sleep; normal shutdown.
                pass
            self.running = False
            if timeout_timer:
                # Completed before the deadline: cancel the pending timer.
                timeout_timer.cancel()
            self.die()

    def reset(self):
        """
        Resets a macro, allowing it to be started again
        """
        self.running = self.started = self.timed_out = False

    def initialize(self):
        """
        Run once, at the beginning of macro execution.
        """
        pass

    def perform(self):
        """
        Macro execution body, run periodically.
        """
        pass

    def die(self):
        """
        Cleanup after macro execution.
        """
        pass

    def kill(self):
        """
        Stop macro execution.
        """
        if self.running:
            self.running = False
| {
"repo_name": "grt192/2012rebound-rumble",
"path": "py/grt/core.py",
"copies": "1",
"size": "7343",
"license": "mit",
"hash": -4488672601108266000,
"line_mean": 25.5090252708,
"line_max": 93,
"alpha_frac": 0.539425303,
"autogenerated": false,
"ratio": 4.482905982905983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004923183796332703,
"num_lines": 277
} |
__author__ = "Calvin Huang"
from wpilib import CounterBase
from wpilib import Encoder as WEncoder
from grt.core import Sensor
class Encoder(Sensor):
    """
    Sensor wrapper for a quadrature encoder.
    Has double attributes distance, rate (distance/second);
    boolean attributes stopped and direction.
    """
    # Class-level defaults; instance values are refreshed by poll().
    distance = rate = 0
    stopped = direction = True

    def __init__(self, channel_a, channel_b, pulse_dist=1.0,
                 reverse=False, modnum=1, cpr=128,
                 enctype=CounterBase.k4X):
        """
        Initializes the encoder with two channels,
        distance per pulse (usu. feet, default 1), no reversing,
        on module number 1, 128 CPR, and with 4x counting.
        """
        super().__init__()
        # WPILib Encoder signature: (aModule, aChannel, bModule, bChannel,
        # reverseDirection, encodingType) — both channels on the same module.
        self.e = WEncoder(modnum, channel_a, modnum, channel_b, reverse, enctype)
        self.cpr = cpr  # NOTE(review): stored but not used anywhere visible
        self.e.SetDistancePerPulse(pulse_dist)
        self.e.Start()

    def poll(self):
        # Attribute writes route through Sensor.__setattr__/update_state,
        # so listeners fire whenever any of these readings change.
        self.distance = self.e.GetDistance()
        self.rate = self.e.GetRate()
        self.stopped = self.e.GetStopped()
        self.direction = self.e.GetDirection()
| {
"repo_name": "grt192/2012rebound-rumble",
"path": "py/grt/sensors/encoder.py",
"copies": "1",
"size": "1126",
"license": "mit",
"hash": -6215613438262429000,
"line_mean": 29.4324324324,
"line_max": 81,
"alpha_frac": 0.6198934281,
"autogenerated": false,
"ratio": 3.7039473684210527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9822192807068185,
"avg_score": 0.00032959789057350036,
"num_lines": 37
} |
__author__ = "Calvin Huang"
from wpilib import DriverStation
from grt.core import Sensor
# button/pin pair list
# button/pin pair list: logical button names mapped to the digital input
# pin numbers on the driver-station enhanced I/O board.
BUTTON_TABLE = [('button1', 1), ('button2', 3), ('button3', 5),
                ('button4', 7), ('button5', 9), ('button6', 11),
                ('button7', 13), ('button8', 15),
                ('l_toggle', 10), ('r_toggle', 12),
                ('orange_button', 14), ('green_button', 16)]
# Output pins presumably driving an external shift register (clock, two
# data lines, load) — confirm against the board wiring.
REGISTER_CLK = 2
REGISTER_D1 = 6
REGISTER_D2 = 8
REGISTER_LOAD = 4
# Enhanced I/O singleton used for all pin configuration and reads.
IOBOARD = DriverStation.GetInstance().GetEnhancedIO()
class ButtonBoard(Sensor):
    """
    Sensor wrapper for the HH buttonboard.
    Has 8 buttons, two toggles and two colored buttons, all exposed as
    boolean attributes (True = pressed).
    """
    # Class-level defaults; per-instance values are set by poll().
    button1 = button2 = button3 = button4 = button5 = button6 = \
        button7 = button8 = l_toggle = r_toggle = \
        orange_button = green_button = False

    def __init__(self):
        """
        Constructs a new ButtonBoard. Only one should be instantiated.
        """
        super().__init__()
        # Configure every button pin as an input with pull-up (buttons
        # therefore read active-low). ``name`` is unused in this loop.
        for name, pin in BUTTON_TABLE:
            IOBOARD.SetDigitalConfig(pin, IOBOARD.tDigitalConfig.kInputPullUp)
        # Shift-register control pins are outputs.
        for i in (REGISTER_CLK, REGISTER_D1, REGISTER_D2, REGISTER_LOAD):
            IOBOARD.SetDigitalConfig(i, IOBOARD.tDigitalConfig.kOutput)

    def poll(self):
        diostate = IOBOARD.GetDigitals()  # bit-packed button states
        # Pins are 1-based; a low bit (== 0) means pressed because of the
        # pull-up configuration above. update_state notifies listeners.
        for name, pin in BUTTON_TABLE:
            self.update_state(name, ((diostate >> (pin - 1)) & 1) == 0)
# TODO LEDs
| {
"repo_name": "grt192/2012rebound-rumble",
"path": "py/grt/sensors/buttonboard.py",
"copies": "1",
"size": "1430",
"license": "mit",
"hash": 1120790211648922000,
"line_mean": 29.4255319149,
"line_max": 78,
"alpha_frac": 0.5874125874,
"autogenerated": false,
"ratio": 3.4541062801932365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45415188675932366,
"avg_score": null,
"num_lines": null
} |
__author__ = "Calvin Huang"
from wpilib import Joystick
from grt.core import Sensor
# Xbox button attribute names in WPILib raw-button order (GetRawButton is
# 1-indexed; index i+1 maps to BUTTON_TABLE[i]).
BUTTON_TABLE = ['a_button', 'b_button', 'x_button', 'y_button',
                'l_shoulder', 'r_shoulder', 'back_button',
                'start_button']
class XboxJoystick(Sensor):
    """
    Sensor wrapper for the Xbox Controller.
    Has boolean attributes for buttons: a/b/x/y/back/start_button,
    l/r_shoulder
    Attributes l/r_x/y_axis for thumbstick positions
    trigger_pos and keypad_pos for trigger and keypad position
    """
    # Class-level defaults; instance values are refreshed by poll().
    l_x_axis = l_y_axis = r_x_axis = r_y_axis = 0
    trigger_pos = keypad_pos = 0
    a_button = b_button = x_button = y_button = False
    l_shoulder = r_shoulder = back_button = start_button = False

    def __init__(self, port):
        """
        Initializes the joystick with some USB port.
        """
        super().__init__()
        self.j = Joystick(port)

    def poll(self):
        # Read the named buttons through update_state so listeners fire.
        for i, state_id in enumerate(BUTTON_TABLE, 1):
            self.update_state(state_id,
                              self.j.GetRawButton(i))
        # button index is offset by 1 due to wpilib 1-indexing
        # Axis writes route through Sensor.__setattr__ and notify listeners.
        self.l_x_axis = self.j.GetX()
        self.l_y_axis = self.j.GetY()
        self.r_x_axis = self.j.GetRawAxis(4)  # presumably right stick X — confirm mapping
        self.r_y_axis = self.j.GetRawAxis(5)  # presumably right stick Y — confirm mapping
        self.trigger_pos = self.j.GetZ()
        self.keypad_pos = self.j.GetRawAxis(6)  # presumably the d-pad axis — confirm
| {
"repo_name": "grt192/2012rebound-rumble",
"path": "py/grt/sensors/xbox_joystick.py",
"copies": "1",
"size": "1394",
"license": "mit",
"hash": -8848539947045994000,
"line_mean": 30.6818181818,
"line_max": 66,
"alpha_frac": 0.5846484935,
"autogenerated": false,
"ratio": 3.1325842696629214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42172327631629214,
"avg_score": null,
"num_lines": null
} |
__author__ = "Calvin Huang"
from wpilib import Joystick
from grt.core import Sensor
# Attack 3 button attribute names in WPILib raw-button order (GetRawButton
# is 1-indexed; index i+1 maps to BUTTON_TABLE[i]).
BUTTON_TABLE = ['trigger', 'button2', 'button3',
                'button4', 'button5', 'button6',
                'button7', 'button8', 'button9',
                'button10', 'button11']
class Attack3Joystick(Sensor):
    """
    Sensor wrapper for the Attack 3 Joystick.
    Has boolean attributes for buttons: trigger, button2-11
    and double x_axis, y_axis for joystick position
    """
    # Class-level defaults; instance values are refreshed by poll().
    x_axis = y_axis = 0
    trigger = button2 = button3 = \
        button4 = button5 = button6 = \
        button7 = button8 = button9 = \
        button10 = button11 = False

    def __init__(self, port):
        """
        Initializes the joystick with some USB port.
        """
        super().__init__()
        self.j = Joystick(port)

    def poll(self):
        # Read the named buttons through update_state so listeners fire.
        for i, state_id in enumerate(BUTTON_TABLE, 1):
            self.update_state(state_id,
                              self.j.GetRawButton(i))
        # button index is offset by 1 due to wpilib 1-indexing
        # Axis writes route through Sensor.__setattr__ and notify listeners.
        self.x_axis = self.j.GetX()
        self.y_axis = self.j.GetY()
| {
"repo_name": "grt192/2012rebound-rumble",
"path": "py/grt/sensors/attack_joystick.py",
"copies": "1",
"size": "1134",
"license": "mit",
"hash": -2572354073711708000,
"line_mean": 27.35,
"line_max": 66,
"alpha_frac": 0.5582010582,
"autogenerated": false,
"ratio": 3.5772870662460567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9635488124446057,
"avg_score": 0,
"num_lines": 40
} |
__author__ = "Calvin Huang"
"""
Executes macros in a list/tuple/whatever sequentially.
"""
from grt.core import GRTMacro
class SequentialMacros(GRTMacro):
    """
    Executes macros sequentially. Less efficient compared to GRTMacroController,
    but has timeout functionality (overall default timeout: 20 s).
    """
    curr_macro = None        # the child macro currently executing
    curr_macro_index = 0     # NOTE(review): never updated; macro_queue is used instead

    def __init__(self, macros, timeout=20, daemon=False):
        # ``macros`` is an iterable of GRTMacro; it is copied into a fresh
        # queue on every initialize() so the sequence can be re-run.
        super().__init__(timeout, daemon=daemon)
        self.macros = macros

    def initialize(self):
        self.curr_macro = None
        self.macro_queue = list(self.macros)

    def perform(self):
        # Advance only once the current child has finished (or before the
        # first child has started).
        if self.curr_macro is None or not self.curr_macro.running:
            if not self.macro_queue: # no more child macros
                self.kill()
                return
            self.curr_macro = self.macro_queue.pop(0)
            print('starting sequential macro')
            self.curr_macro.reset()
            self.curr_macro.run()  # starts the child in its own thread
        # if curr_macro is still running, do nothing

    def die(self):
        # Stop the active child, then make sure every child is killed.
        if self.curr_macro is not None:
            self.curr_macro.kill()
        for m in self.macros:
            m.kill()
| {
"repo_name": "grt192/2012rebound-rumble",
"path": "py/grt/macro/sequential_macros.py",
"copies": "1",
"size": "1151",
"license": "mit",
"hash": -6887749457581128000,
"line_mean": 27.0731707317,
"line_max": 80,
"alpha_frac": 0.5994787142,
"autogenerated": false,
"ratio": 3.8754208754208754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49748995896208753,
"avg_score": null,
"num_lines": null
} |
__author__ = "Calvin Huang, Sidd Karamcheti"
class DriveTrain:
    """
    Standard 6-motor drivetrain with tank-drive control.

    Motor outputs are scaled by ``power`` (clamped to [0, 1]); shifters
    and encoders are optional.
    """
    power = 1.0  # global output scale factor, clamped to [0, 1]

    def __init__(self,
                 left_motor, right_motor,
                 left_shifter=None, right_shifter=None,
                 left_encoder=None, right_encoder=None):
        """
        Initializes the drivetrain with some motors (or MotorSets),
        optional shifters and encoders
        """
        self.left_motor = left_motor
        self.right_motor = right_motor
        self.left_shifter = left_shifter
        self.right_shifter = right_shifter
        self.left_encoder = left_encoder
        self.right_encoder = right_encoder

    def set_dt_output(self, left_output, right_output):
        """
        Sets the DT output values; should be between -1 and 1.
        Both sides are scaled by the current power factor.
        """
        self.left_motor.Set(left_output * self.power)
        self.right_motor.Set(right_output * self.power)

    def set_power(self, power):
        """
        Sets the power level of the DT (clamped into [0, 1]).
        Scales all subsequent motor outputs by this factor.
        """
        self.power = min(1, max(0, power))

    def upshift(self):
        """
        Shifts into high gear, if shifters are present.
        """
        for shifter in (self.left_shifter, self.right_shifter):
            if shifter:
                shifter.Set(False)

    def downshift(self):
        """
        Shifts into low gear, if shifters are present.
        """
        for shifter in (self.left_shifter, self.right_shifter):
            if shifter:
                shifter.Set(True)
| {
"repo_name": "grt192/2012rebound-rumble",
"path": "py/grt/mechanism/drivetrain.py",
"copies": "1",
"size": "1709",
"license": "mit",
"hash": -3928417575894446000,
"line_mean": 28.9824561404,
"line_max": 67,
"alpha_frac": 0.5664131071,
"autogenerated": false,
"ratio": 3.6991341991341993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4765547306234199,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
from weakref import WeakSet
from future.utils import iteritems
from . import Property
"""
Create a dictionary of conversion methods between US Standard and Metric distance unit systems.
"""
# Each distance unit expressed per metre; every pairwise conversion below
# is derived from these base factors.
from_meter_conversions = {'km': 1 / 1000., 'm': 1, 'cm': 100., 'mm': 1000.,
                          'ft': 3.28084, 'yards': 1.09361, 'miles': 0.000621371, 'inches': 39.3701, 'in': 39.3701}
# Maps "<from>_to_<to>" -> multiplicative conversion factor, for every
# ordered pair of units (including to/from metres).
ConversionFactors = {}
for v, v_per_meter in iteritems(from_meter_conversions):
    # For each entry, add the conversion to and from meters
    fmt1 = "m_to_{}".format(v)
    inv1 = "{}_to_m".format(v)
    ConversionFactors[fmt1] = v_per_meter
    ConversionFactors[inv1] = 1. / v_per_meter
    for q, q_per_meter in from_meter_conversions.items():
        # for each entry, add the conversion to and from other entries
        v_per_q = v_per_meter / q_per_meter
        fmt2 = "{}_to_{}".format(q, v)
        inv2 = "{}_to_{}".format(v, q)
        # Each pair is written twice across the outer loop; the values
        # agree, so the overwrite is harmless.
        ConversionFactors[fmt2] = v_per_q
        ConversionFactors[inv2] = 1. / v_per_q
class UnitProperty(Property):
    """A Property that carries a distance unit and can convert its stored
    value in place via the ConversionFactors table."""
    # All live UnitProperty descriptors, so a global unit-system change can
    # reach every one of them (weak refs: no lifetime extension).
    unit_properties = WeakSet()

    def __init__(self, default_value, units):
        super(UnitProperty, self).__init__(default_value, units=units)

    @staticmethod
    def get_units(dispatcher, property_name):
        # Units are stored next to the value in the dispatcher's registry.
        return dispatcher.event_dispatcher_properties[property_name]['units']

    @staticmethod
    def convert_to(dispatcher, property_name, units):
        # Convert the stored value to ``units``; no-op if already there.
        info = dispatcher.event_dispatcher_properties[property_name]
        if info['units'] == units:
            return
        # KeyError here means the unit pair is not in ConversionFactors.
        c = ConversionFactors["{}_to_{}".format(info['units'], units)]
        # setattr routes through the property so listeners are notified.
        setattr(dispatcher, property_name, c * info['value'])
        info['units'] = units

    def register(self, instance, property_name, default_value):
        super(UnitProperty, self).register(instance, property_name, default_value)
        # Keep track of all the UnitProperties so that we can change them all when the unit system changes
        self.unit_properties.add(self)
| {
"repo_name": "lobocv/eventdispatcher",
"path": "eventdispatcher/unitproperty.py",
"copies": "1",
"size": "2016",
"license": "mit",
"hash": -2556336244475727000,
"line_mean": 37.7692307692,
"line_max": 114,
"alpha_frac": 0.6438492063,
"autogenerated": false,
"ratio": 3.536842105263158,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46806913115631577,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
import collections
from functools import partial
from . import Property
# collections.MutableSet was removed from the collections top level in
# Python 3.10; fall back for Python 2 where collections.abc doesn't exist.
try:
    _MutableSet = collections.abc.MutableSet
except AttributeError:  # Python 2
    _MutableSet = collections.MutableSet


class ObservableSet(_MutableSet):
    """A set wrapper that calls ``dispatch`` whenever its contents change.

    ``dispatch`` receives the underlying set after every mutation.
    """

    def __init__(self, dictionary, dispatch_method):
        # Parameter is named ``dictionary`` for historical reasons (copied
        # from ObservableDict) but is expected to be a set.
        self.set = dictionary.copy()
        self.dispatch = dispatch_method

    def __repr__(self):
        return self.set.__repr__()

    def __get__(self, instance, owner):
        # NOTE(review): descriptor hook, only meaningful when an instance is
        # stored as a class attribute — looks vestigial.
        return self.set

    def __contains__(self, item):
        return item in self.set

    def __len__(self):
        return len(self.set)

    def __getitem__(self, item):
        # NOTE(review): copied from ObservableDict; sets do not support
        # subscripting, so calling this raises TypeError.
        return self.set[item]

    def __setitem__(self, key, value):
        # NOTE(review): copied from ObservableDict; sets do not support
        # item assignment, so calling this raises TypeError.
        try:
            prev = self.set[key]
            check = prev != value
        except KeyError:
            check = True
        self.set[key] = value
        if check:
            self.dispatch(self.set)

    def __eq__(self, other):
        # Must be this order and not self.set == other, otherwise unittest.assertEquals fails
        return other == self.set

    def __cmp__(self, other):
        return self.set == other

    def __ne__(self, other):
        return self.set != other

    def __delitem__(self, key):
        # NOTE(review): sets do not support ``del s[key]``; raises TypeError.
        del self.set[key]
        self.dispatch(self.set)

    def __iter__(self):
        return iter(self.set)

    def __nonzero__(self):
        # Python 2 truthiness; Python 3 falls back to __len__.
        return bool(self.set)

    def __getstate__(self):
        return self.set

    def __reduce__(self):
        # Pickles as a plain set, dropping the dispatch hook.
        return (set, (tuple(self.set),), None, None, None)

    def add(self, value):
        """Add ``value``, dispatching only if it was not already present.

        (Fix: previously add() mutated silently, unlike remove()/pop(),
        so listeners missed additions.)
        """
        if value not in self.set:
            self.set.add(value)
            self.dispatch(self.set)

    def discard(self, value):
        """Discard ``value``, dispatching only if it was actually removed.

        (Fix: previously discard() mutated silently.)
        """
        if value in self.set:
            self.set.discard(value)
            self.dispatch(self.set)

    def copy(self):
        return self.__class__(self.set, self.dispatch)

    def get(self, key, default=None):
        # NOTE(review): copied from ObservableDict; sets have no ``get`` and
        # this method also fails to return — calling it raises AttributeError.
        self.set.get(key, default)

    def remove(self, item):
        # Raises KeyError if absent (standard set semantics), so a dispatch
        # here always reflects a real removal.
        self.set.remove(item)
        self.dispatch(self.set)

    def update(self, *items):
        # NOTE(review): a set never equals the tuple of update arguments,
        # so this comparison is always True and every call dispatches.
        if self.set != items:
            self.set.update(*items)
            self.dispatch(self.set)

    def pop(self):
        item = self.set.pop()
        self.dispatch(self.set)
        return item

    def difference(self, items):
        return self.set.difference(items)
class SetProperty(Property):
    """Dispatcher property whose value is a change-observing set."""
    def __init__(self, default_value, **kwargs):
        super(SetProperty, self).__init__(default_value, **kwargs)
        if not isinstance(default_value, set):
            raise ValueError('SetProperty takes sets only.')

    def register(self, instance, property_name, value):
        # Wrap the value so mutations dispatch through the owning instance.
        # NOTE(review): self.value lives on the shared descriptor, so only
        # the most recently registered instance's observable is kept here;
        # per-instance state is presumably held by Property.register — confirm.
        self.value = ObservableSet(value, dispatch_method=partial(instance.dispatch, property_name, instance))
        super(SetProperty, self).register(instance, property_name, self.value)

    def __set__(self, obj, value):
        p = self.instances[obj]
        do_dispatch = p['value'] != value
        # Mutate the existing ObservableSet's backing set in place so any
        # outstanding references observe the new contents.
        p['value'].set.clear()
        p['value'].set.update(value)
        if do_dispatch:
            for callback in p['callbacks']:
                # A truthy return stops further callback propagation.
                if callback(obj, value):
                    break
break | {
"repo_name": "lobocv/eventdispatcher",
"path": "eventdispatcher/setproperty.py",
"copies": "1",
"size": "2995",
"license": "mit",
"hash": 4538365209133760500,
"line_mean": 25.5132743363,
"line_max": 110,
"alpha_frac": 0.576293823,
"autogenerated": false,
"ratio": 3.993333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006319647147230298,
"num_lines": 113
} |
__author__ = 'calvin'
import collections
from future.utils import iteritems, iterkeys, itervalues
from functools import partial
from . import Property
class __DoesNotExist__:
    # Sentinel used by ObservableDict.__setitem__ to distinguish a missing
    # key from a stored value of None/False.
    pass
class ObservableDict(collections.MutableMapping):
    """Dict wrapper that calls ``dispatch`` whenever its contents change.

    NOTE(review): collections.MutableMapping moved to collections.abc and
    the top-level alias was removed in Python 3.10 — confirm the target
    interpreter before upgrading.
    """
    def __init__(self, dictionary, dispatch_method):
        self.dictionary = dictionary.copy()
        self.dispatch = dispatch_method

    def __repr__(self):
        return self.dictionary.__repr__()

    def __get__(self, instance, owner):
        # NOTE(review): descriptor hook, only meaningful when an instance is
        # stored as a class attribute — looks vestigial.
        return self.dictionary

    def __contains__(self, item):
        return item in self.dictionary

    def __getitem__(self, item):
        return self.dictionary[item]

    def __setitem__(self, key, value):
        # Sentinel distinguishes "key absent" from a stored None/False.
        prev = self.dictionary.get(key, __DoesNotExist__)
        self.dictionary[key] = value
        try:
            # Ensure that the comparison evaluates as a scalar boolean (unlike numpy arrrays)
            dispatch = bool(prev != value)
        except Exception:
            # Ambiguous comparison (e.g. array-valued): assume changed.
            dispatch = True
        if dispatch:
            self.dispatch(self.dictionary)

    def clear(self):
        # Dispatch only when something was actually removed.
        if len(self.dictionary):
            self.dictionary.clear()
            self.dispatch(self.dictionary)

    def __len__(self):
        return len(self.dictionary)

    def __eq__(self, other):
        return self.dictionary == other

    def __cmp__(self, other):
        return self.dictionary == other

    def __ne__(self, other):
        return self.dictionary != other

    def __delitem__(self, key):
        del self.dictionary[key]
        self.dispatch(self.dictionary)

    def __iter__(self):
        return iter(self.dictionary)

    def __nonzero__(self):
        # Python 2 truthiness; Python 3 falls back to __len__.
        return bool(self.dictionary)

    def __getstate__(self):
        return self.dictionary

    def __reduce__(self):
        # Pickles as a plain dict, dropping the dispatch hook.
        return dict, tuple(), None, None, iter(iteritems(self.dictionary))

    def copy(self):
        # Returns a plain dict copy, not another ObservableDict.
        return self.dictionary.copy()

    def get(self, key, default=None):
        return self.dictionary.get(key, default)

    def itervalues(self):
        return itervalues(self.dictionary)

    def iterkeys(self):
        return iterkeys(self.dictionary)

    def iteritems(self):
        return iteritems(self.dictionary)

    def update(self, _dict=None, **kwargs):
        # Dispatch only when the merge would change the contents; the
        # try/except guards against non-boolean comparisons (numpy arrays).
        if _dict:
            try:
                not_equal = bool(self.dictionary != _dict)
            except Exception:
                not_equal = True
            if not_equal:
                self.dictionary.update(_dict)
                self.dispatch(self.dictionary)
        elif kwargs:
            try:
                not_equal = bool(self.dictionary != kwargs)
            except Exception:
                not_equal = True
            if not_equal:
                self.dictionary.update(kwargs)
                self.dispatch(self.dictionary)

    def keys(self):
        return self.dictionary.keys()

    def values(self):
        return self.dictionary.values()

    def items(self):
        return self.dictionary.items()

    def pop(self, key):
        item = self.dictionary.pop(key)
        self.dispatch(self.dictionary)
        return item
class DictProperty(Property):
    """Dispatcher property whose value is a change-observing dict."""
    def __init__(self, default_value, **kwargs):
        super(DictProperty, self).__init__(default_value, **kwargs)
        if not isinstance(default_value, dict):
            raise ValueError('DictProperty takes dict only.')

    def register(self, instance, property_name, value):
        # Wrap the value so mutations dispatch through the owning instance.
        # NOTE(review): self.value lives on the shared descriptor, so only
        # the most recently registered instance's observable is kept here;
        # per-instance state is presumably held by Property.register — confirm.
        self.value = ObservableDict(value, dispatch_method=partial(instance.dispatch, property_name, instance))
        super(DictProperty, self).register(instance, property_name, self.value)

    def __set__(self, obj, value):
        p = self.instances[obj]
        try:
            # Ensure that the comparison evaluates as a scalar boolean (unlike numpy arrrays)
            do_dispatch = bool(p['value'] != value)
        except Exception:
            do_dispatch = True
        if do_dispatch:
            # Mutate the existing ObservableDict in place so outstanding
            # references observe the new contents.
            p['value'].dictionary.clear()
            p['value'].dictionary.update(value)
            for callback in p['callbacks']:
                # A truthy return stops further callback propagation.
                if callback(obj, value):
                    break
break | {
"repo_name": "lobocv/eventdispatcher",
"path": "eventdispatcher/dictproperty.py",
"copies": "1",
"size": "4221",
"license": "mit",
"hash": 7021586282009213000,
"line_mean": 27.9178082192,
"line_max": 111,
"alpha_frac": 0.5922767117,
"autogenerated": false,
"ratio": 4.480891719745223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5573168431445223,
"avg_score": null,
"num_lines": null
} |
author = 'calvin'
import gettext
from builtins import str as basestring, str as unicode
from eventdispatcher import Property
# The translation (gettext) function to be used
def no_translation(s):
    # Default translator: identity function, used when no language is loaded.
    return s
translator = no_translation
def fake_translation(s):
    """
    A fake translation function ('french') that helps you verify that all
    text in the program has been tagged: wraps every string in '#' markers,
    leaving bare newlines untouched.
    """
    if s == '\n':
        return '\n'
    return '#%s#' % s
class StringProperty(Property):
    """Event-dispatching string property with gettext-style translation
    support: values created via the _ wrapper are re-translated automatically
    whenever the global translator function changes."""
    # Class-level set of callbacks fired whenever the global translator changes.
    observers = set()
    def __init__(self, default_value):
        super(StringProperty, self).__init__(default_value)
        # Owner instances whose value is a translatable _ and must be
        # re-translated when the language changes.
        self.translatables = set()
        if not isinstance(default_value, (str, basestring, unicode)):
            raise ValueError('StringProperty can only accepts strings.')
    def register(self, instance, property_name, default_value):
        super(StringProperty, self).register(instance, property_name, default_value)
        if isinstance(default_value, _):
            # Translatable default: remember the raw _ object and the owner
            # so the value can be re-translated on language change.
            prop = instance.event_dispatcher_properties[property_name]
            prop.update({'_': default_value, 'obj': instance})
            StringProperty.observers.add(self.translate)
            self.translatables.add(instance)
    def __get__(self, obj, owner):
        value = obj.event_dispatcher_properties[self.name]['value']
        if isinstance(value, _):
            # Translate lazily on read so the current language is always used.
            return _.translate(value)
        else:
            return value
    def __set__(self, obj, value):
        prop = obj.event_dispatcher_properties[self.name]
        if isinstance(value, _):
            # If it is tagged as translatable, register this object as an observer
            prop.update({'_': value, 'obj': obj})
            StringProperty.observers.add(self.translate)
            self.translatables.add(obj)
        else:
            # Plain string: drop any translatable bookkeeping for this owner.
            if obj in self.translatables:
                self.translatables.remove(obj)
            if '_' in prop:
                del prop['_']
        if value != prop['value']:
            prop['value'] = value
            for callback in prop['callbacks']:
                if callback(obj, value):
                    break
    def translate(self):
        # Re-translate every translatable owner and notify its callbacks.
        for obj in self.translatables:
            prop = obj.event_dispatcher_properties[self.name]
            prop['value'] = _.translate(prop['_'])
            for callback in prop['callbacks']:
                callback(prop['obj'], prop['value'])
    @staticmethod
    def remove_translation():
        """
        Remove the currently set translation function and return the language back to english (or default)
        """
        StringProperty.set_translator(no_translation)
    @staticmethod
    def get_translation_function():
        """Return the currently active module-level translator function."""
        global translator
        return translator
    @staticmethod
    def load_fake_translation(func=None):
        """
        Load a fake translation function that can help you verify that you have tagged all text in the program.
        The default fake wraps every translated string in '#' markers.
        """
        StringProperty.set_translator(func or fake_translation)
    @staticmethod
    def get_translator(domain, localedir=None, languages=None, class_=None, fallback=False, codeset=None):
        # Create the translation class from gettext
        translation = gettext.translation(domain, localedir, languages, class_, fallback, codeset)
        # NOTE(review): ugettext is the Python 2 gettext API (py3 uses .gettext).
        return translation.ugettext
    @staticmethod
    def set_translator(translator_func):
        """Install *translator_func* globally and re-translate all observers."""
        global translator
        translator = translator_func
        # Dispatch the changes to all the observers
        for callback in StringProperty.observers:
            callback()
class _(unicode):
    """
    This is a wrapper to the gettext translation function _(). This wrapper allows the eventdispatcher.StringProperty
    to be automatically updated when the language (translation function) changes. In this way, all labels will be
    re-translated automatically.
    """
    def __new__(cls, s, *args, **kwargs):
        # Unwrap nested _ objects so we always store the raw english string.
        if isinstance(s, _):
            s = unicode(s.untranslated)
        if translator:
            # Store the *translated* text as the underlying string value.
            trans = translator(s, *args, **kwargs)
            obj = super(_, cls).__new__(cls, trans, *args, **kwargs)
        else:
            obj = super(_, cls).__new__(cls, s, *args, **kwargs)
        obj.untranslated = unicode(s)
        # Strings appended via __add__; kept separate so each part can be
        # re-translated individually when the language changes.
        obj._additionals = []
        return obj
    def __eq__(self, other):
        """
        Compare the fully joined string (summation of the _additionals) if comparing _ objects, otherwise compare
        the untranslated strings
        """
        # NOTE(review): __eq__ compares untranslated text while __ne__ below
        # compares translated text, so the two are not strict negations of
        # each other -- confirm this asymmetry is intentional.
        if isinstance(other, _):
            return (self.untranslated == other.untranslated) and \
                   (self._additionals == other._additionals)
        else:
            return self.untranslated == other
    def __ne__(self, other):
        """
        Compare the fully translated string (including the _additionals) if comparing _ objects, otherwise compare
        the english strings
        """
        s = _.translate(self)
        if isinstance(other, _):
            return s != _.translate(other)
        else:
            return s != other
    def __add__(self, other):
        """
        Rather than creating a new _ instance of the sum of the two strings, keep the added string as a reference so
        that we can translate each individually. In this way we can make sure the following translates correctly:
        eg.
        _('Show Lines') + '\n' + (_('On') if show_lines else _('Off'))
        """
        # NOTE(review): mutates and returns self rather than a new object.
        if isinstance(other, _):
            self._additionals.append(other)
            self._additionals.extend(other._additionals)
        else:
            self._additionals.append(other)
        return self
def __mul__(self, other):
if type(other) is bool:
if other:
return self
else:
return ''
if type(other) is int:
self._additionals.extend([self] * other)
else:
raise TypeError("can't multiply sequence by non-int of type %s" % type(other))
    def __repr__(self):
        # Show both the translated and the original english text for debugging.
        return u"{trans} ({orig})".format(trans=_.translate(self), orig=self.untranslated)
    def __str__(self):
        return _.translate(self)
    def __unicode__(self):
        return _.translate(self)
    def __contains__(self, item):
        # Membership is tested against the raw english text, not translations.
        return item in self.untranslated or any(item in a for a in self._additionals)
    def center(self, width, fillchar=None):
        # Center the *translated* text (returns a plain string, not a _).
        s = _.translate(self)
        return s.center(width, fillchar)
    @staticmethod
    def join_additionals(s, func=None):
        """
        Translate and return a string that contains the _ instance plus anything that was added to it.
        :param s: _ instance
        :param func: translation function
        :return: joined unicode string
        """
        l = [(func or translator)(s.untranslated)]
        for a in s._additionals:
            # Only _ instances get translated; plain strings pass through.
            l.append((func or translator)(a.untranslated) if isinstance(a, _) else a)
        return u''.join(l)
    @property
    def translated(self):
        """ Return the string translated into the globally set language. """
        return _.translate(self)
    @classmethod
    def translate(cls, s, func=None):
        """ Translate a string with the specified translation function, otherwise use the globally set language. """
        if isinstance(s, cls):
            # If we were passed a translatable string object _
            if s._additionals:
                return cls.join_additionals(s, func)
            else:
                return (func or translator)(s.untranslated)
        else:
            return (func or translator)(s)
    @classmethod
    def join(cls, sep, iterable):
        """
        Method used for joining _ objects such that they return translatable strings.
        :param sep: separator inserted between elements
        :param iterable: elements to join (may be _ instances or plain strings)
        :return: a translatable _ instance
        """
        for ii, s in enumerate(iterable):
            if ii == 0:
                t = cls(s)
                if isinstance(s, _):
                    t._additionals = s._additionals[:]
            else:
                t += sep
                t += s
        try:
            # 't' is unbound when the iterable was empty; fall through below.
            return t
        except UnboundLocalError:
return _('\r') | {
"repo_name": "lobocv/eventdispatcher",
"path": "eventdispatcher/stringproperty.py",
"copies": "1",
"size": "8219",
"license": "mit",
"hash": 6443505379839428000,
"line_mean": 33.1078838174,
"line_max": 117,
"alpha_frac": 0.5869327169,
"autogenerated": false,
"ratio": 4.533370104798676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018521204787604055,
"num_lines": 241
} |
__author__ = 'calvin'
import unittest
from eventdispatcher import EventDispatcher, BindError
from eventdispatcher import StringProperty, _
from . import EventDispatcherTest
class Dispatcher(EventDispatcher):
    # Fixture dispatcher with two translatable string properties used below.
    p1 = StringProperty(_('abc'))
    p2 = StringProperty(_('xyz'))
class StringPropertyTest(EventDispatcherTest, unittest.TestCase):
    """Tests for StringProperty's translation and dispatch behaviour."""
    def __init__(self, *args):
        super(StringPropertyTest, self).__init__(*args)
        self.dispatcher = Dispatcher()
        self.dispatcher2 = Dispatcher()
        self.dispatcher.bind(p1=self.assert_callback, p2=self.assert_callback)
    def tearDown(self):
        # Always switch the language back to English, make sure to unbind first.
        try:
            self.dispatcher.unbind(p1=self.assert_callback, p2=self.assert_callback)
        except BindError:
            pass
        StringProperty.remove_translation()
        self.dispatcher.bind(p1=self.assert_callback, p2=self.assert_callback)
    @staticmethod
    def create_different_value(value):
        # Used by the shared EventDispatcherTest machinery to produce a value
        # guaranteed to differ from the current one.
        different_value = 'new ' + str(value)
        return different_value
    def test_translate(self):
        # Loading the fake translator should re-translate existing properties.
        d = self.dispatcher
        self.assertEquals(d.p1, 'abc')
        self.assertEquals(d.p2, 'xyz')
        StringProperty.load_fake_translation()
        self.assertEquals(d.p1, '#abc#')
        self.assertEquals(d.p2, '#xyz#')
    def test_additionals(self):
        d = self.dispatcher
        d.p1 = _('abc') + ' def ' + _('ghi')
        StringProperty.load_fake_translation()
        # Notice that 'abc' and 'ghi' get translated but 'def' does not
        self.assertEquals(d.p1, '#abc# def #ghi#')
        StringProperty.remove_translation()
        self.assertEquals(d.p1, 'abc def ghi')
if __name__ == '__main__':
    # Run the test suite when executed as a script.
    unittest.main()
| {
"repo_name": "lobocv/eventdispatcher",
"path": "tests/test_stringproperty.py",
"copies": "1",
"size": "1778",
"license": "mit",
"hash": 4025437366396801000,
"line_mean": 32.5471698113,
"line_max": 84,
"alpha_frac": 0.6445444319,
"autogenerated": false,
"ratio": 3.799145299145299,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4943689731045299,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
from copy import deepcopy
from eventdispatcher import Property
from weakref import ref
class WeakRefProperty(Property):
    """
    Property that stores it's values as weak references in order to facilitate garbage collection.
    """
    def __init__(self, default_value, **additionals):
        self.instances = {}
        try:
            self.default_value = ref(default_value)
        except TypeError:
            # Not weak-referenceable (e.g. None or a plain int/str).
            self.default_value = None
        try:
            # NOTE(review): the deepcopy is not stored anywhere with a strong
            # reference, so this weak reference is likely dead immediately
            # after creation -- confirm the intent.
            self.value = ref(deepcopy(default_value))
        except TypeError:
            self.value = None
        self._additionals = additionals
    def __get__(self, obj, objtype=None):
        value = obj.event_dispatcher_properties[self.name]['value']
        if value:
            # Dereference the weakref (may yield None if the target died).
            return value()
        else:
            return value
    def __set__(self, obj, value):
        # Store a weak reference (None stays None) and dispatch on change.
        wr = ref(value) if value is not None else None
        if wr != obj.event_dispatcher_properties[self.name]['value']:
            prop = obj.event_dispatcher_properties[self.name]
            prop['value'] = wr
            for callback in prop['callbacks']:
                if callback(obj, value):
                    break
    def register(self, instance, property_name, default_value):
        # Register the default as a weak reference (or None) for the instance.
        wr = None if default_value is None else ref(default_value)
super(WeakRefProperty, self).register(instance, property_name, wr) | {
"repo_name": "lobocv/eventdispatcher",
"path": "eventdispatcher/weakrefproperty.py",
"copies": "1",
"size": "1405",
"license": "mit",
"hash": 5078926944512989000,
"line_mean": 30.9545454545,
"line_max": 98,
"alpha_frac": 0.6014234875,
"autogenerated": false,
"ratio": 4.376947040498442,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5478370527998442,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
from crashreporter_hq import app
import getopt
import sys
def usage():
    """Print the command-line options for this script.

    Uses print() calls (valid on both Python 2 and 3 for a single argument)
    instead of py2-only print statements, so the module stays importable
    under Python 3 with identical output.
    """
    print("Command line parameters:")
    print("  -d Sets debug")
    print("  -h host ip")
    print("  -p port")
    print("  -s enable profiling")
    print("  --help Display help")
# Parse command line options.  Short options: -s, -h <ip>, -p <port>, -d.
# Bug fixes vs. the original:
#   * '--help' is now registered as a long option (previously getopt raised
#     GetoptError for it, so --help printed "Invalid command line arguments").
#   * '-d' is a plain flag (it was declared as 'd:', requiring an argument,
#     so a bare '-d' was rejected).
try:
    opts, args = getopt.getopt(sys.argv[1:], "sh:p:d", ["help"])
except getopt.GetoptError:
    print("Invalid command line arguments")
    usage()
    sys.exit()
app_kwargs = {'debug': False, 'host': '0.0.0.0', 'port': 5010}
profile = False
for opt, arg in opts:
    if opt == "-d":
        app_kwargs['debug'] = True
    elif opt == "-p":
        app_kwargs['port'] = int(arg)
    elif opt == "-s":
        profile = True
    elif opt == "-h":
        app_kwargs['host'] = arg
    elif opt == "--help":
        usage()
        sys.exit()
if profile:
    # Wrap the WSGI app in werkzeug's profiler middleware for finding
    # bottlenecks; stats are written for each request.
    # see: http://werkzeug.pocoo.org/docs/0.9/contrib/profiler/
    # and: http://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-xvi-debugging-testing-and-profiling
    from werkzeug.contrib.profiler import ProfilerMiddleware
    app.config['PROFILE'] = True
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
app.run(**app_kwargs)
| {
"repo_name": "lobocv/crashreporter_hq",
"path": "run_hq.py",
"copies": "1",
"size": "1505",
"license": "mit",
"hash": -8281816247016882000,
"line_mean": 25.875,
"line_max": 109,
"alpha_frac": 0.6265780731,
"autogenerated": false,
"ratio": 3.5,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9529612538634464,
"avg_score": 0.01939310689310689,
"num_lines": 56
} |
__author__ = 'calvin'
from . import Property
class LimitProperty(Property):
    """
    Property whose value is clipped to a [min, max] range.  Callbacks are
    dispatched only when the stored (clipped) value actually changes.
    """
    def __init__(self, default_value, min, max):
        # 'min'/'max' shadow the builtins but are part of the public keyword
        # interface, so the names are kept.
        super(LimitProperty, self).__init__(default_value, min=min, max=max)
    def __get__(self, obj, objtype=None):
        return obj.event_dispatcher_properties[self.name]['value']
    def __set__(self, obj, value):
        info = obj.event_dispatcher_properties[self.name]
        if value != info['value']:
            # Clip the value to be within min/max
            if value < info['min']:
                # Only dispatch if the current value is not already clipped to the minimum
                if info['value'] != info['min']:
                    # Bug fix: also rebind the local 'value' so callbacks
                    # receive the clipped value (the max branch already did
                    # this; the min branch previously dispatched the raw,
                    # out-of-range value).
                    info['value'] = value = info['min']
                else:
                    return
            elif value > info['max']:
                # Only dispatch if the current value is not already clipped to the maximum
                if info['value'] != info['max']:
                    info['value'] = value = info['max']
                else:
                    return
            else:
                info['value'] = value
            # Dispatch callbacks with the (possibly clipped) value.
            for callback in info['callbacks']:
                if callback(obj, value):
                    break
    def __delete__(self, obj):
        raise AttributeError("Cannot delete properties")
    @staticmethod
    def get_min(inst, name):
        """Return the current minimum bound for property *name* on *inst*."""
        return inst.event_dispatcher_properties[name]['min']
    @staticmethod
    def set_min(inst, name, new_min):
        """Set a new minimum bound, re-clipping the current value if needed."""
        inst.event_dispatcher_properties[name]['min'] = new_min
        if getattr(inst, name) < new_min:
            setattr(inst, name, new_min)
    @staticmethod
    def get_max(inst, name):
        """Return the current maximum bound for property *name* on *inst*."""
        return inst.event_dispatcher_properties[name]['max']
    @staticmethod
    def set_max(inst, name, new_max):
        """Set a new maximum bound, re-clipping the current value if needed."""
        inst.event_dispatcher_properties[name]['max'] = new_max
        if getattr(inst, name) > new_max:
            setattr(inst, name, new_max)
| {
"repo_name": "lobocv/eventdispatcher",
"path": "eventdispatcher/limitproperty.py",
"copies": "1",
"size": "1969",
"license": "mit",
"hash": -8540679286268938000,
"line_mean": 32.9482758621,
"line_max": 90,
"alpha_frac": 0.5464702895,
"autogenerated": false,
"ratio": 4.308533916849015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5355004206349014,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
from time import time
from .clock import Clock
import threading
import logging
class ScheduledEvent(object):
    """ Creates a trigger to the scheduler generator that is thread-safe."""
    # Signals sent into the generators to keep them alive or shut them down.
    RUNNING = 1
    KILL = 0
    clock = None
    def __init__(self, func, timeout=0):
        self.func = func
        self.t0 = time()  # reference time for timeout/interval checks
        self.timeout = timeout
        self.lock = threading.Lock()
        self._active = 1
        ScheduledEvent.clock = Clock.get_running_clock()
    @classmethod
    def set_debug(cls):
        """
        Tracks the traceback at the time of triggering in the scheduled event
        This must be called on startup as it only affects ScheduledEvents created after this function is called.
        """
        import traceback
        def debug_trigger_generator(self, *args):
            # Wrap the real trigger generator, capturing a stack trace each
            # time the trigger fires.
            g = self._trigger_generator_real(*args)
            g.next()  # NOTE(review): py2-only generator API (py3: next(g))
            while 1:
                self.traceback = traceback.extract_stack()
                signal = yield
                g.send(signal)
        cls._trigger_generator_real = cls._trigger_generator
        cls._trigger_generator = debug_trigger_generator
        logging.debug('ScheduledEvent debugging turned on.')
    def __iter__(self):
        return self
    def stop(self):
        """
        Stops the scheduled event from being called.
        """
        self._active = 0
    def start(self):
        """
        Allows the scheduled event from being called again.
        """
        self._active = 1
        self.t0 = time()
        if self.clock.scheduled_funcs[self.next] == 0:
            self._schedule(self.next)
    def kill(self):
        """
        Send a kill signal to the generator, kicking it out of it's while loop and closing the generator.
        We must catch the StopIteration exception so that the main loop does not fail.
        """
        def _kill():
            try:
                self.generator.send(ScheduledEvent.KILL)
            except StopIteration:
                pass
        # We need to schedule the kill, in case it is being called from within the function/generator
        self._schedule(_kill)
    def reset_timer(self):
        """
        Reset the time reference, delaying any scheduled events (schedule_once, schedule_interval).
        """
        self.t0 = time()
        # schedule the call to the generator, ensuring only one function is added to the queue
        if not self.clock.scheduled_funcs[self.next]:
            self._schedule(self.next)
    def reset_trigger(self, reschedule=False):
        """Reset a triggered scheduled event. """
        if self.clock.scheduled_funcs[self.func]:
            try:
                self._unschedule(self.func)
            except ValueError as e:
                logging.debug('Scheduled trigger was already removed from the queue. ')
        if reschedule:
            self.next()
    def _schedule(self, func):
        """Add a function to the scheduled events. """
        clock = ScheduledEvent.clock
        clock.scheduled_funcs[func] += 1
        clock.queue.append(func)
    def _unschedule(self, func):
        """Remove a function from the scheduled events. """
        clock = ScheduledEvent.clock
        clock.queue.remove(func)
        clock.scheduled_funcs[func] -= 1
    @property
    def is_scheduled(self):
        # True when either the wrapped function or this event's generator
        # driver (self.next) is currently in the clock queue.
        return bool(self.clock.scheduled_funcs[self.func]) or bool(self.clock.scheduled_funcs[self.next])
    def __repr__(self):
        return "ScheduledEvent for {}{}".format(self.func, ' (scheduled)' if self.is_scheduled else '')
    def __next__(self, *args):
        # Advance the underlying generator; the lock makes triggering
        # thread-safe.  StopIteration means the generator was killed.
        try:
            with self.lock:
                self.generator.send(ScheduledEvent.RUNNING)
        except StopIteration:
            pass
    next = __next__  # py2 iterator-protocol alias
    @staticmethod
    def unschedule_event(func):
        """
        Unschedule an event in the queue. Fails safely if the scheduled function is not in the queue.
        Be sure to use the same reference object if the scheduled function was a lambda or partial.
        :param func: scheduled function in the queue
        :return: True if the function was removed from the queue
        """
        clock = Clock.get_running_clock()
        if clock.scheduled_funcs[func]:
            clock.scheduled_funcs[func] -= 1
            clock.queue.remove(func)
            return True
        else:
            return False
    @staticmethod
    def schedule_once(func, timeout=0, start=True):
        """
        Schedule a function to be called `timeout` seconds later.
        :param func: Scheduled Function
        :param timeout: delay in seconds before calling `func`
        :param start: start the event immediately
        """
        s = ScheduledEvent(func, timeout)
        s.generator = s._timeout_generator(func)
        next(s.generator)
        s.start() if start else s.stop()
        return s
    @staticmethod
    def create_trigger(func):
        """
        Create a trigger that schedules a function to be called on the next cycle of the main loop.
        Calling the trigger more than once will not schedule the function multiple times.
        :param func: Scheduled Function
        """
        s = ScheduledEvent(func, timeout=0)
        s.generator = s._trigger_generator(func)
        next(s.generator)
        return s
    @staticmethod
    def schedule_interval(func, interval, start=False):
        """
        Schedule a function to be called every `interval` seconds. Must call start() to activate.
        :param func: Scheduled Function
        :param start: start right away.
        :param interval: Time interval in seconds
        """
        s = ScheduledEvent(func, timeout=interval)
        s.generator = s._interval_generator(func)
        next(s.generator)
        s.start() if start else s.stop()
        return s
    """
    Generators for the different types of ScheduledEvents
    """
    def _interval_generator(self, f):
        """
        Generator. The function f is called every `timeout` number of seconds.
        """
        interval = self.timeout
        # Local aliases avoid repeated attribute lookups in the hot loop.
        scheduled_funcs = ScheduledEvent.clock.scheduled_funcs
        append = ScheduledEvent.clock.queue.append
        _next = self.next
        running = yield
        while running:
            t = time()
            dt = t - self.t0
            if dt > interval and self._active:
                # If we have past the timeout time, call the function, reset the reference time (t0)
                f()
                self.t0 = t
            # Add another call to this generator to the scheduled functions
            if not scheduled_funcs[_next]:
                scheduled_funcs[_next] += 1
                append(_next)
            running = yield
        # When the loop breaks, we still have one scheduled call to the generator.
        yield
    def _timeout_generator(self, f):
        """
        Generator. The function f is called after `timeout` number of seconds
        """
        timeout = self.timeout
        scheduled_funcs = ScheduledEvent.clock.scheduled_funcs
        append = ScheduledEvent.clock.queue.append
        _next = self.next
        running = yield
        while running:
            dt = time() - self.t0
            if dt > timeout and self._active:
                # If we have passed the timeout time, call the function
                f()
                running = yield
            else:
                # Add another call to this generator to the scheduled functions
                if not scheduled_funcs[_next]:
                    scheduled_funcs[_next] += 1
                    append(_next)
                running = yield
        # When the loop breaks, we still have one scheduled call to the generator.
        yield
    def _trigger_generator(self, f):
        """
        Generator. The function f is called on the next Clock cycle. A function can be scheduled at most once per
        clock cycle.
        """
        scheduled_funcs = ScheduledEvent.clock.scheduled_funcs
        append = ScheduledEvent.clock.queue.append
        running = yield
        while running:
            if not scheduled_funcs[f] and self._active:
                scheduled_funcs[f] += 1
                append(f)
                running = yield
            else:
                running = yield
| {
"repo_name": "lobocv/eventdispatcher",
"path": "eventdispatcher/scheduledevent.py",
"copies": "1",
"size": "8274",
"license": "mit",
"hash": -8008861290460042000,
"line_mean": 32.9098360656,
"line_max": 113,
"alpha_frac": 0.5837563452,
"autogenerated": false,
"ratio": 4.599221789883268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5682978135083268,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
import collections
from functools import partial
from copy import copy
import numpy as np
from . import Property
class ObservableList(collections.MutableSequence):
    """
    List-like container that calls *dispatch_method* whenever its contents
    are mutated.  Backed by a plain list, or a numpy array when *dtype* is
    given.

    Fix: the original defined __nonzero__ twice with identical bodies; the
    duplicate has been removed.
    """
    def __init__(self, l, dispatch_method, dtype=None):
        if not type(l) == list and not type(l) == tuple and not isinstance(l, ObservableList):
            raise ValueError('Observable list must only be initialized with sequences as arguments')
        if dtype:
            # A dtype requests numpy-backed storage.
            self.list = np.array(l, dtype=dtype)
        else:
            self.list = list(l)
        self.dispatch = dispatch_method
    def __repr__(self):
        return self.list.__repr__()
    def __get__(self, instance, owner):
        return self.list
    def __getitem__(self, item):
        return self.list[item]
    def __setitem__(self, key, value):
        try:
            # bool() forces a scalar result (numpy comparisons return arrays).
            not_equal = bool(self.list[key] != value)
        except Exception:
            not_equal = True
        if not_equal:
            self.list[key] = value
            self.dispatch(self.list)
    def __reversed__(self):
        return reversed(self.list)
    def __delitem__(self, key):
        del self.list[key]
        self.dispatch(self.list)
    def __len__(self):
        return len(self.list)
    def __iter__(self):
        return iter(self.list)
    def __nonzero__(self):
        # py2 truthiness hook (py3 would use __bool__).
        return bool(self.list)
    def __getstate__(self):
        return self.list
    def __reduce__(self):
        # Pickle this container as a plain list of its elements.
        return (list, tuple(), None, iter(self.list), None)
    def insert(self, index, value):
        self.list.insert(index, value)
        self.dispatch(self.list)
    def append(self, value):
        self.list.append(value)
        self.dispatch(self.list)
    def extend(self, values):
        self.list.extend(values)
        self.dispatch(self.list)
    def pop(self, index=-1):
        value = self.list.pop(index)
        self.dispatch(self.list)
        return value
    def copy(self):
        return copy(self.list)
    def __eq__(self, other):
        # NOTE(review): __eq__ without __hash__ is fine on py2; on py3 this
        # would make instances unhashable by default.
        return self.list == other
    def __ne__(self, other):
        return self.list != other
class ListProperty(Property):
    """Event-dispatching property wrapping its value in an ObservableList so
    that in-place mutations also trigger callbacks."""
    def register(self, instance, property_name, value, dtype=None):
        # NOTE(review): the 'dtype' parameter is ignored here in favour of the
        # value supplied via the property's additional kwargs -- confirm intent.
        self.value = ObservableList(value,
                                    dispatch_method=partial(instance.dispatch, property_name, instance),
                                    dtype=self._additionals.get('dtype'))
        super(ListProperty, self).register(instance, property_name, self.value)
    def __set__(self, obj, value):
        p = self.instances[obj]
        # Dispatch when the lengths differ or any element differs; the length
        # check is required because compare_sequences stops at the shorter
        # sequence.
        do_dispatch = len(p['value'].list) != len(value) or not ListProperty.compare_sequences(p['value'], value)
        p['value'].list[:] = value  # Assign in place to the ObservableList's backing list
        if do_dispatch:
            for callback in p['callbacks']:
                if callback(obj, p['value'].list):
                    break
    @staticmethod
    def compare_sequences(iter1, iter2):
        """
        Element-wise equality of two iterables.  Iteration stops at the end of
        the shorter sequence, so callers must compare lengths separately.
        """
        try:
            for a, b in zip(iter1, iter2):
                if a != b:
                    return False
        except Exception:
            # A ValueError is usually raised if comparing numpy arrays because they
            # return an array of booleans rather than a scalar value.
            # If any error occurs during comparison just assume they are not equal.
            return False
        return True
| {
"repo_name": "lobocv/eventdispatcher",
"path": "eventdispatcher/listproperty.py",
"copies": "1",
"size": "3703",
"license": "mit",
"hash": -2892995514664435700,
"line_mean": 28.624,
"line_max": 113,
"alpha_frac": 0.5779098029,
"autogenerated": false,
"ratio": 4.184180790960452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007264510285615027,
"num_lines": 125
} |
__author__ = 'calvin'
import ConfigParser
import datetime
import logging
import os
import csv
import re
import json
import sqlite3
import threading
import time
import socket
import requests
from tables import Table, Statistic, State, Timer, Sequence, NO_STATE
from .exceptions import TableConflictError
from .tools import *
CHECK_INTERVAL = datetime.timedelta(minutes=30)
logger = logging.getLogger('AnonymousUsage')
class AnonymousUsageTracker(object):
HQ_DEFAULT_TIMEOUT = 10
MAX_ROWS_PER_TABLE = 1000
    def __init__(self, uuid, filepath, submit_interval_s=0, check_interval_s=0, enabled=True,
                 application_name='', application_version='', debug=False):
        """
        Create a usage tracker database with statistics from a unique user defined by the uuid.
        :param uuid: unique identifier
        :param filepath: path to store the database
        :param application_name: Name of the application as a string
        :param application_version: Application version as a string
        :param check_interval_s: How often the tracker should check to see if an upload is required (seconds)
        :param submit_interval_s: How often the usage statistics should be uploaded (seconds)
        :param enabled: when False, statistics are tracked but never submitted
        :param debug: enable debug-level logging for this module
        """
        if debug:
            logger.setLevel(logging.DEBUG)
        self.uuid = str(uuid)
        self.filename = os.path.splitext(filepath)[0]
        self.filepath = self.filename + '.db'
        self.submit_interval_s = submit_interval_s
        self.check_interval_s = check_interval_s
        self.application_name = application_name
        self.application_version = application_version
        # Matches per-uuid database filenames such as '<uuid>_123.db'.
        self.regex_db = re.compile(r'%s_\d+.db' % self.uuid)
        self._tables = {}
        self._hq = {}
        self._enabled = enabled
        self._watcher = None
        self._watcher_enabled = False
        self._open_sockets = {}
        self._discovery_socket_port = None
        # Create the data base connections to the master database and partial database (if submit_interval)
        self.dbcon_master = sqlite3.connect(self.filepath, check_same_thread=False)
        self.dbcon_master.row_factory = sqlite3.Row
        # If a submit interval is given, create a partial database that contains only the table entries since
        # the last submission. Merge this partial database into the master after a submission.
        # If no submit interval is given, just use a single (master) database.
        if submit_interval_s:
            self.filepath_part = self.filename + '.part.db'
            self.dbcon_part = sqlite3.connect(self.filepath_part, check_same_thread=False)
            self.dbcon_part.row_factory = sqlite3.Row
            self.dbcon = self.dbcon_part
        else:
            self.dbcon_part = None
            self.filepath_part = None
            self.dbcon = self.dbcon_master
        self.track_statistic('__submissions__', description='The number of statistic submissions to the server.')
        if self._hq and self._requires_submission():
            self.submit_statistics()
        if check_interval_s and submit_interval_s:
            self.start_watcher()
def __getitem__(self, item):
"""
Returns the Table object with name `item`
"""
return self._tables.get(item, None)
    def __setitem__(self, key, value):
        """
        Insert a new row into the table of name `key` with value `value`
        """
        table = self._tables.get(key)
        if table:
            if isinstance(table, Statistic) and isinstance(value, (float, int)):
                # Due to Statistic.__add__ returning itself, we must check that the value is a number,
                # otherwise we could be adding a object to a number
                diff = value - table.count
                table += diff
            elif isinstance(table, (State, Sequence)):
                table.insert(value)
    @property
    def states(self):
        # All registered State tables (py2 itervalues).
        return [t for t in self._tables.itervalues() if type(t) is State]
    @property
    def statistics(self):
        # All registered Statistic tables.
        return [t for t in self._tables.itervalues() if type(t) is Statistic]
    @property
    def timers(self):
        # All registered Timer tables.
        return [t for t in self._tables.itervalues() if type(t) is Timer]
    @property
    def sequences(self):
        # All registered Sequence tables.
        return [t for t in self._tables.itervalues() if type(t) is Sequence]
def close(self):
self.dbcon_part.commit()
self.dbcon_part.close()
self.dbcon_master.commit()
self.dbcon_master.close()
def setup_hq(self, host, api_key):
self._hq = dict(host=host, api_key=api_key)
def register_table(self, tablename, uuid, type, description):
exists_in_master = check_table_exists(self.dbcon_master, '__tableinfo__')
exists_in_partial = self.dbcon_part and check_table_exists(self.dbcon_part, '__tableinfo__')
if not exists_in_master and not exists_in_partial:
# The table doesn't exist in master, create it in partial so it can be merged in on submit
# (if partial exists) otherwise, create it in the master
if self.dbcon_part:
db = self.dbcon_part
else:
db = self.dbcon_master
exists_in_master = True
create_table(db, '__tableinfo__', [("TableName", "TEXT"), ("Type", "TEXT"), ("Description", "TEXT")])
# Check if info is already in the table
dbconn = self.dbcon_master if exists_in_master else self.dbcon_part
tableinfo = dbconn.execute("SELECT * FROM __tableinfo__ WHERE TableName='{}'".format(tablename)).fetchall()
# If the info for this table is not in the database, add it
if len(tableinfo) == 0:
dbconn.execute("INSERT INTO {name} VALUES{args}".format(name='__tableinfo__',
args=(str(tablename), type, description)))
elif len(tableinfo) == 1 and tableinfo[0][2] != unicode(description):
# Update the description if it has changed
dbconn.execute("UPDATE {name} SET Description ='{description}' WHERE TableName = '{tablename}'" \
.format(name='__tableinfo__', tablename=tablename, description=description))
    def get_table_info(self, field=None):
        """
        Return __tableinfo__ metadata as a dict keyed by table name.
        When *field* is 'type' or 'description' the values are that single
        column; otherwise each value is {'type': ..., 'description': ...}.
        """
        rows = []
        if check_table_exists(self.dbcon_master, '__tableinfo__'):
            rows = get_rows(self.dbcon_master, '__tableinfo__')
        elif check_table_exists(self.dbcon_part, '__tableinfo__'):
            rows = get_rows(self.dbcon_part, '__tableinfo__')
        if field:
            # Map the requested field name to its column index (1 or 2).
            idx = ('type', 'description').index(field.lower()) + 1
            tableinfo = {r[0]: r[idx] for r in rows}
        else:
            tableinfo = {r[0]: {'type': r[1], 'description': r[2]} for r in rows}
        return tableinfo
    def track_statistic(self, name, description='', max_rows=None):
        """
        Create a Statistic object in the Tracker.
        :param name: unique table name
        :param description: human readable description stored in __tableinfo__
        :param max_rows: row cap for the table (defaults to MAX_ROWS_PER_TABLE)
        """
        if name in self._tables:
            raise TableConflictError(name)
        if max_rows is None:
            max_rows = AnonymousUsageTracker.MAX_ROWS_PER_TABLE
        self.register_table(name, self.uuid, 'Statistic', description)
        self._tables[name] = Statistic(name, self, max_rows=max_rows)
    def track_state(self, name, initial_state, description='', max_rows=None, **state_kw):
        """
        Create a State object in the Tracker.
        :param initial_state: state value the table starts in
        """
        if name in self._tables:
            raise TableConflictError(name)
        if max_rows is None:
            max_rows = AnonymousUsageTracker.MAX_ROWS_PER_TABLE
        self.register_table(name, self.uuid, 'State', description)
        self._tables[name] = State(name, self, initial_state, max_rows=max_rows, **state_kw)
    def track_time(self, name, description='', max_rows=None):
        """
        Create a Timer object in the Tracker.
        """
        if name in self._tables:
            raise TableConflictError(name)
        if max_rows is None:
            max_rows = AnonymousUsageTracker.MAX_ROWS_PER_TABLE
        self.register_table(name, self.uuid, 'Timer', description)
        self._tables[name] = Timer(name, self, max_rows=max_rows)
    def track_sequence(self, name, checkpoints, description='', max_rows=None):
        """
        Create a Sequence object in the Tracker.
        :param checkpoints: ordered checkpoints making up the sequence
        """
        if name in self._tables:
            raise TableConflictError(name)
        if max_rows is None:
            max_rows = AnonymousUsageTracker.MAX_ROWS_PER_TABLE
        self.register_table(name, self.uuid, 'Sequence', description)
        self._tables[name] = Sequence(name, self, checkpoints, max_rows=max_rows)
def get_row_count(self):
info = {}
for db in (self.dbcon_master, self.dbcon_part):
cursor = db.cursor()
for table, stat in self._tables.items():
row_count_query = "SELECT Count() FROM %s" % table
try:
cursor.execute(row_count_query)
except sqlite3.OperationalError:
continue
nrows = cursor.fetchone()[0]
if table in info:
info[table]['nrows'] += nrows
else:
info[table] = {'nrows': nrows}
return info
def submit_statistics(self):
    """
    Upload usage statistics to the HQ server as a JSON payload via HTTP POST.
    Only submit new information contained in the partial database.
    Merge the partial database back into master after a successful upload.

    :return: True on successful upload, False on failure, None when uploading
             is disabled or no API key is configured.
    """
    # Nothing to do without an API key or when uploading has been disabled.
    if not self._hq.get('api_key', False) or not self._enabled:
        return
    # The payload is meaningless without these identifying attributes.
    for r in ('uuid', 'application_name', 'application_version'):
        if not getattr(self, r, False):
            return False
    # Count this attempt up-front; it is rolled back in the except block below.
    self['__submissions__'] += 1
    try:
        # To ensure the usage tracker does not interfere with script functionality, catch all exceptions so any
        # errors always exit nicely.
        tableinfo = self.get_table_info()
        # Get the last row from each table
        json_data = database_to_json(self.dbcon_master, tableinfo)
        # Partial (newer) values override the master's where both exist.
        json_data.update(database_to_json(self.dbcon_part, tableinfo))
        payload = {'API Key': self._hq['api_key'],
                   'User Identifier': self.uuid,
                   'Application Name': self.application_name,
                   'Application Version': self.application_version,
                   'Data': json_data
                   }
        # For tables with data that has not yet been writen to the database (ie inital values),
        # add them manually to the payload
        for name, info in tableinfo.iteritems():
            if name not in payload['Data']:
                table = self[name]
                if table is None:
                    continue
                if isinstance(table, State):
                    data = 'No State' if table._state == NO_STATE else table._state
                else:
                    data = table.count
                tableinfo[name]['data'] = data
                payload['Data'][name] = tableinfo[name]
        try:
            response = requests.post(self._hq['host'] + '/usagestats/upload',
                                     data=json.dumps(payload),
                                     timeout=self.HQ_DEFAULT_TIMEOUT)
        except Exception as e:
            logging.error(e)
            # Falsy sentinel: treated as a failed submission below.
            response = False
        if response and response.status_code == 200:
            success = True
            logger.debug('Submission to %s successful.' % self._hq['host'])
        else:
            success = False
        # If we have a partial database, merge it into the local master and create a new partial
        if self.dbcon_part and success:
            merge_databases(self.dbcon_master, self.dbcon_part)
            # Clear the partial database now that the stats have been uploaded
            for table in get_table_list(self.dbcon_part):
                clear_table(self.dbcon_part, table)
        return success
    except Exception as e:
        logger.error(e)
        # Roll back the submission counter and stop the watcher so failed
        # uploads are not retried until the tracker is re-enabled.
        self['__submissions__'].delete_last()
        self.stop_watcher()
        return False
def database_to_csv(self, path, orderby='type'):
    """
    Create a CSV file for the latest usage stats.

    :param path: path to output CSV file
    :param orderby: 'type' or 'name' -- column used to sort the output rows
    """
    tableinfo = self.get_table_info()
    stats_master = database_to_json(self.dbcon_master, tableinfo)
    stats_partial = database_to_json(self.dbcon_part, tableinfo)
    with open(path, 'w') as f:
        csvfile = csv.writer(f)
        csvfile.writerow(['Name', 'Type', 'Value', 'Description'])
        rows = []
        for tablename, info in tableinfo.iteritems():
            # Attempt to get the latest stat (from partial), if it doesn't exist get it from master.
            # Use explicit sentinel checks rather than `or` so that falsy but
            # valid values (eg. a count of 0) from the partial database are kept.
            value = stats_partial.get(tablename, {}).get('data', ValueError)
            if value is ValueError:
                value = stats_master.get(tablename, {}).get('data', ValueError)
            if value is ValueError:
                # The trackable was registered but no table values are found
                value = self[tablename].current_value
                if value is NO_STATE:
                    value = 'No Initial State'
            rows.append([tablename, info['type'], value, info['description']])
        if orderby == 'type':
            rows.sort(key=lambda x: x[1])  # Sort by type
        elif orderby == 'name':
            rows.sort(key=lambda x: x[0])  # Sort by name
        csvfile.writerows(rows)
def to_file(self, path, precision='%.2g'):
    """
    Write a CSV report of all trackables, grouped into sections by type.

    :param path: path to file
    :param precision: numeric string formatter applied to timer values
    """
    table_info = self.get_table_info()

    def write_section(writer, rows):
        # Emit the section followed by a blank spacer row; a lone title row
        # (fewer than two rows) is skipped entirely.
        if len(rows) > 1:
            for row in rows:
                writer.writerow(row)
            writer.writerow([])

    with open(path, 'wb') as report_file:
        writer = csv.writer(report_file)

        section = [['States'], ['Name', 'Description', 'State', 'Number of Changes']]
        for state in self.states:
            section.append([state.name, table_info[state.name]['description'], state.state, state.count])
        write_section(writer, section)

        section = [['Statistics'], ['Name', 'Description', 'Total', 'Average']]
        for stat in self.statistics:
            # The internal submission counter is not part of the user report.
            if stat.name == '__submissions__':
                continue
            section.append([stat.name, table_info[stat.name]['description'], stat.count, stat.get_average(0)])
        write_section(writer, section)

        section = [['Timers'],
                   ['Name', 'Description', 'Average Seconds', 'Total Seconds', 'Total Minutes', 'Total Hours', 'Total Days']]
        for timer in self.timers:
            section.append([timer.name, table_info[timer.name]['description'],
                            precision % timer.get_average(0), precision % timer.total_seconds,
                            precision % timer.total_minutes, precision % timer.total_hours,
                            precision % timer.total_days])
        write_section(writer, section)

        section = [['Sequences'], ['Name', 'Description', 'Sequence', 'Number of Completions']]
        for sequence in self.sequences:
            checkpoints = '-->'.join(map(str, sequence.get_checkpoints()))
            section.append([sequence.name, table_info[sequence.name]['description'], checkpoints, sequence.count])
        write_section(writer, section)
@classmethod
def load_from_configuration(cls, path, uuid, **kwargs):
    """
    Build a tracker from an INI configuration file.

    The [General] section supplies constructor arguments (filepath,
    application_name, application_version, submit_interval_s, check_interval_s,
    debug); an optional [HQ] section supplies setup_hq() parameters.
    Explicit keyword arguments override values from the file.

    :param path: path to the configuration file
    :param uuid: unique user identifier passed to the tracker constructor
    :return: configured tracker instance
    """
    cfg = ConfigParser.ConfigParser()
    kw = {}
    with open(path, 'r') as _f:
        cfg.readfp(_f)
        if cfg.has_section('General'):
            general = dict(cfg.items('General'))
            kw['filepath'] = kwargs.get('filepath', False) or general['filepath']
            kw['application_name'] = general.get('application_name', '')
            kw['application_version'] = general.get('application_version', '')
            kw['submit_interval_s'] = int(general.get('submit_interval_s', 0))
            kw['check_interval_s'] = int(general.get('check_interval_s', 0))
            # ConfigParser values are strings and bool('False') is True, so
            # parse the text explicitly instead of calling bool() on it.
            kw['debug'] = str(general.get('debug', False)).strip().lower() in ('1', 'true', 'yes', 'on')
        if cfg.has_section('HQ'):
            hq_params = dict(cfg.items('HQ'))
        else:
            hq_params = None
    kw.update(**kwargs)
    tracker = cls(uuid, **kw)
    if hq_params:
        tracker.setup_hq(**hq_params)
    return tracker
def enable(self):
    """
    Grant the tracker permission to upload statistics and start the
    background watcher thread.
    """
    self._enabled = True
    logger.debug('Enabled.')
    self.start_watcher()
    return 'Uploading of statistics has been enabled'
def disable(self):
    """
    Revoke the tracker's permission to upload statistics and stop the
    background watcher thread.
    """
    self._enabled = False
    logger.debug('Disabled.')
    self.stop_watcher()
    return 'Uploading of statistics has been disabled'
def start_watcher(self):
    """
    Start (or re-enable) the watcher thread that periodically uploads
    usage statistics.
    """
    # is_alive must be *called*: the original tested the bound method object,
    # which is always truthy, so a dead-but-referenced thread was never
    # replaced.
    if self._watcher and self._watcher.is_alive():
        # Thread already running; just allow submission checks again.
        self._watcher_enabled = True
    else:
        logger.debug('Starting watcher.')
        self._watcher = threading.Thread(target=self._watcher_thread, name='usage_tracker')
        # Daemon thread so the watcher never blocks interpreter shutdown.
        self._watcher.setDaemon(True)
        self._watcher_enabled = True
        self._watcher.start()
def stop_watcher(self):
    """
    Signal the watcher thread to exit on its next wake-up.
    The thread itself terminates asynchronously.
    """
    if self._watcher is None:
        return
    self._watcher_enabled = False
    logger.debug('Stopping watcher.')
def _requires_submission(self):
    """
    Return True when a new upload is due: the submission interval has elapsed
    AND the partial database contains new rows. The reference time is the
    last recorded submission, or the database file's mtime if none exists.
    """
    if self.dbcon_part is None:
        return False
    new_rows = 0
    for tablename in get_table_list(self.dbcon_part):
        # The internal submission counter does not count as new data.
        if tablename == '__submissions__':
            continue
        new_rows += get_number_of_rows(self.dbcon_part, tablename)
    if new_rows:
        logger.debug('%d new statistics were added since the last submission.' % new_rows)
    else:
        logger.debug('No new statistics were added since the last submission.')
    now = datetime.datetime.now()
    last_submission = self['__submissions__'].get_last(1)
    if last_submission:
        logger.debug('Last submission was %s' % last_submission[0]['Time'])
        reference_time = datetime.datetime.strptime(last_submission[0]['Time'], Table.time_fmt)
    else:
        reference_time = datetime.datetime.fromtimestamp(os.path.getmtime(self.filepath))
    interval_elapsed = (now - reference_time).total_seconds() > self.submit_interval_s
    submission_required = bool(interval_elapsed and new_rows)
    if submission_required:
        logger.debug('A submission is overdue.')
    else:
        logger.debug('No submission required.')
    return submission_required
def _watcher_thread(self):
    """
    Watcher loop: sleep, then attempt an upload when HQ is configured and a
    submission is due. Exits when _watcher_enabled is cleared.
    """
    while True:
        # Sleep first so there is never an upload attempt immediately at start.
        time.sleep(self.check_interval_s or 300)
        if not self._watcher_enabled:
            break
        if self._hq and self._requires_submission():
            logger.debug('Attempting to upload usage statistics.')
            self.submit_statistics()
    logger.debug('Watcher stopped.')
    # Drop the reference so start_watcher() creates a fresh thread next time.
    self._watcher = None
| {
"repo_name": "lobocv/anonymoususage",
"path": "anonymoususage/anonymoususage.py",
"copies": "1",
"size": "20534",
"license": "mit",
"hash": -944652960972041600,
"line_mean": 40.7357723577,
"line_max": 139,
"alpha_frac": 0.5734391741,
"autogenerated": false,
"ratio": 4.3578098471986415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012539107554602733,
"num_lines": 492
} |
__author__ = 'calvin'
import contextlib
from future.utils import iteritems
from .property import Property
from .exceptions import *
class EventDispatcher(object):
    """
    Base class for objects exposing observable Property attributes and named
    events. Callbacks are attached per property/event name via bind(); a
    callback that returns a truthy value stops further propagation of that
    dispatch.
    """
    def __init__(self, *args, **kwargs):
        # {event name: [callbacks]} -- populated by register_event().
        self.event_dispatcher_event_callbacks = {}
        # {property name: info dict} -- populated by Property.register();
        # each entry holds at least 'callbacks' and 'property' keys.
        self.event_dispatcher_properties = {}
        bindings = EventDispatcher.register_properties(self)
        # Auto-bind any default 'on_<prop>' handlers found during registration.
        self.bind(**bindings)
    @staticmethod
    def register_properties(obj, properties=None):
        """
        Walk backwards through the MRO looking for event dispatcher Property attributes in the classes.
        Then register and bind them to the default handler 'on_<prop_name>' if it exists.
        Walking backwards allows you to override the default value for a superclass.
        If the 'properties' argument is given, then only register the properties in the dictionary
        'properties' must be a dictionary of keys being the attribute name and values being the eventdispatcher
        Property object.

        :return: {property name: default 'on_<name>' handler} for properties
                 that have such a handler defined on `obj`.
        """
        bindings = {}
        if properties is None:
            for cls in reversed(obj.__class__.__mro__):
                for prop_name, prop in iteritems(cls.__dict__):
                    if isinstance(prop, Property):
                        prop.name = prop_name
                        prop.register(obj, prop_name, prop.default_value)
                        if hasattr(obj, 'on_%s' % prop_name):
                            bindings[prop_name] = getattr(obj, 'on_{}'.format(prop_name))
        else:
            for prop_name, prop in iteritems(properties):
                prop.name = prop_name
                prop.register(obj, prop_name, prop.default_value)
                if hasattr(obj, 'on_%s' % prop_name):
                    bindings[prop_name] = getattr(obj, 'on_{}'.format(prop_name))
        return bindings
    def force_dispatch(self, prop_name, value):
        """
        Assigns the value to the property and then dispatches the event, regardless of whether that value is the same
        as the previous value.
        :param prop_name: property name
        :param value: value to assign to the property
        """
        previous_value = getattr(self, prop_name)
        if previous_value == value:
            # Value unchanged: dispatch manually since assignment would not.
            self.dispatch(prop_name, self, previous_value)
        else:
            # Presumably the Property descriptor's setter dispatches on change
            # -- confirm against Property implementation.
            setattr(self, prop_name, value)
    def dispatch(self, key, *args, **kwargs):
        """
        Dispatch a property. This calls all functions bound to the property.
        A callback returning a truthy value stops the remaining callbacks.
        :param key: property name
        :param args: arguments to provide to the bindings
        :param kwargs: keyword arguments to provide to the bindings
        """
        for callback in self.event_dispatcher_properties[key]['callbacks']:
            if callback(*args, **kwargs):
                break
    def dispatch_event(self, event, *args, **kwargs):
        """
        Dispatch an event. This calls all functions bound to the event.
        A callback returning a truthy value stops the remaining callbacks.
        :param event: event name
        :param args: arguments to provide to the bindings
        :param kwargs: keyword arguments to provide to the bindings
        """
        for callback in self.event_dispatcher_event_callbacks[event]:
            if callback(*args, **kwargs):
                break
    def register_event(self, *event_names):
        """
        Create an event that can be bound to and dispatched.
        If a default handler 'on_<event>' exists it is pre-bound.
        :param event_names: Name of the event
        """
        for event_name in event_names:
            default_dispatcher = getattr(self, 'on_{}'.format(event_name), None)
            if default_dispatcher:
                self.event_dispatcher_event_callbacks[event_name] = [default_dispatcher]
            else:
                self.event_dispatcher_event_callbacks[event_name] = []
    def unbind(self, **kwargs):
        """
        Unbind the specified callbacks associated with the property / event names
        :param kwargs: {property name: callback} bindings
        :raises BindError: if the name or binding is unknown
        """
        all_properties = self.event_dispatcher_properties
        for prop_name, callback in iteritems(kwargs):
            if prop_name in all_properties:
                try:
                    all_properties[prop_name]['callbacks'].remove(callback)
                except ValueError:
                    raise BindError("No binding for {} in property '{}'".format(callback.__name__, prop_name))
            elif prop_name in self.event_dispatcher_event_callbacks:
                try:
                    self.event_dispatcher_event_callbacks[prop_name].remove(callback)
                except ValueError:
                    raise BindError("No binding for {} in event '{}'".format(callback.__name__, prop_name))
            else:
                raise BindError('No property or event by the name of %s' % prop_name)
    def unbind_all(self, *args):
        """
        Unbind all callbacks associated with the specified property / event names
        :param args: property / event names
        :raises BindError: if a name is neither a property nor an event
        """
        all_properties = self.event_dispatcher_properties
        for prop_name in args:
            if prop_name in all_properties:
                # Clear in place so existing references see the empty list.
                del all_properties[prop_name]['callbacks'][:]
            elif prop_name in self.event_dispatcher_event_callbacks:
                del self.event_dispatcher_event_callbacks[prop_name][:]
            else:
                raise BindError("No such property or event '%s'" % prop_name)
    def bind(self, **kwargs):
        """
        Bind a function to a property or event.
        :param kwargs: {property name: callback} bindings
        :raises BindError: if the name is neither a property nor an event
        """
        for prop_name, callback in iteritems(kwargs):
            if prop_name in self.event_dispatcher_properties:
                # Queue the callback into the property
                self.event_dispatcher_properties[prop_name]['callbacks'].append(callback)
            elif prop_name in self.event_dispatcher_event_callbacks:
                # If a property was not found, search in events
                self.event_dispatcher_event_callbacks[prop_name].append(callback)
            else:
                raise BindError("No property or event by the name of '%s'" % prop_name)
    def bind_once(self, **kwargs):
        """
        Bind a function to a property or event and unbind it after the first time the function has been called
        :param kwargs: {property name: callback} bindings
        """
        # One closure is created per call; remaining bindings are handled by
        # recursion so each _wrapped_binding captures its own prop_name/callback.
        for prop_name, callback in iteritems(kwargs.copy()):
            def _wrapped_binding(*args):
                # NOTE(review): dispatch arguments are discarded; the wrapped
                # callback is invoked with no arguments -- confirm intended.
                callback()
                self.unbind(**{prop_name: _wrapped_binding})
            self.bind(**{prop_name: _wrapped_binding})
            kwargs.pop(prop_name)
            if kwargs:
                self.bind_once(**kwargs)
            return
    def setter(self, prop_name):
        # Return a two-argument setter suitable for binding another
        # dispatcher's property to this one (the `inst` argument is ignored).
        return lambda inst, value: setattr(self, prop_name, value)
    def get_dispatcher_property(self, prop_name):
        # Return the underlying Property descriptor registered for `prop_name`.
        return self.event_dispatcher_properties[prop_name]['property']
    @contextlib.contextmanager
    def temp_unbind(self, **bindings):
        """
        Context manager to temporarily suspend dispatching of a specified callback.
        :param bindings: keyword argument of property_name=callback_func
        """
        # Enter / With
        all_properties = self.event_dispatcher_properties
        callbacks = {}
        for prop_name, binding in iteritems(bindings):
            if prop_name in all_properties:
                # Make a copy of the callback sequence so we can revert back
                callbacks[prop_name] = all_properties[prop_name]['callbacks'][:]
                # Remove the specified bindings
                if binding in all_properties[prop_name]['callbacks']:
                    all_properties[prop_name]['callbacks'].remove(binding)
            elif prop_name in self.event_dispatcher_event_callbacks:
                callbacks[prop_name] = self.event_dispatcher_event_callbacks[prop_name][:]
                # NOTE(review): unlike the property branch above, remove() is
                # called without a membership check and raises ValueError if
                # the binding is absent -- confirm whether intended.
                self.event_dispatcher_event_callbacks[prop_name].remove(binding)
        # Inside of with statement
        yield None
        # Finally / Exit
        for prop_name, cb in iteritems(callbacks):
            if prop_name in all_properties:
                all_properties[prop_name]['callbacks'] = cb
            elif prop_name in self.event_dispatcher_event_callbacks:
                self.event_dispatcher_event_callbacks[prop_name] = callbacks[prop_name]
    @contextlib.contextmanager
    def temp_unbind_all(self, *prop_name):
        """
        Context manager to temporarily suspend dispatching of the listed properties or events. Assigning a different
        value to these properties or dispatching events inside the with statement will not dispatch the bindings.
        :param prop_name: property or event names to suspend
        """
        # Enter / With
        property_callbacks = {}
        event_callbacks = {}
        for name in prop_name:
            if name in self.event_dispatcher_properties:
                property_callbacks[name] = self.event_dispatcher_properties[name]['callbacks']
                self.event_dispatcher_properties[name]['callbacks'] = []
            if name in self.event_dispatcher_event_callbacks:
                event_callbacks[name] = self.event_dispatcher_event_callbacks[name]
                self.event_dispatcher_event_callbacks[name] = []
        # Inside of with statement
        yield None
        # Finally / Exit
        for name in prop_name:
            if name in property_callbacks:
                self.event_dispatcher_properties[name]['callbacks'] = property_callbacks[name]
            if name in event_callbacks:
                self.event_dispatcher_event_callbacks[name] = event_callbacks[name]
| {
"repo_name": "lobocv/eventdispatcher",
"path": "eventdispatcher/eventdispatcher.py",
"copies": "1",
"size": "9709",
"license": "mit",
"hash": -4728681955628951000,
"line_mean": 44.1581395349,
"line_max": 117,
"alpha_frac": 0.6037696982,
"autogenerated": false,
"ratio": 4.663304514889529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015702618239152844,
"num_lines": 215
} |
__author__ = 'calvin'
import cProfile
import logging
from pyperform import StringIO
import os
import pstats
import sys
import threading
import multiprocessing
Thread = threading.Thread  # Alias used by client code; rebound to ProfiledThread/LoggedThread when enabled
Process = multiprocessing.Process  # Alias used by client code; rebound to ProfiledProcess when profiling is enabled
BaseThread = threading.Thread  # Original Thread class, captured before any monkey-patching
BaseProcess = multiprocessing.Process  # Original Process class, captured before any monkey-patching
profiled_thread_enabled = False  # Set True by enable_thread_profiling()
logged_thread_enabled = True  # NOTE(review): starts True even though enable_thread_logging() has not run -- confirm intended
def enable_thread_profiling(profile_dir, exception_callback=None):
    """
    Replace threading.Thread / multiprocessing.Process with the profiled
    variants so every thread/process created afterwards is profiled. Any
    subsequent imports of threading.Thread reference ProfiledThread instead.

    :param profile_dir: existing directory where profile output is written
    :param exception_callback: optional callable(e_type, e_value, traceback)
    :raises OSError: if `profile_dir` does not exist
    """
    global profiled_thread_enabled, Thread, Process
    if not os.path.isdir(profile_dir):
        raise OSError('%s does not exist' % profile_dir)
    _Profiler.profile_dir = profile_dir
    _Profiler.exception_callback = exception_callback
    Thread = threading.Thread = ProfiledThread
    Process = multiprocessing.Process = ProfiledProcess
    profiled_thread_enabled = True
def enable_thread_logging(exception_callback=None):
    """
    Replace threading.Thread with LoggedThread so every thread created
    afterwards logs its start and reports uncaught exceptions. Any subsequent
    imports of threading.Thread reference LoggedThread instead.

    :param exception_callback: optional callable(e_type, e_value, traceback)
    """
    global logged_thread_enabled, Thread
    LoggedThread.exception_callback = exception_callback
    threading.Thread = LoggedThread
    Thread = LoggedThread
    logged_thread_enabled = True
class _Profiler(object):
    """
    Mixin that wraps the cooperating class's run() in a cProfile profiler.
    Each profiled run writes a .stats file (pickled pstats) and a readable
    .profile file into `profile_dir`; combine_profiles() can later merge all
    .stats files into a single report.
    """
    profile_dir = None          # directory where .stats/.profile files are written
    exception_callback = None   # optional callable(e_type, e_value, traceback)
    _type = '_Profiler'         # label used in log messages and output file names
    def run(self):
        """Profile the superclass run(), then dump the collected statistics."""
        profiler = cProfile.Profile()
        try:
            logging.debug('{cls}: Starting {cls}: {name}'.format(cls=self._type, name=self.name))
            profiler.runcall(super(_Profiler, self).run)
            # Fixed typo in the original log message ('Prepating').
            logging.debug('{cls}: Preparing to exit {cls}: {name}'.format(cls=self._type, name=self.name))
        except Exception as e:
            logging.error('{cls}: Error encountered in {name}'.format(cls=self._type, name=self.name))
            logging.error(e)
            if self.exception_callback:
                e_type, e_value, last_traceback = sys.exc_info()
                self.exception_callback(e_type, e_value, last_traceback)
        finally:
            # Do not `return` from inside this finally block: that would
            # silently swallow any in-flight exception (eg. KeyboardInterrupt).
            if self.profile_dir is None:
                logging.warning('{cls}: profile_dir is not specified. '
                                'Profile \'{name}\' will not be saved.'.format(cls=self._type, name=self.name))
            else:
                self.print_stats(profiler)
    def print_stats(self, profiler):
        """Dump `profiler` results to <profile_dir>/<type>-<name>.stats and .profile."""
        name = (self._type + '-' + self.name) if self._type else self.name
        filename = os.path.join(self.profile_dir, name)
        logging.debug('Printing stats for {name}'.format(name=name))
        s = StringIO.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(profiler, stream=s)
        # Take out directory names
        ps.strip_dirs()
        # Sort
        ps.sort_stats(sortby)
        # Print to the stream
        ps.print_stats()
        stats_file = filename + '.stats'
        profile_file = filename + '.profile'
        # Create the stats file
        ps.dump_stats(stats_file)
        # Create a readable .profile file
        with open(profile_file, 'w') as f:
            f.write(s.getvalue())
    @staticmethod
    def combine_profiles(profile_dir, outfile, sortby='cumulative'):
        """Merge every .stats file in `profile_dir` into a single <outfile>.profile report."""
        s = StringIO.StringIO()
        stat_files = [f for f in os.listdir(profile_dir) if os.path.isfile(os.path.join(profile_dir, f))
                      and f.endswith('.stats')]
        ps = pstats.Stats(os.path.join(profile_dir, stat_files[0]), stream=s)
        if len(stat_files) > 1:
            for stat in stat_files[1:]:
                ps.add(os.path.join(profile_dir, stat))
        profile_name = os.path.join(profile_dir, '{}.profile'.format(outfile.replace('.profile', '')))
        with open(profile_name, 'w') as f:
            ps.strip_dirs()
            ps.sort_stats(sortby)
            ps.print_stats()
            f.write(s.getvalue())
class ProfiledThread(_Profiler, BaseThread):
    """Thread whose run() is wrapped by the _Profiler mixin."""
    n_threads = 0  # total number of ProfiledThread instances ever created
    _type = 'Thread'
    def __init__(self, *args, **kwargs):
        super(ProfiledThread, self).__init__(*args, **kwargs)
        ProfiledThread.n_threads += 1
class ProfiledProcess(_Profiler, BaseProcess):
    """Process whose run() is wrapped by the _Profiler mixin.

    Inherits from BaseProcess (the original multiprocessing.Process) rather
    than the rebindable module alias `Process`, mirroring how ProfiledThread
    derives from BaseThread and avoiding any dependence on patching order.
    """
    n_processes = 0  # total number of ProfiledProcess instances ever created
    _type = 'Process'
    def __init__(self, *args, **kwargs):
        super(ProfiledProcess, self).__init__(*args, **kwargs)
        ProfiledProcess.n_processes += 1
class LoggedThread(BaseThread):
    """
    Thread subclass whose run() logs start-up and reports uncaught exceptions
    through the optional class-level `exception_callback`.
    """
    exception_callback = None  # optional callable(e_type, e_value, traceback)
    def run(self):
        logging.debug('LoggedThread: Starting LoggedThread {}'.format(self.name))
        try:
            super(LoggedThread, self).run()
        except Exception as err:
            logging.error('LoggedThread: Error encountered in Thread {name}'.format(name=self.name))
            logging.error(err)
            callback = LoggedThread.exception_callback
            if callback:
                callback(*sys.exc_info())
| {
"repo_name": "lobocv/pyperform",
"path": "pyperform/thread.py",
"copies": "1",
"size": "5327",
"license": "mit",
"hash": -861369947747774300,
"line_mean": 34.5133333333,
"line_max": 115,
"alpha_frac": 0.6322507978,
"autogenerated": false,
"ratio": 3.9605947955390333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5092845593339033,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
import datetime
import ftplib
import logging
import sqlite3
logger = logging.getLogger('AnonymousUsage')  # shared logger for all database helper functions
# Public API of this helpers module.
# NOTE(review): insert_row and login_hq are defined below but absent from
# __all__ -- confirm whether they are intentionally private to `import *`.
__all__ = ['create_table', 'get_table_list', 'get_table_columns', 'check_table_exists', 'get_rows',
           'merge_databases', 'ftp_download', 'get_datetime_sorted_rows', 'delete_row', 'get_uuid_list',
           'get_number_of_rows', 'get_last_row', 'get_first_row', 'fetch', 'rename_table', 'database_to_json',
           'clear_table']
def create_table(dbcon, name, columns):
    """
    Create a table in the database.
    :param dbcon: database connection
    :param name: name of the table to create
    :param columns: sequence of (column name, column type) tuples
    :return: True if a new table was created, False if it already existed
             (or the CREATE failed for any other operational reason)
    """
    try:
        colString = ", ".join(["{} {}".format(colName, colType) for colName, colType in columns])
        dbcon.execute("CREATE TABLE '{name}'({args})".format(name=name, args=colString))
        return True
    except sqlite3.OperationalError:
        # Most commonly "table already exists"; treated as a no-op.
        return False
def insert_row(dbconn, tablename, *args):
    """
    Insert a row into a table.
    :param dbconn: database connection
    :param tablename: name of the table
    :param args: one value per table column, in column order

    Values are passed as bound parameters rather than interpolated into the
    SQL text: the original formatted the Python tuple repr into the statement,
    which broke on strings containing quotes and on single-column tuples
    (trailing comma), and was unsafe for untrusted values.
    """
    cur = dbconn.cursor()
    placeholders = ", ".join("?" for _ in args)
    cur.execute("INSERT INTO '{name}' VALUES ({args})".format(name=tablename, args=placeholders), args)
    dbconn.commit()
def delete_row(dbconn, table_name, field, value):
    """
    Delete rows from a table in a database.
    :param dbconn: database connection
    :param table_name: name of the table
    :param field: field of the table to target
    :param value: value of the field in the table to delete

    The value is passed as a bound parameter (the original interpolated it
    into the SQL text, which broke on values containing quotes and was unsafe
    for untrusted input). The field name is an identifier and cannot be a
    bound parameter, so it is still formatted into the statement.
    """
    cur = dbconn.cursor()
    cur.execute("DELETE FROM '{name}' WHERE {field}=?".format(name=table_name, field=field), (value,))
    dbconn.commit()
def clear_table(dbconn, table_name):
    """
    Remove every row from `table_name`, leaving the table itself intact.
    :param dbconn: database connection
    :param table_name: name of the table
    """
    cursor = dbconn.cursor()
    cursor.execute("DELETE FROM '{name}'".format(name=table_name))
    dbconn.commit()
def get_table_list(dbconn):
    """
    Return the names of all tables defined in the connected database.
    :param dbconn: database connection
    :return: list of table names
    """
    cursor = dbconn.cursor()
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
    try:
        return [row[0] for row in cursor.fetchall()]
    except IndexError:
        # Preserve the original retry-on-malformed-row behaviour.
        return get_table_list(dbconn)
def get_uuid_list(dbconn):
    """
    Collect the distinct UUIDs appearing in any table of the database.
    :param dbconn: master database connection
    :return: set of UUIDs found across all tables
    """
    cursor = dbconn.cursor()
    uuids = set()
    for tablename in get_table_list(dbconn):
        cursor.execute("SELECT (UUID) FROM '{table}'".format(table=tablename))
        uuids.update(row[0] for row in cursor.fetchall())
    return uuids
def get_table_columns(dbconn, tablename):
    """
    Return a list of (column name, declared type) tuples for `tablename`.
    """
    cursor = dbconn.cursor()
    cursor.execute("PRAGMA table_info('%s');" % tablename)
    # PRAGMA table_info rows are (cid, name, type, notnull, dflt_value, pk).
    return [(row[1], row[2]) for row in cursor.fetchall()]
def get_number_of_rows(dbcon, tablename, uuid=None):
    """
    Return the number of rows in `tablename`, optionally restricted to `uuid`.
    Returns 0 when the table does not exist or the count cannot be read.
    :param dbcon: database connection
    :param tablename: table name
    :param uuid: optional UUID to filter on
    :return: non-negative row count
    """
    if not check_table_exists(dbcon, tablename):
        return 0
    cursor = dbcon.cursor()
    if uuid:
        cursor.execute("SELECT COUNT(*) FROM '{name}' WHERE UUID='{uuid}'".format(name=tablename, uuid=uuid))
    else:
        cursor.execute("SELECT COUNT(*) FROM '{name}'".format(name=tablename))
    try:
        result = cursor.fetchone()[0]
    except (TypeError, IndexError) as e:
        logger.error(e)
        result = 0
    cursor.close()
    # Guard against non-numeric results (py2: `long` is a valid count type).
    return result if isinstance(result, (int, long, float)) else 0
def check_table_exists(dbcon, tablename):
    """
    Check if a table exists in the database.
    :param dbcon: database connection
    :param tablename: table name
    :return: True if the table exists, False otherwise
    """
    cursor = dbcon.cursor()
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='%s';" % tablename)
    row = cursor.fetchone()
    cursor.close()
    if row is None:
        return False
    try:
        return row[0] == tablename
    except IndexError:
        # Preserve the original retry-on-malformed-row behaviour.
        return check_table_exists(dbcon, tablename)
def get_rows(dbconn, tablename, uuid=None):
    """
    Fetch every row of `tablename`, optionally filtered by UUID.
    :param dbconn: database connection
    :param tablename: name of the table
    :param uuid: optional UUID to filter on
    :return: list of rows (type depends on the connection's row factory)
    """
    cursor = dbconn.cursor()
    if uuid:
        cursor.execute("SELECT * FROM {tablename} WHERE UUID='{uuid}'".format(tablename=tablename, uuid=uuid))
    else:
        cursor.execute("SELECT * FROM %s" % tablename)
    return cursor.fetchall()
def fetch(dbconn, tablename, n=1, uuid=None, end=True):
    """
    Return up to `n` rows from the start or end of `tablename`.
    :param dbconn: database connection
    :param tablename: name of the table
    :param n: number of rows to return
    :param uuid: Optional UUID to select from
    :param end: True -> last rows (descending ROWID), False -> first rows
    :return: list of rows; empty list when the table is missing or on error
    """
    cursor = dbconn.cursor()
    direction = 'DESC' if end else 'ASC'
    if uuid:
        query = "SELECT * FROM '{}' WHERE UUID='{}' ORDER BY ROWID {} LIMIT {};".format(tablename, uuid, direction, n)
    else:
        query = "SELECT * FROM '{}' ORDER BY ROWID {} LIMIT {};".format(tablename, direction, n)
    try:
        cursor.execute(query)
    except sqlite3.OperationalError as e:
        if 'no such table' not in getattr(e, 'message', ''):
            # Suppress logging of errors generated when no table exists
            logger.error(e)
        return []
    return cursor.fetchall()
def get_last_row(dbconn, tablename, n=1, uuid=None):
    """
    Return the last `n` rows of `tablename` (most recent ROWID first).
    """
    return fetch(dbconn, tablename, n=n, uuid=uuid, end=True)
def get_first_row(dbconn, tablename, n=1, uuid=None):
    """
    Return the first `n` rows of `tablename` (lowest ROWID first).
    """
    return fetch(dbconn, tablename, n=n, uuid=uuid, end=False)
def merge_databases(master, part):
    """
    Merge the partial database into the master database.
    Tables missing from master are created; all rows from `part` are then
    appended to master in a single executemany per table.
    :param master: database connection to the master database
    :param part: database connection to the partial database
    """
    mcur = master.cursor()
    pcur = part.cursor()
    logger.debug("Merging databases...")
    tables = get_table_list(part)
    for table in tables:
        cols = get_table_columns(part, table)
        pcur.execute("SELECT * FROM '%s'" % table)
        rows = pcur.fetchall()
        if rows:
            try:
                # NOTE(review): `n` here is derived from the first row's second
                # column minus one, not from an actual row count of master --
                # confirm what this log line is meant to report.
                logger.debug("Found {n} rows of table '{name}' in master".format(name=table, n=rows[0][1]-1))
            except Exception as e:
                logging.error(e)
            if not check_table_exists(master, table):
                create_table(master, table, cols)
            # Build a "?, ?, ..." placeholder list matching the column count.
            args = ("?," * len(cols))[:-1]
            query = "INSERT INTO '{name}' VALUES ({args})".format(name=table, args=args)
            mcur.executemany(query, tuple(tuple(r) for r in rows))
            logger.debug("Merging {m} rows of table '{name}' into master".format(name=table, m=len(rows)))
    master.commit()
def get_datetime_sorted_rows(dbconn, table_name, uuid=None, column=None):
    """
    Get a list of datetime sorted rows from a table in the database
    :param dbconn: database connection
    :param table_name: name of table in the database
    :param uuid: optional uuid to pull from
    :param column: optional column/field in the table to pull instead of rows
    :returns: a list of tuples containing (datetime, row) pairs or (datetime, column) pairs if column is specified.
    """
    rows = get_rows(dbconn, table_name, uuid=uuid)
    data = []
    for r in rows:
        # Timestamps are stored as text; parse so the sort is chronological.
        dt = datetime.datetime.strptime(r['Time'], "%d/%m/%Y %H:%M:%S")
        if column is None:
            data.append((dt, r))
        else:
            data.append((dt, r[column]))
    # Sort on the timestamp only. Sorting the whole tuple would fall back to
    # comparing the row objects whenever two timestamps tie, which is
    # undefined (and a TypeError for sqlite3.Row under Python 3).
    data.sort(key=lambda pair: pair[0])
    return data
def rename_table(dbconn, original, new):
    """
    Rename table `original` to `new`.
    :param dbconn: database connection
    :param original: original table name
    :param new: new table name
    """
    cursor = dbconn.cursor()
    cursor.execute("ALTER TABLE '{original}' RENAME TO '{new}'".format(original=original, new=new))
def login_hq(host, user, passwd, path='', acct='', port=21, timeout=5):
    """
    Create and return a logged in FTP object.
    :param host: FTP server host name or address
    :param user: FTP user name
    :param passwd: FTP password
    :param path: remote directory to change into after login
    :param acct: FTP account string (rarely needed)
    :param port: FTP server port
    :param timeout: connection timeout in seconds
    :return: connected, logged-in ftplib.FTP instance cwd'd to `path`
    """
    ftp = ftplib.FTP()
    ftp.connect(host=host, port=port, timeout=timeout)
    ftp.login(user=user, passwd=passwd, acct=acct)
    ftp.cwd(path)
    logger.debug('Login to %s successful.' % host)
    return ftp
def ftp_download(ftp, ftp_path, local_path):
    """
    Download a file from the FTP server to a local path (binary transfer).
    :param ftp: logged-in ftplib.FTP connection
    :param ftp_path: path to file on the ftp server
    :param local_path: local path to download file to (overwritten if present)
    """
    with open(local_path, 'wb') as _f:
        ftp.retrbinary('RETR %s' % ftp_path, _f.write)
def database_to_json(dbconn, tableinfo):
    """
    Build a JSON-serializable summary of the latest value of each table.
    NOTE: mutates the entries of `tableinfo` in place (adds a 'data' key);
    the returned dict shares those entry objects.
    :param dbconn: database connection
    :param tableinfo: {tablename: {'type': ..., 'description': ...}} mapping
    :return: {tablename: info-with-'data'} for tables that have at least one row
    """
    # Temporarily disable the Row factory so rows come back as plain tuples.
    dbconn.row_factory = None
    js = {}
    for tablename, info in tableinfo.iteritems():
        rows = get_last_row(dbconn, tablename)
        if rows:
            # State tables carry their value in column 2, all others in column 1.
            info['data'] = rows[0][2 if info['type'] == 'State' else 1]
            js[tablename] = info
    dbconn.row_factory = sqlite3.Row
    return js
| {
"repo_name": "lobocv/anonymoususage",
"path": "anonymoususage/tools.py",
"copies": "1",
"size": "9624",
"license": "mit",
"hash": -3781117665796098000,
"line_mean": 30.5540983607,
"line_max": 123,
"alpha_frac": 0.6183499584,
"autogenerated": false,
"ratio": 3.7259001161440186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48442500745440187,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
import datetime
import logging
import sqlite3
from .table import Table
from ..tools import insert_row
logger = logging.getLogger('AnonymousUsage')
class Statistic(Table):
    """
    Tracks the usage of a certain statistic over time.
    Usage:
        tracker.track_statistic(stat_name)
        tracker[stat_name] += 1
    """
    def __init__(self, *args, **kwargs):
        super(Statistic, self).__init__(*args, **kwargs)
        # Count at creation time, used by difference_from_startup.
        self.startup_value = self.count
    @property
    def current_value(self):
        """The current running count."""
        return self.count
    @property
    def difference_from_startup(self):
        """How much the count has changed since this object was created."""
        return self.count - self.startup_value
    def __add__(self, i):
        """
        Increment the count by `i` and persist a (UUID, count, timestamp) row.
        The oldest row is dropped first when the table is at max_rows; the
        in-memory count is only updated if the database write succeeds.
        """
        dt = datetime.datetime.now().strftime(self.time_fmt)
        count = self.count + i
        try:
            with Table.lock:
                if self.get_number_of_rows() >= self.max_rows:
                    self.delete_first()
                insert_row(self.tracker.dbcon, self.name, self.tracker.uuid, count, dt)
        except sqlite3.Error as e:
            logger.error(e)
        else:
            self.count = count
            # Use the module's 'AnonymousUsage' logger (the original called
            # logging.debug on the root logger, inconsistent with the rest
            # of the package).
            logger.debug('{s.name} count set to {s.count}'.format(s=self))
        return self
    def __sub__(self, i):
        self += -i
        return self
    def set(self, value):
        """Set the count to an absolute value; returns the new count."""
        delta = float(value) - self.count
        self += delta
        return self.count
    def increment(self, by=1):
        """Increase the count by `by` (default 1); returns the new count."""
        self += float(by)
        return self.count
    def decrement(self, by=1):
        """Decrease the count by `by` (default 1); returns the new count."""
        self -= float(by)
        return self.count
    def __repr__(self):
        return "Statistic ({s.name}): {s.count}".format(s=self)
    def get_average(self, default=None):
        """
        Return the statistic's count divided by the number of rows in the table. If it cannot be calculated return
        `default`.
        :return: The average count value (count / table rows) or `default` if it cannot be calculated.
        """
        try:
            first_row = self.get_first()
            if first_row:
                count0 = first_row[0]['Count']
            else:
                count0 = 0
            # Raises ZeroDivisionError (caught below) when only one row exists.
            average = (self.count - count0) / (self.get_number_of_rows() - 1)
        except Exception as e:
            logger.error(e)
            return default
        else:
            return average
"repo_name": "lobocv/anonymoususage",
"path": "anonymoususage/tables/statistic.py",
"copies": "1",
"size": "2343",
"license": "mit",
"hash": 1203164716134595800,
"line_mean": 25.9425287356,
"line_max": 114,
"alpha_frac": 0.5565514298,
"autogenerated": false,
"ratio": 3.9846938775510203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007842478719904084,
"num_lines": 87
} |
__author__ = 'calvin'
import datetime
import logging
import time
logger = logging.getLogger('AnonymousUsage')
from .statistic import Statistic
class Timer(Statistic):
    """
    A timer is a special case of a Statistic where the count is the number of elapsed seconds. A Timer object can be
    started and stopped in order to record the time it takes for certain tasks to be completed.
    """

    def __init__(self, name, tracker, *args, **kwargs):
        super(Timer, self).__init__(name, tracker, *args, **kwargs)
        # Wall-clock datetime of the most recent start/resume; None when idle.
        self._start_time = None
        self.paused = False
        # Seconds banked across start/pause/resume cycles of the current run.
        self._delta_seconds = 0

    def start_timer(self):
        """Begin a new timing run, discarding any previously banked time."""
        self._start_time = datetime.datetime.now()
        self._delta_seconds = 0
        logger.debug('AnonymousUsage: Starting %s timer' % self.name)

    def pause_timer(self):
        """Pause the current run, banking the seconds elapsed so far.

        NOTE(review): assumes start_timer() was called first; if _start_time is
        still None the datetime subtraction raises TypeError -- confirm callers.
        """
        timedelta = datetime.datetime.now() - self._start_time
        self._delta_seconds += timedelta.total_seconds()
        self.paused = True
        logger.debug('AnonymousUsage: Pausing %s timer' % self.name)

    def resume_timer(self):
        """Resume a paused run; elapsed time continues accumulating."""
        self.paused = False
        self._start_time = datetime.datetime.now()
        logger.debug('AnonymousUsage: Resuming %s timer' % self.name)

    def stop_timer(self):
        """Stop the run and add the elapsed seconds to the statistic.

        :return: the number of seconds recorded, or None if the timer was never started.
        """
        if self._start_time is None:
            logger.debug('AnonymousUsage: Cannot stop timer that has not been started.')
            return
        if not self.paused:
            # Paused time was already banked in pause_timer; only add the final
            # running segment when the timer is live.
            timedelta = datetime.datetime.now() - self._start_time
            self._delta_seconds += timedelta.total_seconds()
        # Statistic.__add__ persists the new total to the database.
        self += self._delta_seconds
        delta_seconds = self._delta_seconds
        self._delta_seconds = 0
        self._start_time = None
        self.paused = False
        logger.debug('AnonymousUsage: Stopping %s timer' % self.name)
        return delta_seconds

    @property
    def elapsed_time_s(self):
        """
        Return the amount of time that has elapsed since the timer was started.
        Only works if the timer is active.
        """
        if self._start_time:
            return (datetime.datetime.now() - self._start_time).total_seconds()
        else:
            return 0

    @property
    def total_minutes(self):
        # Cumulative recorded seconds expressed in minutes.
        return self.count / 60.

    @property
    def total_hours(self):
        return self.count / 3600.

    @property
    def total_seconds(self):
        return self.count

    @property
    def total_days(self):
        return self.count / 86400.

    def strftime(self, format, average=False):
        """Format the total (or average) recorded seconds using time.strftime."""
        seconds = self.get_average(0) if average else self.count
        return time.strftime(format, time.gmtime(seconds))

    def formatted_total_time(self, **kwargs):
        return self.format_time(self.count, **kwargs)

    def formatted_average_time(self, **kwargs):
        return self.format_time(self.get_average(default=0), **kwargs)

    @staticmethod
    def format_time(n_seconds, seconds=True, minutes=True, hours=True, days=True, years=True):
        """Break n_seconds into a string such as '2 days 3 hours 10 seconds'.

        Each keyword toggles whether that unit is extracted and may appear in
        the output.
        """
        y = d = h = m = 0
        if years:
            y, n_seconds = divmod(n_seconds, 31536000)
        if days:
            d, n_seconds = divmod(n_seconds, 86400)
        if hours:
            h, n_seconds = divmod(n_seconds, 3600)
        if minutes:
            m, n_seconds = divmod(n_seconds, 60)
        # bool * bool * str yields either the fragment or '' (zero / disabled units).
        fmt = ' '.join([bool(y) * years * ('%d years' % y),
                        bool(d) * days * ('%d days' % d),
                        bool(h) * hours * ('%d hours' % h),
                        bool(m) * minutes * ('%d minutes' % m),
                        bool(n_seconds) * seconds * ('%d seconds' % n_seconds)]).strip()
        return fmt

    def __sub__(self, other):
        # Timers only accumulate; subtraction (inherited from Statistic) is disabled.
        raise NotImplementedError('Cannot subtract from timer.')

    def __repr__(self):
        if hasattr(self, 'count'):
            last_two = self.get_last(2)
            if len(last_two) == 1:
                last_time = last_two[0]['Count']
            elif len(last_two) == 0:
                return "Timer ({s.name}): Total 0 s".format(s=self)
            else:
                # Duration of the most recent run: difference between the two
                # latest cumulative counts.
                last_time = abs(last_two[1]['Count'] - last_two[0]['Count'])
            average = self.get_average('None')
            return "Timer ({s.name}): Total {s.count} s, last {} s, average {} s".format(last_time,
                                                                                         average,
                                                                                         s=self)
        else:
            return "Timer ({s.name})".format(s=self)
| {
"repo_name": "lobocv/anonymoususage",
"path": "anonymoususage/tables/timer.py",
"copies": "1",
"size": "4509",
"license": "mit",
"hash": 3819615764258477000,
"line_mean": 33.6846153846,
"line_max": 116,
"alpha_frac": 0.5522288756,
"autogenerated": false,
"ratio": 4.073170731707317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008675253184746714,
"num_lines": 130
} |
__author__ = 'calvin'
import datetime
import sqlite3
import logging
from itertools import imap
from operator import eq
from collections import deque
from .table import Table
from ..tools import insert_row
from anonymoususage.exceptions import InvalidCheckpointError
logger = logging.getLogger('AnonymousUsage')
class Sequence(Table):
    """
    Tracks the number of times a user performs a certain sequence of events.

    Usage:
        tracker.track_sequence(stat_name, ['first', 'second', 'third'])

        tracker[stat_name] = 'first'   # First check point reached
        tracker[stat_name] = 'second'  # Second check point reached
        tracker[stat_name] = 'third'   # Third check point reached. At this point the database is updated.
    """

    def __init__(self, name, tracker, checkpoints, *args, **kwargs):
        super(Sequence, self).__init__(name, tracker, *args, **kwargs)
        self._checkpoints = checkpoints
        # Rolling window of reached checkpoints, capped at the sequence length.
        self._sequence = deque([], maxlen=len(checkpoints))

    @property
    def current_value(self):
        return self.count

    def insert(self, checkpoint):
        """
        Register `checkpoint` as reached. When the recorded window matches the
        full expected sequence, increment the counter in the database and reset.

        :raises InvalidCheckpointError: if `checkpoint` is not a known checkpoint.
        """
        if checkpoint in self._checkpoints:
            self._sequence.append(checkpoint)
            # Fix: use the module-level 'AnonymousUsage' logger (was logging.debug,
            # which bypassed it and hit the root logger).
            logger.debug('{cp} added to sequence "{s.name}"'.format(cp=checkpoint, s=self))
            if len(self._sequence) == len(self._checkpoints) and all(imap(eq, self._sequence, self._checkpoints)):
                # Sequence is complete. Increment the database
                dt = datetime.datetime.now().strftime(self.time_fmt)
                count = self.count + 1
                try:
                    with Table.lock:
                        # Enforce the row cap before inserting the new data point.
                        if self.get_number_of_rows() >= self.max_rows:
                            self.delete_first()
                        insert_row(self.tracker.dbcon, self.name, self.tracker.uuid, count, dt)
                except sqlite3.Error as e:
                    logger.error(e)
                else:
                    self.count = count
                    self._sequence.clear()
                    # Fix: module logger here as well (was logging.debug).
                    logger.debug("Sequence {s.name} complete, count set to {s.count}".format(s=self))
        else:
            raise InvalidCheckpointError(checkpoint)

    def set(self, checkpoint):
        self.insert(checkpoint)

    @property
    def checkpoint(self):
        """The most recently reached checkpoint, or None if none reached yet."""
        try:
            return self._sequence[-1]
        except IndexError:
            return None

    @checkpoint.setter
    def checkpoint(self, checkpoint):
        self.insert(checkpoint)

    @property
    def sequence(self):
        return tuple(self._sequence)

    @property
    def checkpoints(self):
        """
        return a list of checkpoints (copy)
        """
        return self._checkpoints[:]

    def remove_checkpoint(self):
        """
        Remove the last check point.
        """
        if len(self._sequence):
            return self._sequence.pop()

    def clear_checkpoints(self):
        """
        Clear all completed check points.
        """
        self._sequence.clear()

    def advance_to_checkpoint(self, checkpoint):
        """
        Advance to the specified checkpoint, passing all preceding checkpoints including the specified checkpoint.
        """
        if checkpoint in self._checkpoints:
            for cp in self._checkpoints:
                self.insert(cp)
                if cp == checkpoint:
                    return cp
        else:
            raise InvalidCheckpointError(checkpoint)
| {
"repo_name": "lobocv/anonymoususage",
"path": "anonymoususage/tables/sequence.py",
"copies": "1",
"size": "3465",
"license": "mit",
"hash": -1988268276325119500,
"line_mean": 30.7889908257,
"line_max": 114,
"alpha_frac": 0.5847041847,
"autogenerated": false,
"ratio": 4.62,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.57047041847,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
import datetime
import sqlite3
import logging
from .table import Table
from ..tools import insert_row
logger = logging.getLogger('AnonymousUsage')
NO_STATE = type('NO_STATE', (object, ), {})
class State(Table):
    """
    Records the value of a named attribute each time it changes.

    Usage:
        tracker.track_state(state_name)
        tracker[state_name] = 'ON'
        tracker[state_name] = 'OFF'
    """
    table_args = ("UUID", "INTEGER"), ("Count", "REAL"), ("State", "TEXT"), ("Time", "TEXT")

    def __init__(self, name, tracker, initial_state=NO_STATE, keep_redundant=False, *args, **kwargs):
        super(State, self).__init__(name, tracker, *args, **kwargs)
        self.keep_redundant = keep_redundant
        if self.count != 0:
            # Existing table: resume from the most recently stored state.
            self._state = self.get_last(1)[0]['State']
        else:
            # Brand new table; record the initial state when one was supplied.
            self._state = initial_state
            if initial_state is not NO_STATE:
                self.insert(self._state)

    @property
    def current_value(self):
        return self.state

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value):
        self.insert(value)

    def set(self, value):
        self.state = value
        return self.state

    def insert(self, value):
        """Persist `value` as the new state unless it is a redundant repeat."""
        if value == self._state and not self.keep_redundant:
            # Skip writes that would not change the recorded state.
            return
        dt = datetime.datetime.now().strftime(self.time_fmt)
        try:
            with Table.lock:
                # Enforce the row cap before inserting the new data point.
                if self.get_number_of_rows() >= self.max_rows:
                    self.delete_first()
                insert_row(self.tracker.dbcon, self.name, self.tracker.uuid, self.count + 1, str(value), dt)
        except sqlite3.Error as e:
            logger.error(e)
        else:
            self._state = value
            self.count += 1
            logger.debug("{name} state set to {value}".format(name=self.name, value=value))
        return self

    def __repr__(self):
        state = 'No State' if self._state is NO_STATE else self._state
        return "State ({s.name}): {state}".format(s=self, state=state)
| {
"repo_name": "lobocv/anonymoususage",
"path": "anonymoususage/tables/state.py",
"copies": "1",
"size": "2343",
"license": "mit",
"hash": 8459950217837209000,
"line_mean": 28.6582278481,
"line_max": 120,
"alpha_frac": 0.5770379855,
"autogenerated": false,
"ratio": 3.8472906403940885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4924328625894089,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
import inspect
import logging
import re
import traceback
from types import FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType
try:
import numpy as np
_NUMPY_INSTALLED = True
except ImportError:
_NUMPY_INSTALLED = False
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')(?:\[(?:\'|\").*(?:\'|\")\])*(?:\.[A-z]+[0-9]*)*")
dict_lookup_regex = re.compile("(?<=\[)(?:\'|\")([^\'\"]*)(?:\'|\")(?=\])")
_repr = repr
def repr(object):
    """
    Failure-tolerant wrapper around the builtin repr (saved as _repr above).
    Logs and returns a placeholder string when the object's __repr__ raises.
    """
    try:
        rep = _repr(object)
    except Exception as e:
        logging.error(e)
        rep = 'String Representation not found'
    return rep
def string_variable_lookup(tb, s):
    """
    Look up the value of an object in a traceback by a dot-lookup string.
    ie. "self.crashreporter.application_name"

    Returns the ValueError class itself (used as a sentinel, never raised)
    if the value was not found in the scope of the traceback.

    :param tb: traceback
    :param s: lookup string
    :return: value of the lookup, or ValueError (the class) as a not-found sentinel
    """
    refs = []
    dot_refs = s.split('.')
    # Tags describing how each path component should be resolved.
    DOT_LOOKUP = 0
    DICT_LOOKUP = 1
    for ii, ref in enumerate(dot_refs):
        dict_refs = dict_lookup_regex.findall(ref)
        if dict_refs:
            # Component like name['key']['key2']: attribute access on the part
            # before '[', then one dict lookup per quoted key.
            bracket = ref.index('[')
            refs.append((DOT_LOOKUP, ref[:bracket]))
            refs.extend([(DICT_LOOKUP, t) for t in dict_refs])
        else:
            refs.append((DOT_LOOKUP, ref))
    # The first component must be a local variable in the traceback frame.
    scope = tb.tb_frame.f_locals.get(refs[0][1], ValueError)
    if scope is ValueError:
        return scope
    for lookup, ref in refs[1:]:
        try:
            if lookup == DOT_LOOKUP:
                scope = getattr(scope, ref, ValueError)
            else:
                scope = scope.get(ref, ValueError)
        except Exception as e:
            logging.error(e)
            scope = ValueError
        if scope is ValueError:
            return scope
        elif isinstance(scope, (FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType)):
            # Callables and modules are not useful report values; treat as not found.
            return ValueError
    return scope
def get_object_references(tb, source, max_string_length=1000):
    """
    Find the values of referenced attributes of objects within the traceback scope.

    :param tb: traceback
    :param source: source code to scan for dot-lookup references
    :param max_string_length: maximum characters used to represent each value
    :return: list of (attribute path, formatted value) tuples, sorted by path
    """
    matches = set()
    for line in source.split('\n'):
        matches.update(re.findall(obj_ref_regex, line))
    info = []
    for attr in sorted(matches):
        value = string_variable_lookup(tb, attr)
        if value is ValueError:
            # Sentinel: the reference could not be resolved in this frame.
            continue
        info.append((attr, format_reference(value, max_string_length=max_string_length)))
    return info
def get_local_references(tb, max_string_length=1000):
    """
    Find the values of the local variables within the traceback scope.

    :param tb: traceback
    :param max_string_length: maximum characters used to represent each value
    :return: list of tuples containing (variable name, value)
    """
    frame_locals = tb.tb_frame.f_locals
    results = []
    # 'self' (when present) leads the list so it heads the report.
    if 'self' in frame_locals:
        results.append(('self', repr(frame_locals['self'])))
    for name, value in frame_locals.iteritems():
        if name == 'self':
            continue
        try:
            results.append((name, format_reference(value, max_string_length=max_string_length)))
        except TypeError:
            pass
    return results
def format_reference(ref, max_string_length=1000):
    """
    Convert an object / value into a string representation to pass along in the payload.

    :param ref: object or value
    :param max_string_length: maximum number of characters to represent the object
    :return: string representation, prefixed with metadata for arrays/containers
    """
    extras = []
    if _NUMPY_INSTALLED and isinstance(ref, np.ndarray):
        # Summarize arrays with their metadata; min/max are builtin methods on
        # ndarrays and must be called to obtain a value.
        for attr_name in ('dtype', 'shape', 'size', 'min', 'max'):
            attr_value = getattr(ref, attr_name, None)
            if attr_value is None:
                continue
            if inspect.isbuiltin(attr_value):
                try:
                    attr_value = attr_value()
                except Exception as e:
                    logging.error(e)
                    continue
            extras.append((attr_name, attr_value))
    elif isinstance(ref, (list, tuple, set, dict)):
        # Annotate containers with their length when available.
        get_len = getattr(ref, '__len__', lambda *args: None)
        length = get_len()
        if length is not None:
            extras.append(('length', length))
    parts = ['%s: %s' % pair for pair in extras]
    parts.append(repr(ref))
    vstr = ', '.join(parts)
    if len(vstr) > max_string_length:
        vstr = vstr[:max_string_length] + ' ...'
    return vstr
def analyze_traceback(tb, inspection_level=None, limit=None):
    """
    Extract trace back information into a list of dictionaries.

    :param tb: traceback
    :param inspection_level: number of innermost traceback levels to fully inspect; None inspects all
    :param limit: passed through to traceback.extract_tb to cap the number of levels
    :return: list of dicts containing filepath, line, module, code, traceback level and source code for tracebacks
    """
    info = []
    tb_level = tb
    extracted_tb = traceback.extract_tb(tb, limit=limit)
    for ii, (filepath, line, module, code) in enumerate(extracted_tb):
        # Full source of the function the frame is executing in.
        func_source, func_lineno = inspect.getsourcelines(tb_level.tb_frame)
        d = {"File": filepath,
             "Error Line Number": line,
             "Module": module,
             "Error Line": code,
             "Module Line Number": func_lineno,
             "Custom Inspection": {},
             "Source Code": ''}
        if inspection_level is None or len(extracted_tb) - ii <= inspection_level:
            # Perform advanced inspection on the last `inspection_level` tracebacks.
            d['Source Code'] = ''.join(func_source)
            d['Local Variables'] = get_local_references(tb_level)
            d['Object Variables'] = get_object_references(tb_level, d['Source Code'])
        # Step to the next (inner) traceback level alongside extract_tb's output.
        tb_level = getattr(tb_level, 'tb_next', None)
        info.append(d)
    return info | {
"repo_name": "lobocv/crashreporter",
"path": "crashreporter/tools.py",
"copies": "1",
"size": "5988",
"license": "mit",
"hash": -361811620085533060,
"line_mean": 31.7267759563,
"line_max": 118,
"alpha_frac": 0.5868403474,
"autogenerated": false,
"ratio": 3.880751782242385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9958242631801275,
"avg_score": 0.0018698995682221175,
"num_lines": 183
} |
__author__ = 'calvin'
import inspect
import timeit
from types import FunctionType
from pyperform import StringIO
from .tools import *
class Benchmark(object):
    """Decorator that times a function with timeit using tagged setup source."""
    # Global switch; when False the decorator is a no-op passthrough.
    enable = True

    def __init__(self, setup=None, classname=None, timeit_repeat=3, timeit_number=1000, largs=None, kwargs=None):
        """
        :param setup: setup code as a string, or a callable whose body (minus the
            def line) is used as setup source
        :param classname: name of the class a bound method belongs to
        :param timeit_repeat: number of timeit repeats
        :param timeit_number: number of calls per timeit repeat
        :param largs: tuple of positional arguments for the benchmarked call
        :param kwargs: dict of keyword arguments for the benchmarked call
        """
        self.setup = setup
        self.timeit_repeat = timeit_repeat
        self.timeit_number = timeit_number
        self.classname = classname
        self.group = None
        self.is_class_method = None
        # Copy caller-supplied arguments so later mutation cannot affect the benchmark.
        if largs is not None and type(largs) is tuple:
            self._args = largs[:]
        else:
            self._args = ()
        self._kwargs = kwargs.copy() if kwargs is not None else {}
        self.setup_src = ''
        self.callable = None
        self._is_function = None
        self.log = StringIO.StringIO()
        self.time_average_seconds = None

    def __call__(self, caller):
        """Decorate `caller`: capture its source and build the timeit statement."""
        if self.enable:
            self.callable = caller
            self._is_function = isinstance(caller, FunctionType)
            fp = inspect.getfile(caller)
            # Setup source: all '#!'-tagged lines/definitions from the caller's file.
            imports = get_tagged_imports(fp)
            func_src = remove_decorators(globalize_indentation(inspect.getsource(caller)))
            # Determine if the function is bound. If it is, keep track of it so we can run the benchmark after the class
            # benchmark has been initialized.
            src_lines = func_src.splitlines()
            self.is_class_method = 'def' in src_lines[0] and 'self' in src_lines[0]
            if self.is_class_method and self.classname:
                from .benchmarkedclass import BenchmarkedClass
                try:
                    BenchmarkedClass.bound_functions[self.classname].append(self)
                except KeyError:
                    BenchmarkedClass.bound_functions[self.classname] = [self]
            if callable(self.setup):
                # Use the body of the setup callable (strip its 'def' line).
                setup_func = inspect.getsource(self.setup)
                setup_src = globalize_indentation(setup_func[setup_func.index('\n') + 1:])
            elif type(self.setup) == str:
                setup_src = self.setup
            else:
                setup_src = ''
            src = '\n'.join([imports, setup_src, func_src])
            self.setup_src = src + '\n'
            self.log.write(self.setup_src)
            self.stmt = generate_call_statement(caller, self.is_class_method, *self._args, **self._kwargs)
        return caller

    def write_log(self, fs=None):
        """
        Write the results of the benchmark to a log file.

        :param fs: file-like object.
            NOTE(review): despite the docstring, fs is passed to open(), i.e.
            treated as a file path -- confirm intended type.
        """
        log = StringIO.StringIO()
        log.write(self.setup_src)
        # If the function is not bound, write the test score to the log
        if not self.is_class_method:
            time_avg = convert_time_units(self.time_average_seconds)
            log.write("\nAverage time: {0} \n".format(time_avg))
        if fs:
            with open(fs, 'w') as _f:
                _f.write(log.getvalue())

    def run_timeit(self, stmt, setup):
        """ Create the function call statement as a string used for timeit. """
        _timer = timeit.Timer(stmt=stmt, setup=setup)
        trials = _timer.repeat(self.timeit_repeat, self.timeit_number)
        # Mean over repeats, normalized to seconds per single call.
        self.time_average_seconds = sum(trials) / len(trials) / self.timeit_number
        # Convert into reasonable time units
        time_avg = convert_time_units(self.time_average_seconds)
        return time_avg
| {
"repo_name": "lobocv/pyperform",
"path": "pyperform/benchmark.py",
"copies": "1",
"size": "3456",
"license": "mit",
"hash": 6642760858431099000,
"line_mean": 36.5652173913,
"line_max": 120,
"alpha_frac": 0.5885416667,
"autogenerated": false,
"ratio": 3.9815668202764978,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009185683220879332,
"num_lines": 92
} |
__author__ = 'calvin'
import logging
from anonymoususage.tools import *
from anonymoususage.exceptions import *
from threading import RLock
logger = logging.getLogger('AnonymousUsage')
class Table(object):
    """Base class for a tracked quantity persisted in the usage database."""
    # Timestamp format used for the 'Time' column in every table.
    time_fmt = "%d/%m/%Y %H:%M:%S"
    # Default schema; subclasses (e.g. State) may extend with extra columns.
    table_args = ("UUID", "INTEGER"), ("Count", "REAL"), ("Time", "TEXT")
    # Shared re-entrant lock guarding multi-statement database updates.
    lock = RLock()

    def __init__(self, name, tracker, max_rows):
        """
        :param name: table name (must not contain spaces)
        :param tracker: AnonymousUsageTracker owning the database connections
        :param max_rows: maximum number of rows kept before the oldest is dropped
        :raises TableNameError: if `name` contains a space
        """
        if ' ' in name:
            raise TableNameError(name)
        self.max_rows = max_rows
        self.tracker = tracker
        self.name = name
        # NOTE(review): row count and last row are read before the existence
        # check / creation below, and against dbcon_master/dbcon_part rather
        # than dbcon -- confirm the helpers tolerate a missing table.
        self.number_of_rows = self.get_number_of_rows()
        last = self.get_last()
        if last:
            self.count = last[0]['Count']
        else:
            self.count = 0
        logger.debug("{s.name}: {s.number_of_rows} table entries found".format(s=self))
        if not check_table_exists(self.tracker.dbcon, name):
            create_table(self.tracker.dbcon, name, self.table_args)

    def get_rows(self):
        """
        Attempt to load the statistic from the database.
        :return: Number of entries for the statistic
        """
        rows = []
        if check_table_exists(self.tracker.dbcon_master, self.name):
            rows.extend(get_rows(self.tracker.dbcon_master, self.name))
        if self.tracker.dbcon_part and check_table_exists(self.tracker.dbcon_part, self.name):
            rows.extend(get_rows(self.tracker.dbcon_part, self.name))
        return rows

    def get_number_of_rows(self):
        # Total row count across the master and (optional) partial databases.
        n_rows = get_number_of_rows(self.tracker.dbcon_master, self.name)
        if self.tracker.dbcon_part:
            n_rows += get_number_of_rows(self.tracker.dbcon_part, self.name)
        return n_rows

    def insert(self, value):
        """
        Contains the functionally of assigning a value to a statistic in the AnonymousUsageTracker. Usually this will
        involve inserting some data into the database table for the statistic.

        :param value: assignment value to the tracker, ie. `tracker[stat_name] = some_value`
        """
        pass

    def get_first(self, n=1):
        """
        Retrieve the first n rows from the table

        :param n: number of rows to return
        :return: list of rows
        """
        rows = []
        # Oldest rows live in the master database, so read it first.
        if self.tracker.dbcon_master and check_table_exists(self.tracker.dbcon_master, self.name):
            rows.extend(get_first_row(self.tracker.dbcon_master, self.name, n))
        # Top up from the partial database if the master had fewer than n rows.
        if len(rows) < n and self.tracker.dbcon_part and check_table_exists(self.tracker.dbcon_part, self.name):
            rows.extend(get_first_row(self.tracker.dbcon_part, self.name, n))
        return rows[:n]

    def get_last(self, n=1):
        """
        Retrieve the last n rows from the table

        :param n: number of rows to return
        :return: list of rows
        """
        rows = []
        # Newest rows live in the partial database, so read it first.
        if self.tracker.dbcon_part and check_table_exists(self.tracker.dbcon_part, self.name):
            rows.extend(get_last_row(self.tracker.dbcon_part, self.name, n))
        # Top up from the master database if required.
        if len(rows) < n and check_table_exists(self.tracker.dbcon_master, self.name):
            rows.extend(get_last_row(self.tracker.dbcon_master, self.name, n))
        return rows[-n:]

    def delete_last(self):
        """Delete the most recent row (matched by its Time value) and decrement the count."""
        last = self.get_last()
        if last:
            last = last[0]
            # Prefer the partial database when it exists; otherwise the master.
            db = self.tracker.dbcon_part if self.tracker.dbcon_part else self.tracker.dbcon_master
            delete_row(db, self.name, "Time", last['Time'])
            self.count -= 1

    def delete_first(self):
        """Delete the oldest row, looking in the master database before the partial."""
        for db in (self.tracker.dbcon_master, self.tracker.dbcon_part):
            if db:
                rowid = get_first_row(db, self.name)
                if rowid:
                    delete_row(db, self.name, "Time", rowid[0]['Time'])
                    break

    def get_count(self):
        """Return the Count of the most recent row, or 0 when the table is empty."""
        row = self.get_last()
        if row:
            return row[0]['Count']
        else:
            return 0
| {
"repo_name": "lobocv/anonymoususage",
"path": "anonymoususage/tables/table.py",
"copies": "1",
"size": "4124",
"license": "mit",
"hash": -5679674408484384000,
"line_mean": 35.4955752212,
"line_max": 117,
"alpha_frac": 0.5935984481,
"autogenerated": false,
"ratio": 3.6657777777777776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4759376225877778,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
import logging
from .benchmark import Benchmark
from .benchmarkedfunction import BenchmarkedFunction
from .tools import convert_time_units
from .exceptions import ValidationError
class BenchmarkedClass(Benchmark):
    """Class decorator that runs the benchmarks of the class's bound methods."""
    # classname -> list of Benchmark objects for bound methods,
    # populated by Benchmark.__call__ when it sees a method with 'self'.
    bound_functions = {}

    def __init__(self, setup=None, largs=None, kwargs=None, **kw):
        super(BenchmarkedClass, self).__init__(setup, largs=largs, kwargs=kwargs, **kw)

    def __call__(self, cls):
        """Decorate `cls`: time every registered bound method against an instance."""
        if self.enable:
            super(BenchmarkedClass, self).__call__(cls)
            setup_src = self.setup_src
            # The timed statements call methods on 'instance', created here.
            setup_src += '\ninstance = {}'.format(self.stmt)
            groups = set()
            for p in self.bound_functions[cls.__name__]:
                stmt = p.stmt
                p.run_timeit(stmt, setup_src)
                p.write_log()
                if isinstance(p, BenchmarkedFunction):
                    print("{} \t {}".format(p.callable.__name__, convert_time_units(p.time_average_seconds)))
                # Validate each group at most once.
                if hasattr(p, 'result_validation') and p.result_validation and p.group not in groups:
                    self.validate(p.groups[p.group])
                    groups.add(p.group)
        return cls

    def validate(self, benchmarks):
        """
        Execute the code once to get it's results (to be used in function validation). Compare the result to the
        first function in the group.

        :param benchmarks: list of benchmarks to validate.
        :raises ValidationError: if any benchmark's result differs from the first's.
        """
        class_code = self.setup_src
        instance_creation = '\ninstance = {}'.format(self.stmt)
        for i, benchmark in enumerate(benchmarks):
            if not benchmark.result_validation:
                break
            # Run the benchmarked call once in a fresh scope to capture its result.
            validation_code = class_code + instance_creation + '\nvalidation_result = ' + benchmark.stmt
            validation_scope = {}
            exec(validation_code, validation_scope)
            # Store the result in the first function in the group.
            if i == 0:
                compare_against_function = benchmarks[0].callable.__name__
                compare_against_result = validation_scope['validation_result']
                logging.info('PyPerform: Validating group "{b.group}" against method '
                             '"{b.classname}.{b.callable.__name__}"'.format(b=benchmarks[0]))
            else:
                if compare_against_result == validation_scope['validation_result']:
                    logging.info('PyPerform: Validating {b.classname}.{b.callable.__name__}......PASSED!'
                                 .format(b=benchmark))
                else:
                    error = 'Results of functions {0} and {1} are not equivalent.\n{0}:\t {2}\n{1}:\t{3}'
                    raise ValidationError(error.format(compare_against_function, benchmark.callable.__name__,
                                                       compare_against_result, validation_scope['validation_result']))
| {
"repo_name": "lobocv/pyperform",
"path": "pyperform/benchmarkedclass.py",
"copies": "1",
"size": "2917",
"license": "mit",
"hash": -5180241561821178000,
"line_mean": 44.578125,
"line_max": 112,
"alpha_frac": 0.5735344532,
"autogenerated": false,
"ratio": 4.393072289156627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5466606742356627,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
import re
import sys
from math import log10
# Python 2/3 shim: on Python 2, alias the lazy xrange as range.
# NOTE(review): sys.version is the human-readable version string; checking
# sys.version_info[0] would be the more robust form.
if sys.version[0] == '3':
    pass
else:
    range = xrange

# Lines tagged with a trailing '#!' marker are collected as benchmark setup code:
# classdef_regex matches tagged 'def'/'class' definition lines,
# tagged_line_regex matches any tagged line.
classdef_regex = re.compile(r"\S*def .*#!|class .*#!")
tagged_line_regex = re.compile(r".*#!")
def convert_time_units(t):
""" Convert time in seconds into reasonable time units. """
if t == 0:
return '0 s'
order = log10(t)
if -9 < order < -6:
time_units = 'ns'
factor = 1000000000
elif -6 <= order < -3:
time_units = 'us'
factor = 1000000
elif -3 <= order < -1:
time_units = 'ms'
factor = 1000.
elif -1 <= order:
time_units = 's'
factor = 1
return "{:.3f} {}".format(factor * t, time_units)
def globalize_indentation(src):
    """
    Strip the indentation level so the code runs in the global scope.

    The indentation of the first line defines how many leading columns are
    removed from every line. A trailing newline is always appended.
    """
    lines = src.splitlines()
    indent = len(lines[0]) - len(lines[0].strip(' '))
    shifted = [line[indent:] for line in lines]
    return '\n'.join(shifted) + '\n'
def remove_decorators(src):
    """
    Remove Benchmark decorators from the source code.

    Decorator lines containing 'Benchmark' are dropped; a decorator whose
    argument list spans multiple lines is dropped up to the line ending in ')'.
    Other decorators are left untouched.
    """
    kept = []
    skipping_continuation = False
    for raw in src.strip().splitlines():
        stripped = raw.strip()
        if (stripped.startswith('@') and 'Benchmark' in stripped) or skipping_continuation:
            # Keep dropping lines until the decorator call closes.
            skipping_continuation = not stripped.endswith(')')
            continue
        kept.append(raw)
    return '\n'.join(kept)
def get_tagged_imports(fp):
    """
    Collect all '#!'-tagged source from the file at path `fp`.

    A tagged plain line is captured on its own; a tagged 'def'/'class' line
    causes the entire definition body (until a line returns to the definition's
    indentation level) to be captured as one chunk.

    :param fp: path of the Python source file to scan
    :return: the captured lines joined with newlines, with a trailing newline
    """
    imports = []
    # State for capturing a multi-line definition.
    inside_def = False
    def_lines = []
    def_indent = 0
    with open(fp, 'r') as f:
        # One-line lookahead: process lastLine while peeking at line.
        lastLine = f.readline()
        for line in f:
            tagged_class_or_def = re.findall(classdef_regex, lastLine)
            tagged_line = re.findall(tagged_line_regex, lastLine)
            # Find the indentation level of the function/class definition and capture all source code lines
            # until we get a line that is the same indentation level (end of function/class definition).
            if tagged_class_or_def or inside_def:
                if tagged_class_or_def and def_lines:
                    # A new tagged definition starts: flush the previous one.
                    imports.append(''.join(def_lines))
                    def_lines = []
                    inside_def = False
                if inside_def:
                    # For lines within the definition
                    indent = len(lastLine) - len(lastLine.lstrip(' '))
                    if indent == def_indent and lastLine != '\n':
                        # Back at the definition's indent: the body has ended.
                        imports.append(''.join(def_lines))
                        def_lines = []
                        inside_def = False
                        def_indent = 0
                        if tagged_line:
                            imports.append(lastLine)
                    else:
                        if lastLine != '\n':
                            def_lines.append(lastLine)
                else:
                    # For the definition line
                    inside_def = True
                    def_indent = len(lastLine) - len(lastLine.lstrip(' '))
                    def_lines.append(lastLine)
            elif tagged_line:
                imports.append(lastLine)
            lastLine = line
        # Examine the last line
        tagged_line = re.findall(tagged_line_regex, lastLine)
        if inside_def:
            def_lines.append(line)
            imports.append(''.join(def_lines))
        elif tagged_line:
            imports.append(line)
    src = '\n'.join(imports) + '\n'
    return src
def generate_call_statement(func, is_class_method, *args, **kwargs):
    """
    Build the source of a call to `func` for use as a timeit statement.

    :param func: function whose __name__ is called
    :param is_class_method: when True, the call is made on 'instance.'
    :param args: positional arguments, embedded via their repr
    :param kwargs: keyword arguments, embedded via their repr
    :return: call statement string, e.g. "instance.method(1, x=2)"
    """
    target = 'instance.' + func.__name__ if is_class_method else func.__name__
    argument_list = [arg.__repr__() for arg in args]
    argument_list.extend('{0}={1}'.format(kw, val.__repr__()) for kw, val in kwargs.items())
    stmt = target + '(' + ', '.join(argument_list) + ', '
    # Trim the trailing separator before closing the call.
    stmt = stmt.strip(', ')
    stmt += ')'
    return stmt
def walk_tree(start, attr):
    """
    Recursively walk through a tree relationship. This iterates a tree in a top-down approach,
    fully reaching the end of a lineage before moving onto the next sibling of that generation.

    :param start: root node of the walk
    :param attr: name of the attribute that holds a node's list of children
    """
    path = [start]
    # `path` is mutated while being iterated: each yielded node's children are
    # spliced in immediately after it, producing a depth-first order.
    for child in path:
        yield child
        # NOTE(review): list.index returns the first occurrence -- confirm nodes
        # are unique, otherwise children may be spliced at the wrong position.
        idx = path.index(child)
        for grandchild in reversed(getattr(child, attr)):
            path.insert(idx + 1, grandchild) | {
"repo_name": "lobocv/pyperform",
"path": "pyperform/tools.py",
"copies": "1",
"size": "4519",
"license": "mit",
"hash": -7149361426149947000,
"line_mean": 29.7482993197,
"line_max": 107,
"alpha_frac": 0.5264439035,
"autogenerated": false,
"ratio": 3.960560911481157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4987004814981157,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
import requests
import json
import logging
HQ_DEFAULT_TIMEOUT = 10
SMTP_DEFAULT_TIMEOUT = 5
def upload_report(server, payload, timeout=HQ_DEFAULT_TIMEOUT):
    """
    Upload a report to the server.

    :param payload: Dictionary (JSON serializable) of crash data.
    :return: server response, or False if serialization or the request failed
    """
    try:
        # Serialization happens inside the try so a non-serializable payload
        # is also reported as a failure.
        response = requests.post(server + '/reports/upload',
                                 data=json.dumps(payload), timeout=timeout)
    except Exception as e:
        logging.error(e)
        return False
    return response
def upload_many_reports(server, payloads, timeout=HQ_DEFAULT_TIMEOUT):
    """
    Upload a batch of reports to the server in a single request.

    :param payloads: JSON-serializable collection of crash payloads
    :return: server response, or False if serialization or the request failed
    """
    try:
        response = requests.post(server + '/reports/upload_many',
                                 data=json.dumps(payloads), timeout=timeout)
    except Exception as e:
        logging.error(e)
        return False
    return response
def delete_report(server, report_number, timeout=HQ_DEFAULT_TIMEOUT):
    """
    Delete a specific crash report from the server.

    :param report_number: Report Number
    :return: server response, or False if the request failed
    """
    try:
        response = requests.post(server + "/reports/delete/%d" % report_number, timeout=timeout)
    except Exception as e:
        logging.error(e)
        return False
    return response
| {
"repo_name": "lobocv/crashreporter",
"path": "crashreporter/api.py",
"copies": "1",
"size": "1228",
"license": "mit",
"hash": -7973952015720329000,
"line_mean": 23.56,
"line_max": 89,
"alpha_frac": 0.6506514658,
"autogenerated": false,
"ratio": 3.8984126984126983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5049064164212698,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
import time
import json
import socket
from threading import Thread
def run(port, cmds):
    """Connect to the local discoverer service, ask it to open a dedicated
    command socket on ``port``, then send each command in ``cmds`` over it."""
    HOST = '127.0.0.1'
    DISCOVER_PORT = 1213

    def communicate(sock, cmd):
        """
        Send a command and print its response.
        :param cmd: dict command payload, or a callable to invoke locally
        :return: None (the server reply is printed, not returned)
        """
        if not isinstance(cmd, dict):
            cmd()
        else:
            sock.send(json.dumps(cmd))
            # Give the server a moment to respond before the blocking recv.
            time.sleep(1)
            print sock.recv(1024)
    # Connect to the discoverer socket
    print 'Opening discoverer port'
    discover_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    discover_socket.connect((HOST, DISCOVER_PORT))
    # Ask the process to create a new socket for communication
    print 'Requesting new socket'
    # NOTE(review): communicate() always returns None, so the print below
    # prints None; the actual reply is printed inside communicate().
    response = communicate(discover_socket, {'command': 'ACT', 'trackable': '', 'action': 'new_connection', 'args': (port,)})
    print response
    print 'Closing discoverer port'
    # Close the discoverer socket so that another process can connect to it
    discover_socket.shutdown(socket.SHUT_RDWR)
    discover_socket.close()
    print 'Connecting to new socket'
    # Connect to the new socket that we requested have made
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((HOST, port))
    # Run the commands
    for c in cmds:
        communicate(sock, c)
# One port and one command script per concurrent test client.
ports =[1214, 1215, 1216]
# Each script: start tracking a statistic, reset it, 4 increments and
# 2 decrements, then read the final count back.
cmds = [
    [{'command': 'ACT', 'trackable': '', 'action': 'track_statistic', 'args': ('grids',)},
     {'command': 'GET', 'trackable': 'grids', 'attribute': 'count'},
     {'command': 'SET', 'trackable': 'grids', 'attribute': 'count', 'value': 0},
     {'command': 'ACT', 'trackable': 'grids', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'grids', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'grids', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'grids', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'grids', 'action': 'decrement', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'grids', 'action': 'decrement', 'args': (1,)},
     {'command': 'GET', 'trackable': 'grids', 'attribute': 'count'}],

    [{'command': 'ACT', 'trackable': '', 'action': 'track_statistic', 'args': ('lines',)},
     {'command': 'GET', 'trackable': 'lines', 'attribute': 'count'},
     {'command': 'SET', 'trackable': 'lines', 'attribute': 'count', 'value': 0},
     {'command': 'ACT', 'trackable': 'lines', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'lines', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'lines', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'lines', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'lines', 'action': 'decrement', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'lines', 'action': 'decrement', 'args': (1,)},
     {'command': 'GET', 'trackable': 'lines', 'attribute': 'count'}],

    [{'command': 'ACT', 'trackable': '', 'action': 'track_statistic', 'args': ('screenshots',)},
     {'command': 'GET', 'trackable': 'screenshots', 'attribute': 'count'},
     {'command': 'SET', 'trackable': 'screenshots', 'attribute': 'count', 'value': 0},
     {'command': 'ACT', 'trackable': 'screenshots', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'screenshots', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'screenshots', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'screenshots', 'action': 'increment', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'screenshots', 'action': 'decrement', 'args': (1,)},
     {'command': 'ACT', 'trackable': 'screenshots', 'action': 'decrement', 'args': (1,)},
     {'command': 'GET', 'trackable': 'screenshots', 'attribute': 'count'}]
]
# Launch one client thread per port, staggered by 2 seconds.
for i in xrange(3):
    t = Thread(target=run, args=(ports[i], cmds[i]))
    t.start()
    time.sleep(2)
"repo_name": "lobocv/anonymoususage",
"path": "test_multi_ipc.py",
"copies": "1",
"size": "4173",
"license": "mit",
"hash": -2274354280981620000,
"line_mean": 45.8988764045,
"line_max": 125,
"alpha_frac": 0.5530793194,
"autogenerated": false,
"ratio": 3.4920502092050207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4545129528605021,
"avg_score": null,
"num_lines": null
} |
__author__ = 'calvin'
class AnonymousUsageError(Exception):
    """
    Base class for errors in this module.
    """
    pass

    @property
    def message(self):
        # Expose the stringified exception text as a ``.message`` attribute.
        return str(self)
class IntervalError(AnonymousUsageError):
    """Raised when an interval value is not a ``datetime.timedelta``."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return 'Interval must be a datetime.timedelta object. Received %s.' % self.value


class TableNameError(AnonymousUsageError):
    """Raised when a table name contains spaces."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return 'Table name "{}" cannot contain spaces. Consider "{}" instead.'.format(self.name,
                                                                                      self.name.replace(' ', '_'))


class TableConflictError(AnonymousUsageError):
    """Raised when a table name is already registered with the usage tracker."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return 'Table name "{}" already exists in this usage tracker.'.format(self.name)


class InvalidCheckpointError(AnonymousUsageError):
    """Raised when a checkpoint assignment is not in the valid checkpoint list."""

    def __init__(self, checkpoint):
        self.checkpoint = checkpoint

    def __str__(self):
        return 'Checkpoint "{}" assignment is not in the valid list of checkpoints'.format(self.checkpoint)
"repo_name": "lobocv/anonymoususage",
"path": "anonymoususage/exceptions.py",
"copies": "1",
"size": "1220",
"license": "mit",
"hash": 6863221816800177000,
"line_mean": 23.9183673469,
"line_max": 114,
"alpha_frac": 0.593442623,
"autogenerated": false,
"ratio": 4.518518518518518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012279271310578527,
"num_lines": 49
} |
__author__ = 'calvin'
class Property(object):
    """Descriptor implementing an observable (event-dispatched) property.

    Per-instance state lives in ``obj.event_dispatcher_properties[name]``,
    a dict holding the current value and the registered change callbacks.
    """

    def __init__(self, default_value, **additionals):
        # Info dicts for registered instances, keyed by the owning instance.
        self.instances = {}
        self.default_value = default_value
        self._additionals = additionals

    def __get__(self, obj, objtype=None):
        # NOTE(review): ``self.name`` is never set in __init__; it is
        # presumably assigned during class/property registration elsewhere
        # in the package — confirm before relying on it.
        return obj.event_dispatcher_properties[self.name]['value']

    def __set__(self, obj, value):
        # Only store and dispatch when the value actually changes.
        if value != obj.event_dispatcher_properties[self.name]['value']:
            prop = obj.event_dispatcher_properties[self.name]
            prop['value'] = value
            # A callback returning a truthy value stops further propagation.
            for callback in prop['callbacks']:
                if callback(obj, value):
                    break

    def __delete__(self, obj):
        raise AttributeError("Cannot delete properties")

    def register(self, instance, property_name, default_value):
        """Attach this property to ``instance`` under ``property_name``."""
        info = self._additionals.copy()
        info.update({'property': self, 'value': default_value, 'name': property_name, 'callbacks': []})
        # Create the instances dictionary at registration so that each class has it's own instance of it.
        self.instances[instance] = info
        instance.event_dispatcher_properties[property_name] = info

    def get_dispatcher_property(self, property_name):
        # NOTE(review): ``self.instances`` is keyed by registered *instances*
        # (see register()), yet it is indexed here with ``self`` — this looks
        # like a bug; verify against callers before changing.
        return self.instances[self][property_name]
| {
"repo_name": "lobocv/eventdispatcher",
"path": "eventdispatcher/property.py",
"copies": "1",
"size": "1262",
"license": "mit",
"hash": 5882357895837380000,
"line_mean": 35.0571428571,
"line_max": 105,
"alpha_frac": 0.6259904913,
"autogenerated": false,
"ratio": 4.277966101694915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02911569562512959,
"num_lines": 35
} |
__author__ = 'calvin'
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
import flask.ext.login as flask_login
# Mock database / persistence layer
app = Flask(__name__)
app.config.from_object('crashreporter_hq.config')

login_manager = flask_login.LoginManager()
login_manager.init_app(app)
# Endpoint users are redirected to when a login is required.
login_manager.login_view = "login"

db = SQLAlchemy(app)

# @app.teardown_appcontext
# def shutdown_session(exception=None):
#     db_session.remove()

# When enabled, every SQL statement is logged together with its execution time.
__PROFILER_ENABLED__ = False
if __PROFILER_ENABLED__:
    from sqlalchemy import event
    from sqlalchemy.engine import Engine
    import time
    import logging

    logging.basicConfig()
    logger = logging.getLogger("myapp.sqltime")
    logger.setLevel(logging.DEBUG)

    @event.listens_for(Engine, "before_cursor_execute")
    def before_cursor_execute(conn, cursor, statement,
                              parameters, context, executemany):
        # Stamp the start time on the execution context so the paired
        # after_cursor_execute hook can compute the elapsed time.
        context._query_start_time = time.time()
        logger.debug("Start Query:\n%s" % statement)
        # Modification for StackOverflow answer:
        # Show parameters, which might be too verbose, depending on usage..
        logger.debug("Parameters:\n%r" % (parameters,))

    @event.listens_for(Engine, "after_cursor_execute")
    def after_cursor_execute(conn, cursor, statement,
                             parameters, context, executemany):
        total = time.time() - context._query_start_time
        logger.debug("Query Complete!")
        # Modification for StackOverflow: times in milliseconds
        logger.debug("Total Time: %.02fms" % (total * 1000))

# Imported last so views/models see the fully configured ``app`` and ``db``.
import views, models
"repo_name": "lobocv/crashreporter_hq",
"path": "crashreporter_hq/__init__.py",
"copies": "1",
"size": "1616",
"license": "mit",
"hash": -5850590493590650000,
"line_mean": 28.4,
"line_max": 75,
"alpha_frac": 0.6757425743,
"autogenerated": false,
"ratio": 3.8846153846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5060357958915385,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Calvin'
"""
This example demonstrates how pyperform can be used to benchmark class functions. In this example we use
ComparisonBenchmarks to compare the speed of two methods which calculates a person's savings.
** Note that when benchmarking class methods, the classname argument to ComparisonBenchmark must be provided.
The class, Person, is initialized with several required parameters (name, age, monthly_income) and some optional
parameters (height). The two methods to calculate savings both have a required argument (retirement_age) and an optional
argument of monthly spending (monthly_spending).
"""
from pyperform import BenchmarkedClass, ComparisonBenchmark, BenchmarkedFunction
@BenchmarkedClass(largs=('Calvin', 24, 1000.,), kwargs={'height': '165 cm'})
class Person(object):
    """Example class whose benchmarked methods compare two ways of
    computing a person's savings at retirement."""

    def __init__(self, name, age, monthly_income, height=None, *args, **kwargs):
        self.name = name
        self.age = age
        self.height = height
        self.monthly_income = monthly_income

    @ComparisonBenchmark('Calculate Savings', classname="Person", timeit_number=100, validation=True, largs=(55,),
                         kwargs={'monthly_spending': 500})
    def calculate_savings_method1(self, retirement_age, monthly_spending=0):
        # Naive month-by-month accumulation.
        savings = 0
        for y in range(self.age, retirement_age):
            for m in range(12):
                savings += self.monthly_income - monthly_spending
        return savings

    @ComparisonBenchmark('Calculate Savings', classname="Person", timeit_number=100, validation=True, largs=(55,),
                         kwargs={'monthly_spending': 500})
    def calculate_savings_method2(self, retirement_age, monthly_spending=0):
        # Closed-form equivalent of method 1.
        # NOTE(review): returns None when retirement_age <= age, whereas
        # method 1 returns 0 — verify this passes validation as intended.
        yearly_income = 12 * (self.monthly_income - monthly_spending)
        n_years = retirement_age - self.age
        if n_years > 0:
            return yearly_income * n_years

    @BenchmarkedFunction(classname="Person", timeit_number=100, largs=(55,), kwargs={'monthly_spending': 500})
    def same_as_method_2(self, retirement_age, monthly_spending=0):
        yearly_income = 12 * (self.monthly_income - monthly_spending)
        n_years = retirement_age - self.age
        if n_years > 0:
            return yearly_income * n_years


# Can print the summary to file or if ComparisonBenchmark.summarize() is not given an fs parameter, it will print to
# console.
report_file = open('report.txt', 'w')
ComparisonBenchmark.summarize(group='Calculate Savings', fs=report_file)
"repo_name": "lobocv/pyperform",
"path": "examples/benchmark_class_functions.py",
"copies": "1",
"size": "2488",
"license": "mit",
"hash": 341621671546112800,
"line_mean": 45.9622641509,
"line_max": 120,
"alpha_frac": 0.6921221865,
"autogenerated": false,
"ratio": 3.7754172989377843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4967539485437784,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Calvin'
try:
from builtins import range
except ImportError:
range = xrange
from pyperform import *
class SomeClass(object): #!
    """Nested helper object; the ``#!`` tag marks it for pyperform setup."""

    def __init__(self, n):
        self.n = n
        self.count = 0
        # Build a chain of nested SomeClass objects n levels deep (self.a.a...).
        if n > 0:
            self.a = SomeClass(n-1)

    def func(self):
        self.count += 1
        return sum(range(10))


@BenchmarkedClass()
class MyClass(object):
    """Compares consuming a pre-built generator against a plain loop."""

    def __init__(self):
        self.obj = SomeClass(5)  # setup an object with some nested lookup
        self.g = self.generator()  # setup the generator in advance

    @ComparisonBenchmark('gen', classname='MyClass', validation=True)
    def call_generator(self):
        for i in self.g:  # Call the generator which calls the function 100 times (like not_generator)
            pass
        return self.obj.a.a.a.a.count

    def generator(self):
        # Bind the deeply nested method once; yield after each call.
        func = self.obj.a.a.a.a.func
        for i in range(100):
            func()
            yield i

    @ComparisonBenchmark('gen', classname='MyClass', validation=True)
    def not_generator(self):
        func = self.obj.a.a.a.a.func
        for i in range(100):
            func()
        return self.obj.a.a.a.a.count


if __name__ == '__main__':
    # c = MyClass()
    # c.call_generator()
    with open('report.txt', 'w') as _f:
        ComparisonBenchmark.summarize('gen', _f, include_source=1)
| {
"repo_name": "lobocv/pyperform",
"path": "examples/generator_state_overhead.py",
"copies": "1",
"size": "1394",
"license": "mit",
"hash": -934522311877461200,
"line_mean": 23.8928571429,
"line_max": 116,
"alpha_frac": 0.5645624103,
"autogenerated": false,
"ratio": 3.649214659685864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4713777069985864,
"avg_score": null,
"num_lines": null
} |
import sys
import serial
import time
import math
class Navigate:
port1 = "/dev/ttyPCH1"
port2 = "/dev/ttyPCH2"
baudRate = 38400
ser1 = None
ser2 = None
def __init__(self):
self.ser1 = serial.Serial(self.port1, self.baudRate)
self.ser2 = serial.Serial(self.port2, self.baudRate)
def serialSend1(self, send):
for i in send:
self.ser1.write(chr(i))
def serialSend2(self, send):
for i in send:
self.ser2.write(chr(i))
def serialRead1(self, read):
self.serialSend1(read)
data=[]
for i in range(6):
data.append(self.ser1.read())
return data
def serialRead2(self, read):
self.serialSend2(read)
data=[]
for i in range(6):
data.append(self.ser2.read())
return data
def threshold(self, speed):
if (speed < 0):
speed = 0
elif (speed > 127):
speed = 127
return int(speed)
def driveM1(self, speed):
address = 0x81
command = 6
dataBytes = self.threshold(speed)
checksum = (address+command+dataBytes)&0x7F
self.serialSend1([address,command,dataBytes,checksum])
def driveM2(self, speed):
address = 0x80
command = 6
dataBytes = self.threshold(speed)
checksum = (address+command+dataBytes)&0x7F
self.serialSend2([address,command,dataBytes,checksum])
def driveM3(self, speed):
address = 0x81
command = 7
dataBytes = self.threshold(speed)
checksum = (address+command+dataBytes)&0x7F
self.serialSend1([address,command,dataBytes,checksum])
def driveAll(self, speed):
self.driveM1(speed)
self.driveM2(speed)
self.driveM3(speed)
def stop(self):
self.driveAll(64)
def readCurrentRpsM1(self):
address = 0x81
command = 30
# Read the current speed from the motor.
data = self.serialRead1([address,command])
# Read the first four bytes (in hex) as the speed.
speed = (data[0].encode("hex")) + (data[1].encode("hex")) + (data[2].encode("hex")) + (data[3].encode("hex"))
# Convert the speed to an integer.
speed = int(speed, 16)
# If the fifth byte indicates that the speed is negative, take the 2's complement.
if ((ord(data[4]) == 1) and (speed != 0)):
speed = ~(0xffffffff - speed) + 1
# Calculate the speed in rotations per second (negating to account for wiring).
rotations_per_second = -float(speed) * 125 / 8192
return rotations_per_second
def readCurrentRpsM2(self):
address = 0x80
command = 30
# Read the speed information from the motor.
data = self.serialRead2([address,command])
# Read the first four bytes (in hex) as the speed.
speed = (data[0].encode("hex")) + (data[1].encode("hex")) + (data[2].encode("hex")) + (data[3].encode("hex"))
# Convert the speed to an integer.
speed = int(speed, 16)
# If the fifth byte indicates that the value is negative, take the 2's complement.
if ((ord(data[4]) == 1) and (speed != 0)):
speed = ~(0xffffffff - speed) + 1
# Calculate the speed in rotations per second (negating to account for wiring).
rotations_per_second = -float(speed) * 125 / 8192
return rotations_per_second
def readCurrentRpsM3(self):
address = 0x81
command = 31
# Read the current speed from the motor.
data = self.serialRead1([address,command])
# Read the first four bytes (in hex) as the speed.
speed = (data[0].encode("hex")) + (data[1].encode("hex")) + (data[2].encode("hex")) + (data[3].encode("hex"))
# Convert the speed to an integer.
speed = int(speed, 16)
# If the fifth byte indicates that the speed is negative, take the 2's complement.
if ((ord(data[4]) == 1) and (speed != 0)):
speed = ~(0xffffffff - speed) + 1
# Calculate the speed in rotations per second (negating to account for wiring).
rotations_per_second = -float(speed) * 125 / 8192
return rotations_per_second
def rpsComponentM(self, max_rps,xm,ym,x,y):
if (x==0 and y==0):
rpsM = 0
else:
x = x/math.sqrt(x*x+y*y)
y = y/math.sqrt(x*x+y*y)
rpsM = max_rps*(xm*x+ym*y)/math.sqrt(xm*xm+ym*ym)
return rpsM
def rpsComponentM1(self, max_rps,x,y):
x1 = -math.sqrt(2)/2
y1 = -math.sqrt(2)/2
rpsM1 = self.rpsComponentM(max_rps,x1,y1,x,y)
return rpsM1
def rpsComponentM2(self, max_rps,x,y):
x2 = 1
y2 = 0
rpsM2 = self.rpsComponentM(max_rps,x2,y2,x,y)
return rpsM2
def rpsComponentM3(self, max_rps,x,y):
x3 = -math.sqrt(2)/2
y3 = math.sqrt(2)/2
rpsM3 = self.rpsComponentM(max_rps,x3,y3,x,y)
return rpsM3
# Rotate between -180 and +180 degrees
def rotate(self, max_rps,theta):
print "Rotating " + str(theta) + " degrees."
tune_angle = 0.3;
speedM1 = 64
speedM2 = 64
speedM3 = 64
if (theta<0):
max_rps = -max_rps
for i in range(0,int(tune_angle*theta)):
self.driveM1(speedM1)
self.driveM2(speedM2)
self.driveM3(speedM3)
kp_theta = 1
speedM1 = speedM1 + kp_theta*(max_rps - self.readCurrentRpsM1())
speedM2 = speedM2 + kp_theta*(max_rps - self.readCurrentRpsM2())
speedM3 = speedM3 + kp_theta*(max_rps - self.readCurrentRpsM3())
if i%10==0:
print str(self.readCurrentRpsM1()) + "\t\t" + str(self.readCurrentRpsM2()) + "\t\t" + str(self.readCurrentRpsM3())
self.stop()
def goToPoint(self, max_rps,x,y):
print "Moving to (x,y)=(" + str(x) + "," + str(y) + ")"
tune_distance = 5
distance = int(tune_distance*math.sqrt(x*x+y*y))
rpsM1 = self.rpsComponentM1(max_rps,x,y)
rpsM2 = self.rpsComponentM2(max_rps,x,y)
rpsM3 = self.rpsComponentM3(max_rps,x,y)
speedM1 = 64
speedM2 = 64
speedM3 = 64
for i in range(0,distance):
self.driveM1(speedM1)
self.driveM2(speedM2)
self.driveM3(speedM3)
kp_xy = 1
speedM1 = speedM1 + kp_xy*(rpsM1 - self.readCurrentRpsM1())
speedM2 = speedM2 + kp_xy*(rpsM2 - self.readCurrentRpsM2())
speedM3 = speedM3 + kp_xy*(rpsM3 - self.readCurrentRpsM3())
if i%10==0:
print str(self.readCurrentRpsM1()) + "\t\t" + str(self.readCurrentRpsM2()) + "\t\t" + str(self.readCurrentRpsM3())
self.stop()
################# Implementation #####################
if __name__ == '__main__':
    # CLI: Navigate.py <theta_degrees> <x> <y> — rotate, then translate.
    theta = float(sys.argv[1])
    x = float(sys.argv[2])
    y = float(sys.argv[3])
    max_rps = 3
    n = Navigate()
    n.rotate(max_rps,theta)
    n.goToPoint(max_rps,x,y)
| {
"repo_name": "cletusw/goal-e",
"path": "src/goale/scripts/Navigate.py",
"copies": "1",
"size": "7208",
"license": "mit",
"hash": -973178070804465200,
"line_mean": 32.6822429907,
"line_max": 131,
"alpha_frac": 0.5668701443,
"autogenerated": false,
"ratio": 3.1795324217026906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42464025660026905,
"avg_score": null,
"num_lines": null
} |
import sys
import serial
import time
import math
# CLI arguments: rotation angle (degrees) and target point coordinates.
theta = float(sys.argv[1])
x = float(sys.argv[2])
y = float(sys.argv[3])

# Two motor-controller serial ports (M1/M3 on port 1, M2 on port 2).
port1 = "/dev/ttyPCH1"
port2 = "/dev/ttyPCH2"
baudRate = 38400
ser1 = serial.Serial(port1,baudRate)
ser2 = serial.Serial(port2,baudRate)
def serialSend1(send):
    """Write each byte value of ``send`` to serial port 1."""
    for i in send:
        ser1.write(chr(i))

def serialSend2(send):
    """Write each byte value of ``send`` to serial port 2."""
    for i in send:
        ser2.write(chr(i))

def serialRead1(read):
    """Send request packet ``read`` on port 1; return the 6-byte reply."""
    serialSend1(read)
    data=[]
    for i in range(6):
        data.append(ser1.read())
    return data

def serialRead2(read):
    """Send request packet ``read`` on port 2; return the 6-byte reply."""
    serialSend2(read)
    data=[]
    for i in range(6):
        data.append(ser2.read())
    return data
def threshold(speed):
    """Clamp a speed value into the controller's valid 0..127 byte range."""
    return int(min(max(speed, 0), 127))
def driveM1(speed):
    """Command motor M1 (port 1, address 0x81, command 6)."""
    address = 0x81
    command = 6
    dataBytes = threshold(speed)
    # 7-bit checksum over the packet bytes, per the controller protocol.
    checksum = (address+command+dataBytes)&0x7F
    serialSend1([address,command,dataBytes,checksum])

def driveM2(speed):
    """Command motor M2 (port 2, address 0x80, command 6)."""
    address = 0x80
    command = 6
    dataBytes = threshold(speed)
    checksum = (address+command+dataBytes)&0x7F
    serialSend2([address,command,dataBytes,checksum])

def driveM3(speed):
    """Command motor M3 (port 1, address 0x81, command 7)."""
    address = 0x81
    command = 7
    dataBytes = threshold(speed)
    checksum = (address+command+dataBytes)&0x7F
    serialSend1([address,command,dataBytes,checksum])

def driveAll(speed):
    """Command all three motors with the same speed byte."""
    driveM1(speed)
    driveM2(speed)
    driveM3(speed)

def stop():
    """Stop all motors (64 is the neutral/stop speed byte)."""
    driveAll(64)
def readCurrentRpsM1():
    """Return motor M1's measured speed in rotations per second."""
    address = 0x81
    command = 30
    # Read the current speed from the motor.
    data = serialRead1([address,command])
    # Read the first four bytes (in hex) as the speed.
    speed = (data[0].encode("hex")) + (data[1].encode("hex")) + (data[2].encode("hex")) + (data[3].encode("hex"))
    # Convert the speed to an integer.
    speed = int(speed, 16)
    # If the fifth byte indicates that the speed is negative, take the 2's complement.
    if ((ord(data[4]) == 1) and (speed != 0)):
        speed = ~(0xffffffff - speed) + 1
    # Calculate the speed in rotations per second (negating to account for wiring).
    rotations_per_second = -float(speed) * 125 / 8192
    return rotations_per_second

def readCurrentRpsM2():
    """Return motor M2's measured speed in rotations per second."""
    address = 0x80
    command = 30
    # Read the speed information from the motor.
    data = serialRead2([address,command])
    # Read the first four bytes (in hex) as the speed.
    speed = (data[0].encode("hex")) + (data[1].encode("hex")) + (data[2].encode("hex")) + (data[3].encode("hex"))
    # Convert the speed to an integer.
    speed = int(speed, 16)
    # If the fifth byte indicates that the value is negative, take the 2's complement.
    if ((ord(data[4]) == 1) and (speed != 0)):
        speed = ~(0xffffffff - speed) + 1
    # Calculate the speed in rotations per second (negating to account for wiring).
    rotations_per_second = -float(speed) * 125 / 8192
    return rotations_per_second

def readCurrentRpsM3():
    """Return motor M3's measured speed in rotations per second."""
    address = 0x81
    command = 31
    # Read the current speed from the motor.
    data = serialRead1([address,command])
    # Read the first four bytes (in hex) as the speed.
    speed = (data[0].encode("hex")) + (data[1].encode("hex")) + (data[2].encode("hex")) + (data[3].encode("hex"))
    # Convert the speed to an integer.
    speed = int(speed, 16)
    # If the fifth byte indicates that the speed is negative, take the 2's complement.
    if ((ord(data[4]) == 1) and (speed != 0)):
        speed = ~(0xffffffff - speed) + 1
    # Calculate the speed in rotations per second (negating to account for wiring).
    rotations_per_second = -float(speed) * 125 / 8192
    return rotations_per_second
def rpsComponentM(max_rps,xm,ym,x,y):
    """Project the desired direction (x, y) onto the wheel drive axis
    (xm, ym), scaled by ``max_rps``.

    Returns 0 when the direction vector is zero.

    BUGFIX: the previous version normalized x first and then used the
    already-normalized x when normalizing y, so y was divided by the wrong
    magnitude.  Both components are now divided by the original norm.
    """
    if (x==0 and y==0):
        return 0
    norm = math.sqrt(x*x+y*y)
    x = x/norm
    y = y/norm
    return max_rps*(xm*x+ym*y)/math.sqrt(xm*xm+ym*ym)
def rpsComponentM1(max_rps,x,y):
    """Speed component for wheel M1 (drive axis (-sqrt(2)/2, -sqrt(2)/2))."""
    x1 = -math.sqrt(2)/2
    y1 = -math.sqrt(2)/2
    rpsM1 = rpsComponentM(max_rps,x1,y1,x,y)
    return rpsM1

def rpsComponentM2(max_rps,x,y):
    """Speed component for wheel M2 (drive axis (1, 0))."""
    x2 = 1
    y2 = 0
    rpsM2 = rpsComponentM(max_rps,x2,y2,x,y)
    return rpsM2

def rpsComponentM3(max_rps,x,y):
    """Speed component for wheel M3 (drive axis (-sqrt(2)/2, sqrt(2)/2))."""
    x3 = -math.sqrt(2)/2
    y3 = math.sqrt(2)/2
    rpsM3 = rpsComponentM(max_rps,x3,y3,x,y)
    return rpsM3
# Rotate between -180 and +180 degrees
def rotate(max_rps,theta):
    """Rotate in place by ``theta`` degrees using a proportional speed loop.

    NOTE(review): for negative theta the loop count int(tune_angle*theta) is
    negative, so the control loop never runs and only stop() executes —
    confirm this is the intended behavior.
    """
    print "Rotating " + str(theta) + " degrees."
    # Empirical scale from degrees to control-loop iterations.
    tune_angle = 0.3;
    speedM1 = 64
    speedM2 = 64
    speedM3 = 64
    if (theta<0):
        max_rps = -max_rps
    for i in range(0,int(tune_angle*theta)):
        driveM1(speedM1)
        driveM2(speedM2)
        driveM3(speedM3)
        # Proportional correction toward the target rotation speed.
        kp_theta = 1
        speedM1 = speedM1 + kp_theta*(max_rps - readCurrentRpsM1())
        speedM2 = speedM2 + kp_theta*(max_rps - readCurrentRpsM2())
        speedM3 = speedM3 + kp_theta*(max_rps - readCurrentRpsM3())
        # Telemetry every 10 iterations (costs three extra serial reads).
        if i%10==0:
            print str(readCurrentRpsM1()) + "\t\t" + str(readCurrentRpsM2()) + "\t\t" + str(readCurrentRpsM3())
    stop()
def goToPoint(max_rps,x,y):
    """Translate toward the point (x, y) using a proportional speed loop."""
    print "Moving to (x,y)=(" + str(x) + "," + str(y) + ")"
    # Empirical scale from distance to control-loop iterations.
    tune_distance = 5
    distance = int(tune_distance*math.sqrt(x*x+y*y))
    # Per-wheel target speeds for the requested direction.
    rpsM1 = rpsComponentM1(max_rps,x,y)
    rpsM2 = rpsComponentM2(max_rps,x,y)
    rpsM3 = rpsComponentM3(max_rps,x,y)
    speedM1 = 64
    speedM2 = 64
    speedM3 = 64
    for i in range(0,distance):
        driveM1(speedM1)
        driveM2(speedM2)
        driveM3(speedM3)
        # Proportional correction toward each wheel's target speed.
        kp_xy = 1
        speedM1 = speedM1 + kp_xy*(rpsM1 - readCurrentRpsM1())
        speedM2 = speedM2 + kp_xy*(rpsM2 - readCurrentRpsM2())
        speedM3 = speedM3 + kp_xy*(rpsM3 - readCurrentRpsM3())
        # Telemetry every 10 iterations (costs three extra serial reads).
        if i%10==0:
            print str(readCurrentRpsM1()) + "\t\t" + str(readCurrentRpsM2()) + "\t\t" + str(readCurrentRpsM3())
    stop()
################# Implementation #####################
# Execute the CLI request: rotate first, then translate to (x, y).
max_rps = 3
rotate(max_rps,theta)
goToPoint(max_rps,x,y)
| {
"repo_name": "cletusw/goal-e",
"path": "misc/backup_navigate/backup8_navigate.py",
"copies": "1",
"size": "6050",
"license": "mit",
"hash": -7294911487062982000,
"line_mean": 28.2270531401,
"line_max": 113,
"alpha_frac": 0.6190082645,
"autogenerated": false,
"ratio": 2.8713811105837683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3990389375083768,
"avg_score": null,
"num_lines": null
} |
__author__ = 'canderson'
class Person():
    """A person with a name, age, weight, and optional gender."""

    def __init__(self, name, age, weight, gender=""):
        self.name = name
        self.age = age
        self.weight = weight
        self.gender = gender

    def summary(self):
        """Return a one-line description with a gender-appropriate pronoun.

        Fixes two defects in the original: ``is`` identity comparison against
        string literals (implementation-dependent, may silently fail) and
        ``str + int`` concatenation, which raised TypeError for the numeric
        age/weight values actually passed in.
        """
        if self.gender == "male":
            return "{} is {} years old. He weighs {}lbs.".format(self.name, self.age, self.weight)
        elif self.gender == "female":
            return "{} is {} years old. She weighs {}lbs.".format(self.name, self.age, self.weight)
        else:
            return "{} is {} years old. They weigh {}lbs.".format(self.name, self.age, self.weight)
chase = Person("Chase Anderson", 24, 170, "male")
print chase.summary()


class MyClass:
    """Minimal class example: one class attribute, one method."""
    i = 12345

    def f(self):
        return "hello world"

x = MyClass()
print x.f()


class Complex:
    """Simple complex-number holder (real part ``r``, imaginary part ``i``)."""

    def __init__(self, realpart, imagpart):
        self.r = realpart
        self.i = imagpart

    def print_me(self):
        # Prints only the real part.
        print self.r

y = Complex(3, 4)
y.print_me()


class DerivedComplex(Complex):
    """Subclass demonstrating an override and an extra keyword argument."""

    def __init__(self, real, image, kadab="fish"):
        self.r = real
        self.i = image
        self.k = kadab

    def print_me(self):
        # Overrides the base to print the imaginary part instead.
        print self.i

    # Class attributes; the double-underscore prefix triggers name mangling
    # to _DerivedComplex__private.  NOTE(review): assumed class-level per
    # typical tutorial usage — the dump's indentation is ambiguous.
    __private = 1
    not_private = 2

z = DerivedComplex(1, 2, 3)
z.print_me()
"repo_name": "W0mpRat/user-signup",
"path": "Logic/classes.py",
"copies": "1",
"size": "1269",
"license": "unlicense",
"hash": 5573841582841412000,
"line_mean": 17.6764705882,
"line_max": 99,
"alpha_frac": 0.5460992908,
"autogenerated": false,
"ratio": 3.212658227848101,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4258757518648101,
"avg_score": null,
"num_lines": null
} |
__author__ = 'caninemwenja'
from pyparsing import Word, alphanums, alphas, Literal, Suppress, ZeroOrMore, Optional, Group, oneOf
# Punctuation tokens; Suppress() drops them from the parse results.
opening_bracket = Suppress("(")
closing_bracket = Suppress(")")
semicolon = Suppress(";")
comma = Suppress(",")
star = Literal("*")

# Identifiers start with a letter; values may also contain single quotes.
identifier = Word(alphas, alphanums + "_")
values = Word(alphanums+"_"+"'")

# Statement keywords.
cd = Literal("create database")
dd = Literal("drop database")
ud = Literal("use")
ct = Literal("create table")
dt = Literal("drop table")
cf = Literal("create fragment")
df = Literal("drop fragment")
sd = Literal("show databases")
st = Literal("show tables")
sf = Literal("show fragments")
sel = Literal("select")
fr = Suppress("from")
whr = Suppress("where")

# Fragment locations may be URL-like (hence the extra punctuation chars).
location = Word(alphas, alphanums+"_"+"%"+":"+"/"+"+"+"."+"@"+"?"+"=")

# Optionally table-qualified column reference: [table.]column
column = Group(Optional(identifier+Suppress(".")) + identifier.setResultsName("col_name"))

# JOIN clause: optional left/right + join keyword + table ON colA = colB.
join_header = Optional(Literal("left") | Literal("right")) + (Literal("join") | Literal("inner join") |
                                                              Literal("outer join"))
join_tail = Group(identifier.setResultsName("table") + Suppress("on") +
                  Group(column + Suppress("=") + column).setResultsName("join_link"))
columns = Group(star | (ZeroOrMore(column + comma) + column))
join = Group(join_header.setResultsName("join_type") + join_tail)

# WHERE clause: comparisons chained with and/or.
whr_column = Group(column + oneOf([">", "<", ">=", "<=", "=", "!="]) + (values | column))
whrs = whr + Group(ZeroOrMore(whr_column + oneOf(["and", "or"]))+ whr_column)

# Full statement grammars; each tags its command via the "db_cmd" result name.
cds = cd.setResultsName("db_cmd") + identifier.setResultsName("db_name")
dds = dd.setResultsName("db_cmd") + identifier.setResultsName("db_name")
uds = ud.setResultsName("db_cmd") + identifier.setResultsName("db_name")
cts = ct.setResultsName("db_cmd") + identifier.setResultsName("table_name")
dts = dt.setResultsName("db_cmd") + identifier.setResultsName("table_name")
cfs = cf.setResultsName("db_cmd") + identifier.setResultsName("fragment_name") + \
    location.setResultsName("location") + Suppress("on") + identifier.setResultsName("table_name") + \
    opening_bracket + columns.setResultsName("columns") + closing_bracket
dfs = df.setResultsName("db_cmd") + identifier.setResultsName("fragment_name") + Suppress("on") + \
    identifier.setResultsName("table_name")
sds = sd.setResultsName("db_cmd")
sts = st.setResultsName("db_cmd")
sfs = sf.setResultsName("db_cmd") + Suppress("on") + identifier.setResultsName("table_name")
sels = sel.setResultsName("db_cmd") + columns.setResultsName("columns") + fr + identifier.setResultsName("main_table") + \
    ZeroOrMore(join).setResultsName("joins") + Optional(whrs).setResultsName("selection")

# Any single statement, terminated by a semicolon.
sql = (cds | dds | uds | cts | dts | cfs | dfs | sds | sts | sfs | sels) + semicolon
| {
"repo_name": "caninemwenja/siafu",
"path": "grammar.py",
"copies": "2",
"size": "2769",
"license": "mit",
"hash": -3566651239908189700,
"line_mean": 37.4583333333,
"line_max": 122,
"alpha_frac": 0.6579992777,
"autogenerated": false,
"ratio": 3.276923076923077,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4934922354623077,
"avg_score": null,
"num_lines": null
} |
__author__ = 'caninemwenja'
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.internet.protocol import ClientFactory
import sys
def quit():
    """Stop the reactor and say goodbye; used on connection failure/loss."""
    reactor.stop()
    print "Bye!"


class Console(object):
    """Tiny REPL console that records the user's input history."""

    def __init__(self):
        self.history = []

    def write(self, content):
        print content

    def read(self):
        # Blocking prompt; each entry is appended to the history.
        user_input = raw_input("=> ")
        self.history.append(user_input)
        return user_input


class SiafuClientProtocol(LineReceiver):
    """Line-based client protocol: prompt the user, send the line, print
    the server's reply, repeat.  Typing 'quit' closes the connection."""

    def __init__(self, addr, console):
        self.addr = addr
        self.console = console

    def loop(self):
        try:
            user_input = self.console.read()
            if user_input == '':
                # Empty input: re-prompt without sending anything.
                self.loop()
                return
            if user_input == 'quit':
                self.transport.loseConnection()
                return
            self.sendLine(user_input)
        except EOFError:
            self.transport.loseConnection()

    def connectionMade(self):
        # self.console.write("Connected to server")
        pass

    def connectionLost(self, reason):
        # Only report abnormal disconnects.
        if reason.getErrorMessage() != 'Connection was closed cleanly.':
            self.console.write("Connection Lost: {0}".format(reason.getErrorMessage()))

    def lineReceived(self, line):
        # Print the server reply, then prompt for the next command.
        self.console.write(line)
        self.loop()


class SiafuClientFactory(ClientFactory):
    """Builds the client protocol and handles connection-level failures."""

    def __init__(self):
        self.console = Console()
        self.console.write("==> Welcome to Siafu <==")
        self.console.write("")
        self.console.write("to exit type 'quit'")
        self.console.write("")
        self.console.write("")

    def buildProtocol(self, addr):
        return SiafuClientProtocol(addr, self.console)

    def clientConnectionFailed(self, connector, reason):
        self.console.write("Could not connect to server: {0}".format(reason.getErrorMessage()))
        quit()

    def clientConnectionLost(self, connector, reason):
        if reason.getErrorMessage() != 'Connection was closed cleanly.':
            self.console.write("Connection was lost: {0}".format(reason.getErrorMessage()))
        quit()


# CLI entry point: client.py <server> <port>
if len(sys.argv) < 3:
    print "Usage: {0} server port".format(sys.argv[0])
    exit()

server = sys.argv[1]
port = int(sys.argv[2])

reactor.connectTCP(server, port, SiafuClientFactory())
reactor.run()
| {
"repo_name": "kmwenja/siafu",
"path": "client.py",
"copies": "2",
"size": "2388",
"license": "mit",
"hash": -7641163170949932000,
"line_mean": 24.1368421053,
"line_max": 95,
"alpha_frac": 0.6122278057,
"autogenerated": false,
"ratio": 4.02020202020202,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.563242982590202,
"avg_score": null,
"num_lines": null
} |
__author__ = 'canliu'
"""
Save the alignment matrix in XML format. Like the following:
<sentence>
<source> </source>
<target> </target>
<alignment>
<sourceword> x,x,x... </sourceword>
<sourceword> x,x,x... </sourceword>
</alignment>
</sentence>
The number of rows is equal to the number of target_words + 1.
The number of columns is equal to the number of source_words + 1.
"""
import json
import sys
import codecs
import logging
def get_alignments(attention, x_mask, y_mask):
    """Yield one JSON string per sentence holding its attention matrix.

    ``attention`` is indexed [target_pos, sentence, source_pos]; ``x_mask``
    and ``y_mask`` are 0/1 masks of shape (positions, sentences) marking the
    real (unpadded) tokens.
    """
    num_sentences = y_mask.shape[1]
    for sent_idx in range(num_sentences):
        # The number of 1-entries in a mask column is the true length.
        tgt_len = y_mask[:, sent_idx].tolist().count(1)
        src_len = x_mask[:, sent_idx].tolist().count(1)
        # Crop padded rows/columns down to the real target/source tokens.
        matrix = attention[:tgt_len, sent_idx, :src_len]
        yield json.dumps({'matrix': matrix.tolist()})
def combine_source_target_text(source_IN, nbest_IN, saveto, alignment_IN):
    """
    Merge n-best translations with their alignment matrices into one file.

    There can be multiple target sentences (n-best entries) aligned to the
    same source sentence; each n-best line looks like
    "src_index ||| translation ||| ...". Writes <saveto>.alignment.json with
    one JSON object per line.
    """
    # Rewind all inputs — callers may have already consumed them.
    source_IN.seek(0)
    nbest_IN.seek(0)
    alignment_IN.seek(0)
    with open(saveto + ".alignment.json", "w") as alignment_OUT:
        all_matrixes = alignment_IN.readlines()
        nbest_lines = nbest_IN.readlines()
        source_lines = source_IN.readlines()
        assert len(all_matrixes) == len(nbest_lines), "The number of lines does not match with each other!"
        for target_index in range(len(all_matrixes)):
            jdata = json.loads(all_matrixes[target_index])
            target_line = nbest_lines[target_index]
            # n-best format: "<source line index> ||| <translation> ||| ..."
            elements = target_line.strip().split("|||")
            refer_index = int(elements[0].strip())
            source_sent = source_lines[refer_index].strip()
            target_sent = elements[1].strip()
            jdata["source_sent"] = source_sent
            jdata["target_sent"] = target_sent
            jdata["id"] = refer_index
            # NOTE(review): "prob" is hard-coded to 0 even though n-best
            # lines usually carry a score — confirm this is intended.
            jdata["prob"] = 0
            # Python 2 only: round-trip through unicode-escape so non-ASCII
            # text is written as raw UTF-8 instead of \uXXXX escapes.
            jdata = json.dumps(jdata).decode('unicode-escape').encode('utf8')
            alignment_OUT.write(jdata + "\n")
def combine_source_target_text_1to1(source_IN, target_IN, saveto, alignment_IN):
    """
    Merge translations with their alignment matrices, one output per line.

    There is a 1-1 mapping of target and source sentence (line i of the
    target file translates line i of the source file). Writes
    <saveto>.alignment.json with one JSON object per line.
    """
    # Rewind all inputs — callers may have already consumed them.
    source_IN.seek(0)
    target_IN.seek(0)
    alignment_IN.seek(0)
    with open(saveto + ".alignment.json", "w") as alignment_OUT:
        all_matrixes = alignment_IN.readlines()
        target_lines = target_IN.readlines()
        source_lines = source_IN.readlines()
        assert len(all_matrixes) == len(target_lines), "The number of lines does not match with each other!"
        for target_index in range(len(all_matrixes)):
            jdata = json.loads(all_matrixes[target_index])
            jdata["source_sent"] = source_lines[target_index].strip()
            jdata["target_sent"] = target_lines[target_index].strip()
            jdata["id"] = target_index
            # NOTE(review): "prob" is hard-coded to 0 — confirm intended.
            jdata["prob"] = 0
            # Python 2 only: unicode-escape round-trip emits raw UTF-8.
            jdata = json.dumps(jdata).decode('unicode-escape').encode('utf8')
            alignment_OUT.write(jdata + "\n")
def convert_to_nodes_edges_v1(filename):
    """Convert an aligned ".withtext" file into a nodes/edges file for the
    web visualization.

    Reads one JSON record per input line and writes the converted record to
    "<filename>.forweb", one per line.
    """
    with open(filename, "r") as source:
        with open(filename + ".forweb", "w") as sink:
            for record in source.readlines():
                sink.write(convert_to_nodes_edges_each_v1(record) + "\n")
def convert_to_nodes_edges_each_v1(data):
    """
    Convert one aligned JSON record into the v1 web-visualization format.

    The output object has "nodes" (source words = group 1, target words =
    group 2, each list ending with a sentence-end marker) and "links" (one
    entry per attention-matrix cell, weighted by the attention score).
    """
    jdata = json.loads(data)
    web_data = {}
    source_words = jdata["source_sent"].strip().split()
    target_words = jdata["target_sent"].strip().split()
    web_data["nodes"] = []
    for word in source_words:
        web_data["nodes"].append({"name": word, "group": 1})
    # Bug fix: the end-of-sentence nodes were written as "<EOS" (missing
    # '>'), so they could never match the "<EOS>" names used in the links
    # below and the EOS links dangled in the web view.
    web_data["nodes"].append({"name": "<EOS>", "group": 1})
    for word in target_words:
        web_data["nodes"].append({"name": word, "group": 2})
    web_data["nodes"].append({"name": "<EOS>", "group": 2})
    matrix = jdata["matrix"]
    # The matrix carries one extra row/column for the EOS positions.
    n_rows = len(matrix)
    n_cols = len(matrix[0])
    web_data["links"] = []
    for target_index in range(n_rows):
        for source_index in range(n_cols):
            # The last row/column corresponds to the sentence-end marker.
            if target_index == (n_rows - 1):
                target_word = "<EOS>"
            else:
                target_word = target_words[target_index]
            if source_index == (n_cols - 1):
                source_word = "<EOS>"
            else:
                source_word = source_words[source_index]
            score = matrix[target_index][source_index]
            web_data["links"].append({"source": source_word, "target": target_word, "value": score})
    # Python 2 only: unicode-escape round-trip emits raw UTF-8.
    web_data = json.dumps(web_data).decode('unicode-escape').encode('utf8')
    return web_data
def convert_to_nodes_edges_v2(filename):
    """
    Take as input the aligned file with file names ".withtext", and convert
    this into a file with nodes and edges, which will later be used for
    visualization. Unlike v1, the whole corpus is written as a single JSON
    object with "source_list", "target_list" and flat "links".
    """
    with codecs.open(filename, "r", encoding="UTF-8") as IN:
        with open(filename + ".forweb", "w") as OUT:
            in_lines = IN.readlines()
            source_list = []
            target_list = []
            all_links = []
            for sent_id in range(len(in_lines)):
                data = in_lines[sent_id]
                #print data
                source_sent, target_sent, links = convert_to_nodes_edges_each_v2(data, sent_id)
                source_list.append(source_sent)
                target_list.append(target_sent)
                all_links += links
            jdata = {}
            jdata["source_list"] = source_list
            jdata["target_list"] = target_list
            jdata["links"] = all_links
            # Python 2 only: unicode-escape round-trip emits raw UTF-8.
            jdata = json.dumps(jdata).decode('unicode-escape').encode('utf8')
            OUT.write(jdata)
def convert_to_nodes_edges_each_v2(data, sent_id):
    """Convert one aligned JSON record into (source_words, target_words, links).

    Each link is a 5-element list:
    [target_index, sent_id, score, source_index, sent_id].
    """
    jdata = json.loads(data)
    # The unicode-escape encoding of the source side mirrors how the
    # aligned file was written.
    source_words = jdata["source_sent"].encode('unicode-escape').strip().split()
    source_words.append("EOS")
    target_words = jdata["target_sent"].strip().split()
    target_words.append("EOS")
    matrix = jdata["matrix"]
    links = []
    n_cols = len(matrix[0])
    for tgt_idx in range(len(matrix)):
        row = matrix[tgt_idx]
        for src_idx in range(n_cols):
            links.append([tgt_idx, sent_id, row[src_idx], src_idx, sent_id])
    return source_words, target_words, links
if __name__ == "__main__":
    """
    Run the conversion to Web format if needed.
    """
    # Usage: python alignment_util.py <aligned .withtext file>
    input_file = sys.argv[1]
    convert_to_nodes_edges_v2(input_file)
# Python 2 hack: reload() restores sys.setdefaultencoding (deleted by
# site.py at startup) so the process-wide default encoding can be forced
# to UTF-8. NOTE(review): this appears after the __main__ block above, so
# it runs too late to affect the conversion — confirm intended placement.
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
### Json for web visualization format version 1.
### This corresponds to convert_to_nodes_edges_each_v1; and convert_to_nodes_edges_v1.
"""
{
"nodes":[
{"name":"Good","group":1},
{"name":"Morning","group":1},
{"name":"Buenos","group":2},
{"name":"dias","group":2}
],
"links":[
{"source":"Good" ,"target":"Buenos","value":0.90},
{"source":"Good" ,"target":"dias","value":0.30},
{"source":"Morning" ,"target":"Buenos","value":0.50},
{"source":"Morning" ,"target":"dias","value":0.95}
]
}
"""
| {
"repo_name": "cshanbo/nematus",
"path": "nematus/alignment_util.py",
"copies": "2",
"size": "7989",
"license": "bsd-3-clause",
"hash": -1449208181047120000,
"line_mean": 33.8864628821,
"line_max": 115,
"alpha_frac": 0.5879334084,
"autogenerated": false,
"ratio": 3.5443655723158827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027128310509618404,
"num_lines": 229
} |
__author__ = 'can'
class Trie(dict):
    """A prefix tree stored as nested dicts; the key '.' marks a word end."""

    def add_word(self, word):
        """Insert *word*, creating child tries along the path as needed."""
        node = self
        for ch in word:
            node = node.setdefault(ch, Trie())
        # Terminal marker: the path down to this node spells a full word.
        node['.'] = None

    def get_sub_trie(self, word):
        """Return the subtrie reached by *word*, or None if the path breaks."""
        node = self
        for ch in word:
            if ch not in node:
                return None
            node = node[ch]
        return node
# Build the global dictionary trie from the word list file ("kelimeler"
# is Turkish for "words"), keeping only single-token lowercase entries.
trie = Trie()
with open('kelimeler.txt') as f:
    for line in f:
        word = line.strip().lower()
        if ' ' not in word:
            trie.add_word(word)
def bfs(letter_matrix, n):
    """Breadth-first search over an n x n letter grid for dictionary words.

    Paths grow one 8-connected neighbor at a time without revisiting a
    cell, and are pruned as soon as their spelled prefix leaves the global
    ``trie``. Returns the set of complete words found.
    """
    import queue
    found_words = set()
    frontier = queue.Queue()
    # Seed the frontier with every single-cell path.
    for r in range(n):
        for c in range(n):
            frontier.put(((r, c),))
    while not frontier.empty():
        path = frontier.get()
        row, col = path[-1]
        # Spell out the word along this path.
        word = ''.join(letter_matrix[r][c] for r, c in path)
        sub_trie = trie.get_sub_trie(word)
        if sub_trie is None:
            # Dead prefix: no dictionary word starts this way.
            continue
        if '.' in sub_trie:
            found_words.add(word)
        # Extend the path to every in-bounds neighbor not already visited
        # (the (0, 0) offset is rejected by the "not in path" check).
        for dr in (-1, 0, 1):
            for dc in (-1, 0, 1):
                nr, nc = row + dr, col + dc
                if 0 <= nr < n and 0 <= nc < n and (nr, nc) not in path:
                    frontier.put(path + ((nr, nc),))
    return found_words
def new_game():
    """Read a square letter string from stdin, solve the board, and print
    every found word sorted by length."""
    import math
    letters = input('Letters: ')
    # The board is square: n*n letters entered row by row.
    side = int(math.sqrt(len(letters)))
    board = [[letters[r * side + c] for c in range(side)] for r in range(side)]
    for word in sorted(bfs(board, side), key=len):
        print(word)
# Play forever; each round reads a fresh board from stdin.
while True:
    new_game()
| {
"repo_name": "canguler/kelime_avi",
"path": "kelime_avi.py",
"copies": "1",
"size": "1968",
"license": "mit",
"hash": -1713063609492163300,
"line_mean": 22.4285714286,
"line_max": 88,
"alpha_frac": 0.4842479675,
"autogenerated": false,
"ratio": 3.364102564102564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4348350531602564,
"avg_score": null,
"num_lines": null
} |
__author__ = "Can Ozbek Arnav"
import pandas as pd
import numpy as np
import pylab
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import sys
# sys.path.append("/Users/ahmetcanozbek/Desktop/EE660/660Project/Code_Final_Used/functions")
import ml_aux_functions as ml_aux
import crop_rock
import pickle
#PREPROCESSING
#Read the files
df_full = pd.read_pickle("msd_train_t5.pkl") # 80%
print "DEBUG: file read."
#Get rid of the rows that have missing values (nan) and UNCAT
df_full = df_full[ df_full["Genre"] != "UNCAT" ]
df_full = df_full.dropna()
# Target is the genre label; IDs and Year are dropped from the features.
y_full = df_full["Genre"]
X_full = df_full.drop(["Genre", "Track ID", "Year"], axis=1)
#Split the 80% of data to 70% Training and 30% Validation Data
from sklearn.cross_validation import train_test_split
X_train, X_validation, y_train, y_validation = \
    train_test_split(X_full, y_full, train_size=0.7, random_state=42)
print "DEBUG: Data splitted"
df_train_toCrop = pd.concat([y_train, X_train], axis=1, join='inner')
#Crop the dataset
# crop_rock presumably down-samples the dominant genre to the size of the
# second-largest class to reduce imbalance — confirm in the crop_rock module.
maxval = crop_rock.find_second_max_value(df_train_toCrop)
df_cropped = crop_rock.drop_excess_rows(df_train_toCrop, maxval)
y_cropped = df_cropped["Genre"]
X_cropped = df_cropped.drop(["Genre"], axis=1)
# # Start LDA Classification
# print "Performing LDA Classification:"
# from sklearn.lda import LDA
# clf = LDA(solver='svd', shrinkage=None, n_components=None).fit(X_cropped, np.ravel(y_cropped[:]))
#
# #Use X_cropped to get best model
# y_train_predicted = clf.predict(X_train)
# print "Error rate for LDA on Training: ", ml_aux.get_error_rate(y_train,y_train_predicted)
# # ml_aux.plot_confusion_matrix(y_cropped, predicted, "CM on LDA cropped")
# # plt.show()
#
# y_validation_predicted = clf.predict(X_validation)
# print "Error rate for LDA on Validation: ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# # ml_aux.plot_confusion_matrix(y_validation, y_validation_predicted, "CM on LDA validation (t1)")
# # plt.show()
#
#
#
# # Start Adaboost Classification
# from sklearn.ensemble import AdaBoostClassifier
# adaboost_model = AdaBoostClassifier(n_estimators=50)
# adaboost_model = adaboost_model.fit(X_cropped,y_cropped)
# # predicted = adaboost_model.predict(X_cropped)
# # print "Error rate for LDA on Cropped: ", ml_aux.get_error_rate(y_cropped,predicted)
# # ml_aux.plot_confusion_matrix(y_cropped, predicted, "CM on LDA cropped")
# # plt.show()
#
# y_validation_predicted = adaboost_model.predict(X_validation)
# print "Error rate for Adaboost on Validation: ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# # ml_aux.plot_confusion_matrix(y_validation, y_validation_predicted, "CM on Adaboost validation (t1)")
# # plt.show()
#
#
# # Start QDA Classification
# print "Performing QDA Classification:"
# from sklearn.qda import QDA
# clf = QDA(priors=None, reg_param=0.001).fit(X_cropped, np.ravel(y_cropped[:]))
# y_validation_predicted = clf.predict(X_validation)
# print "Error rate for QDA (Validation): ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# Start Random Forest Classification
print "Performing Random Classification:"
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators=500)
forest = forest.fit(X_cropped, np.ravel(y_cropped[:]))
y_validation_predicted = forest.predict(X_validation)
print "Error rate for Random Forest (Validation): ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# ml_aux.plot_confusion_matrix(y_validation, y_validation_predicted, "CM Random Forest (t1)")
# plt.show()
pickle.dump(forest,open('t5_random_forest.pkl','wb'))
# # Start k nearest neighbor Classification
# print "Performing kNN Classification:"
# from sklearn import neighbors
# knn_model = neighbors.KNeighborsClassifier(n_neighbors=2, algorithm='auto',leaf_size=15)
# knn_model.fit(X_cropped, y_cropped)
# # y_train_predicted = knn_model.predict(X_train)
# # print "Error Rate for kNN (Cropped): ", ml_aux.get_error_rate(y_train, y_train_predicted)
#
# y_validation_predicted = knn_model.predict(X_validation)
# print "Error Rate for kNN on Validation (t1): ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
#
#
# # Start Naive Bayes Classification
# print "Performing Naive Bayes Classification:"
# from sklearn.naive_bayes import GaussianNB
# naivebayes_model = GaussianNB()
# naivebayes_model.fit(X_cropped, y_cropped)
# y_validation_predicted = naivebayes_model.predict(X_validation)
# print "Naive Bayes Error Rate on Validation (t1): ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
#
#
# # Start SVM Classification
# print "Performing SVM Classification:"
# from sklearn.svm import SVC
# svm_model = SVC(kernel='rbf' ,probability=True, max_iter=1000)
# svm_model.fit(X_cropped, y_cropped)
# y_train_predicted = svm_model.predict(X_train)
# print "SVM Error rate on training data (t1): ", ml_aux.get_error_rate(y_train, y_train_predicted)
# # ml_aux.plot_confusion_matrix(y_train, y_train_predicted, "CM SVM Training (t1)")
# # plt.show()
#
# y_validation_predicted = svm_model.predict(X_validation)
# print "SVM Error rate on validation (t1): ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
#
#
# # Start k nearest Centroid Classification
# print "Performing kNC Classification:"
# from sklearn.neighbors.nearest_centroid import NearestCentroid
# knnc_model = NearestCentroid()
# knnc_model.fit(X_cropped, y_cropped)
# y_validation_predicted = knnc_model.predict(X_validation)
# print "Error Rate on kNNC (t1) Validation: ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
#
# # Start Bagging Classification
# print "Performing Bagging Classification:"
# # Bagging
# from sklearn.ensemble import BaggingClassifier
# from sklearn.neighbors import KNeighborsClassifier
#
# # Bagging
# bagging1 = BaggingClassifier(KNeighborsClassifier(n_neighbors=2),max_samples=1.0, max_features=0.1)
# bagging1.fit(X_cropped, y_cropped)
# y_validation_predicted = bagging1.predict(X_validation)
# print "Error Rate kNN with Baggging Validation: ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
#
| {
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/Machine_Learning_Algos/training_t5.py",
"copies": "1",
"size": "6128",
"license": "mit",
"hash": -6134500176210941000,
"line_mean": 40.1275167785,
"line_max": 114,
"alpha_frac": 0.740535248,
"autogenerated": false,
"ratio": 2.9433237271853985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9164094751529777,
"avg_score": 0.003952844731124394,
"num_lines": 149
} |
__author__ = "Can Ozbek Arnav"
import pandas as pd
import numpy as np
import pylab
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import sys
sys.path.append("/Users/ahmetcanozbek/Desktop/EE660/660Project/Code_Final_Used/functions")
import ml_aux_functions as ml_aux
import crop_rock
#PREPROCESSING
#Read the files
df_full = pd.read_pickle("/Users/ahmetcanozbek/Desktop/660Stuff/msd_train.pkl") # 80%
print "DEBUG: file read."
#Get rid of the rows that have missing values (nan) and UNCAT
df_full = df_full[ df_full["Genre"] != "UNCAT" ]
df_full = df_full.dropna()
# Target is the genre label; IDs and Year are dropped from the features.
y_full = df_full["Genre"]
X_full = df_full.drop(["Genre", "Track ID", "Year"], axis=1)
#Split the 80% of data to 70% Training and 30% Validation Data
from sklearn.cross_validation import train_test_split
X_train, X_validation, y_train, y_validation = \
    train_test_split(X_full, y_full, train_size=0.7, random_state=42)
print "DEBUG: Data splitted"
df_train_toCrop = pd.concat([y_train, X_train], axis=1, join='inner')
#Crop the dataset
# crop_rock presumably down-samples the dominant genre to the size of the
# second-largest class to reduce imbalance — confirm in the crop_rock module.
maxval = crop_rock.find_second_max_value(df_train_toCrop)
df_cropped = crop_rock.drop_excess_rows(df_train_toCrop, maxval)
y_cropped = df_cropped["Genre"]
X_cropped = df_cropped.drop(["Genre"], axis=1)
# Start LDA Classification
# Each section below trains on the cropped set and reports the validation
# error via ml_aux.get_error_rate.
print "Performing LDA Classification:"
from sklearn.lda import LDA
clf = LDA(solver='svd', shrinkage=None, n_components=None).fit(X_cropped, np.ravel(y_cropped[:]))
#Use X_cropped to get best model
y_train_predicted = clf.predict(X_train)
print "Error rate for LDA on Training: ", ml_aux.get_error_rate(y_train,y_train_predicted)
# ml_aux.plot_confusion_matrix(y_cropped, predicted, "CM on LDA cropped")
# plt.show()
y_validation_predicted = clf.predict(X_validation)
print "Error rate for LDA on Validation: ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# ml_aux.plot_confusion_matrix(y_validation, y_validation_predicted, "CM on LDA validation (t1)")
# plt.show()
# Start Adaboost Classification
from sklearn.ensemble import AdaBoostClassifier
adaboost_model = AdaBoostClassifier(n_estimators=50)
adaboost_model = adaboost_model.fit(X_cropped,y_cropped)
# predicted = adaboost_model.predict(X_cropped)
# print "Error rate for LDA on Cropped: ", ml_aux.get_error_rate(y_cropped,predicted)
# ml_aux.plot_confusion_matrix(y_cropped, predicted, "CM on LDA cropped")
# plt.show()
y_validation_predicted = adaboost_model.predict(X_validation)
print "Error rate for Adaboost on Validation: ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# ml_aux.plot_confusion_matrix(y_validation, y_validation_predicted, "CM on Adaboost validation (t1)")
# plt.show()
# Start QDA Classification
# NOTE(review): `clf` is reused here, overwriting the LDA model above.
print "Performing QDA Classification:"
from sklearn.qda import QDA
clf = QDA(priors=None, reg_param=0.001).fit(X_cropped, np.ravel(y_cropped[:]))
y_validation_predicted = clf.predict(X_validation)
print "Error rate for QDA (Validation): ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# Start Random Forest Classification
print "Performing Random Classification:"
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators=500)
forest = forest.fit(X_cropped, np.ravel(y_cropped[:]))
y_validation_predicted = forest.predict(X_validation)
print "Error rate for Random Forest (Validation): ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# ml_aux.plot_confusion_matrix(y_validation, y_validation_predicted, "CM Random Forest (t1)")
# plt.show()
# Start k nearest neighbor Classification
print "Performing kNN Classification:"
from sklearn import neighbors
knn_model = neighbors.KNeighborsClassifier(n_neighbors=2, algorithm='auto',leaf_size=15)
knn_model.fit(X_cropped, y_cropped)
# y_train_predicted = knn_model.predict(X_train)
# print "Error Rate for kNN (Cropped): ", ml_aux.get_error_rate(y_train, y_train_predicted)
y_validation_predicted = knn_model.predict(X_validation)
print "Error Rate for kNN on Validation (t1): ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
# Start Naive Bayes Classification
print "Performing Naive Bayes Classification:"
from sklearn.naive_bayes import GaussianNB
naivebayes_model = GaussianNB()
naivebayes_model.fit(X_cropped, y_cropped)
y_validation_predicted = naivebayes_model.predict(X_validation)
print "Naive Bayes Error Rate on Validation (t1): ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
# Start SVM Classification
# NOTE(review): max_iter=1000 may stop SVC before convergence on this data.
print "Performing SVM Classification:"
from sklearn.svm import SVC
svm_model = SVC(kernel='rbf' ,probability=True, max_iter=1000)
svm_model.fit(X_cropped, y_cropped)
y_train_predicted = svm_model.predict(X_train)
print "SVM Error rate on training data (t1): ", ml_aux.get_error_rate(y_train, y_train_predicted)
# ml_aux.plot_confusion_matrix(y_train, y_train_predicted, "CM SVM Training (t1)")
# plt.show()
y_validation_predicted = svm_model.predict(X_validation)
print "SVM Error rate on validation (t1): ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
# Start k nearest Centroid Classification
print "Performing kNC Classification:"
from sklearn.neighbors.nearest_centroid import NearestCentroid
knnc_model = NearestCentroid()
knnc_model.fit(X_cropped, y_cropped)
y_validation_predicted = knnc_model.predict(X_validation)
print "Error Rate on kNNC (t1) Validation: ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
# Start Bagging Classification
print "Performing Bagging Classification:"
# Bagging
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
# Bagging of kNN base learners over random 10% feature subsets.
bagging1 = BaggingClassifier(KNeighborsClassifier(n_neighbors=2),max_samples=1.0, max_features=0.1)
bagging1.fit(X_cropped, y_cropped)
y_validation_predicted = bagging1.predict(X_validation)
print "Error Rate kNN with Baggging Validation: ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
| {
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/Machine_Learning_Algos/training_fullset.py",
"copies": "1",
"size": "5923",
"license": "mit",
"hash": -949988992994559900,
"line_mean": 39.5684931507,
"line_max": 112,
"alpha_frac": 0.7627891271,
"autogenerated": false,
"ratio": 3.0127161749745675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4275505302074567,
"avg_score": null,
"num_lines": null
} |
__author__ = "Can Ozbek"
import os
import hdf5_getters
import featureExtractionFunctions
import time
import numpy
# Resolve the repo root relative to the current working directory, then
# cd into the Million Song Subset data folder so os.walk starts there.
abspath = os.path.abspath(os.getcwd())
dirname = os.path.dirname(abspath)
#cd into million song subset folder
dataFolderPath = "/Resources/MillionSongSubset/data"
os.chdir(dirname + dataFolderPath)
print "CurrentFolder(This Should be 'Resources/MillionSongSubset/data' folder): ", os.getcwd()
# Column headers for the output feature matrix; categorical features are
# one-hot expanded. NOTE(review): the 'Picthes*' misspellings are kept
# as-is deliberately — these strings become persisted column names that
# downstream code matches on.
feature_names = [
    "Track ID", "Song ID",
    'AvgBarDuration','Loudness', 'Tempo','ArtistFamiliarity','ArtistHotttnesss','SongHotttnesss',
    'Mode[0]','Mode[1]','Year',
    #Key features
    'Key[0]','Key[1]','Key[2]','Key[3]','Key[4]','Key[5]',
    'Key[6]','Key[7]','Key[8]','Key[9]','Key[10]','Key[11]',
    #Picthes Mean
    'PicthesMean[0]','PicthesMean[1]','PicthesMean[2]','PicthesMean[3]','PicthesMean[4]','PicthesMean[5]',
    'PicthesMean[6]','PicthesMean[7]','PicthesMean[8]','PicthesMean[9]','PicthesMean[10]','PicthesMean[11]',
    #Pitches Variance
    'PitchesVar[0]','PitchesVar[1]','PitchesVar[2]','PitchesVar[3]','PitchesVar[4]','PitchesVar[5]',
    'PitchesVar[6]','PitchesVar[7]','PitchesVar[8]','PitchesVar[9]','PitchesVar[10]','PitchesVar[11]',
    #Timbre Mean
    'TimbreMean[0]','TimbreMean[1]','TimbreMean[2]','TimbreMean[3]','TimbreMean[4]','TimbreMean[5]',
    'TimbreMean[6]','TimbreMean[7]','TimbreMean[8]','TimbreMean[9]','TimbreMean[10]','TimbreMean[11]',
    #Timbre Variance
    'TimbreVar[0]','TimbreVar[1]','TimbreVar[2]','TimbreVar[3]','TimbreVar[4]','TimbreVar[5]',
    'TimbreVar[6]','TimbreVar[7]','TimbreVar[8]','TimbreVar[9]','TimbreVar[10]','TimbreVar[11]',
    #Time Signature
    'TimeSig[0]', 'TimeSig[1]', 'TimeSig[2]', 'TimeSig[3]', 'TimeSig[4]', 'TimeSig[5]']
#Put feature functions in a list in order
# Extraction callables applied per song; each returns a list that is
# concatenated onto the sample row, so the order here must match
# feature_names above.
featureFunctions = [featureExtractionFunctions.getBarDuration,
                    featureExtractionFunctions.getLoudness,
                    featureExtractionFunctions.getTempo,
                    featureExtractionFunctions.getArtistFamiliarity,
                    featureExtractionFunctions.getArtistHotttnesss,
                    featureExtractionFunctions.getSongHotttnesss,
                    featureExtractionFunctions.getMode,
                    featureExtractionFunctions.getYear,
                    featureExtractionFunctions.getKey,
                    featureExtractionFunctions.getSegmentPitchesMean,
                    featureExtractionFunctions.getSegmentPitchesVar,
                    featureExtractionFunctions.getSegmentTimbreMean,
                    featureExtractionFunctions.getSegmentTimbreVar,
                    featureExtractionFunctions.getTimeSignature]
#Initialize Feature Matrix Data Structure with feature names as header, (going to be list of lists)
featureMatrix = []
print "feature_names length", len(feature_names)
#Start the time
startTime = time.time()
#Start going into files
# NOTE(review): `flag` is assigned but never used below.
flag = 1
numberOf_h5Files = 0
for root, dirs, files in os.walk(os.getcwd()):
    for name in files:
        if name.endswith(".h5"):
            #increment to count the number of .h5 files
            numberOf_h5Files += 1
            #open the h5 file
            h5SongFile = hdf5_getters.open_h5_file_read(os.path.join(root,name))
            #create the sample row for feature matrix
            #initialize the list with track ID and song ID
            sampleRow = [hdf5_getters.get_track_id(h5SongFile),
                         hdf5_getters.get_song_id(h5SongFile)]
            cnt = 0
            for func in featureFunctions:
                cnt += 1
                #print "functionNumber: ", cnt
                #print "BarStrctr: ", hdf5_getters.get_bars_start(h5SongFile)
                sampleRow += func(h5SongFile)
            #Construct the feature matrix that contains all the songs...
            #Data structure for featureMatrix will be list of lists, so that...
            #Numpy will be able to convert it to a matrix in a way that...
            #All list elements will become rows
            featureMatrix += [sampleRow]
            #Print out rows
            print "SongNumber: ", numberOf_h5Files, " F: ", sampleRow
            #close the file
            h5SongFile.close()
        else:
            #place holder for non .h5 files
            pass
#Make it numpy matrix (header row first; dtype becomes string)
featureMatrixNumpy = numpy.array([feature_names] + featureMatrix)
#Save the feature matrix to file
#cd into Extracted_Data folder
extractedDataFolderPath = "/Extracted_Data"
os.chdir(dirname + extractedDataFolderPath)
print "CurrentDirectory(This should be '/Extracted_Data' folder): ", os.getcwd()
print "Saving the featureMatrix.bin file..."
#Save the featureMatrix.bin file ('|'-delimited text despite the extension)
numpy.savetxt("featureMatrix.bin", featureMatrixNumpy, delimiter='|',fmt="%s")
print "Process Complete."
endTime = time.time()
print "Number Of h5 files: ", numberOf_h5Files
print "FeatureMatrix (Shape)", featureMatrixNumpy.shape
print "FeatureMatrix (Rows): ", featureMatrixNumpy.shape[0], "(Columns): ", featureMatrixNumpy.shape[1]
print "Elapsed Time: ", endTime - startTime, "Seconds"
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/Data Generation & Manipulation/MSD_Data_Extract_writeFeaturesToFile_10kSet.py",
"copies": "1",
"size": "5069",
"license": "mit",
"hash": 5237976647483685000,
"line_mean": 43.0869565217,
"line_max": 108,
"alpha_frac": 0.6545669757,
"autogenerated": false,
"ratio": 3.323934426229508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4478501401929508,
"avg_score": null,
"num_lines": null
} |
__author__ = "Can Ozbek"
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
def getUniqueCount(df_column):
    """Count the occurrences of each distinct value in a pandas Series.

    :param df_column: pandas Series (one DataFrame column)
    :return: dict mapping each unique value to its number of occurrences
    """
    # One boolean comparison per distinct value; fine for the small
    # cardinalities (genre labels) this is used with.
    return {value: sum(df_column == value) for value in df_column.unique().tolist()}
def get_confusion_matrix(y_true, y_predicted):
    """Return sklearn's confusion matrix with labels sorted alphabetically.

    y_true must be a pandas Series (uses .unique()); also prints the label
    order so rows/columns can be interpreted.
    """
    #Get the Class Labels
    classLabels = y_true.unique().tolist()
    classLabels.sort()
    print "ClassLabels: ", classLabels
    #Get the confusion matrix
    cmatrix = confusion_matrix(y_true, y_predicted, classLabels)
    return cmatrix
def plot_confusion_matrix(y_true, y_predicted, title='Confusion matrix'):
    """
    Plot the confusion matrix with matplotlib and return it as a numpy array.

    Columns are predicted labels in sorted order; rows (bottom to top) are
    true labels, which is why row lookups below are flipped. Diagonal cells
    are drawn green, non-zero off-diagonal cells red.
    """
    #Get the Class Labels
    classLabels = y_true.unique().tolist()
    classLabels.sort()
    #Get the confusion matrix
    cmatrix = confusion_matrix(y_true, y_predicted, classLabels)
    #Plot the figure
    plt.figure()
    #Ticks
    # Keep major ticks labeless
    plt.xticks(range(len(classLabels)+1), [])
    plt.yticks(range(len(classLabels)+1), [])
    # Place labels on minor ticks (offset by 0.5 to sit mid-cell)
    plt.gca().set_xticks([x + 0.5 for x in range(len(classLabels))], minor=True)
    plt.gca().set_xticklabels(classLabels, rotation='45', fontsize=10, minor=True)
    plt.gca().set_yticks([y + 0.5 for y in range(len(classLabels))], minor=True)
    plt.gca().set_yticklabels(classLabels[::-1], fontsize=10, minor=True)
    # Finally, hide minor tick marks...
    plt.gca().tick_params('both', width=0, which='minor')
    #Grid on
    plt.grid(True)
    #Put the values into the plot
    # The y axis is flipped, hence the cmatrix.shape[0]-(y+1) row index.
    for x in range(cmatrix.shape[0]):
        for y in range(cmatrix.shape[1]):
            if x==cmatrix.shape[0]-(y+1):
                # Diagonal (correct predictions): green.
                plt.text(x+0.5,y+0.5,cmatrix[cmatrix.shape[0]-(y+1)][x],
                         horizontalalignment='center',
                         verticalalignment='center',
                         color="Green",
                         fontsize = 15)
            elif cmatrix[cmatrix.shape[0]-(y+1)][x] > 0:
                # Misclassifications: red.
                plt.text(x+0.5,y+0.5,cmatrix[cmatrix.shape[0]-(y+1)][x],
                         horizontalalignment='center',
                         verticalalignment='center',
                         color="Red",
                         fontsize = 15)
            else:
                plt.text(x+0.5,y+0.5,cmatrix[cmatrix.shape[0]-(y+1)][x],
                         horizontalalignment='center',
                         verticalalignment='center',
                         fontsize = 15)
    plt.ylabel("True Label")
    plt.xlabel("Predicted Label")
    plt.title(title)
    plt.tight_layout()
    return cmatrix
def plot_histogram(d):
    """Draw a bar chart of dict *d* (keys on x, counts on y); returns d.

    NOTE(review): relies on d.keys() and d.values() iterating in the same
    order — true in CPython, but worth keeping in mind.
    """
    plt.figure()
    X = np.arange(len(d))
    plt.bar(X, d.values(), align='center', width=0.5)
    plt.xticks(X, d.keys())
    # Leave one unit of headroom above the tallest bar.
    ymax = max(d.values()) + 1
    plt.ylim(0, ymax)
    plt.tight_layout()
    return d
def get_error_rate(y_true, y_predicted):
    """Return the fraction of positions where the two label vectors disagree."""
    mismatches = y_true != y_predicted
    return float(sum(mismatches)) / y_true.shape[0]
| {
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/functions/ml_aux_functions.py",
"copies": "1",
"size": "3435",
"license": "mit",
"hash": -549396424844916860,
"line_mean": 34.0510204082,
"line_max": 82,
"alpha_frac": 0.5650655022,
"autogenerated": false,
"ratio": 3.745910577971647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48109760801716467,
"avg_score": null,
"num_lines": null
} |
__author__ = "Can Ozbek"
import hdf5_getters
import numpy
#Continuous variable feature functions
def getBarDuration(h5):
    """Return [average bar duration] for the song, or ["nan"] if unknown.

    The average is the mean gap between consecutive bar start times.
    """
    barsVector = hdf5_getters.get_bars_start(h5)
    # Need at least two bar starts to measure any duration.
    if len(barsVector) < 2:
        return ["nan"]
    # Bug fix (off-by-one): n start times yield n-1 bar durations, but the
    # old code divided the summed durations by n, underestimating the
    # average. The sum of consecutive differences telescopes to
    # last - first, so the mean is simply (last - first) / (n - 1).
    avgBarDuration = (barsVector[-1] - barsVector[0]) / (barsVector.size - 1)
    return [avgBarDuration]
# Each getter returns a 1-element list so sample rows can be built by
# simple list concatenation.
def getLoudness(h5):
    """Return [loudness] of the song."""
    return [hdf5_getters.get_loudness(h5)]
def getTempo(h5):
    """Return [tempo] of the song."""
    return [hdf5_getters.get_tempo(h5)]
def getArtistFamiliarity(h5):
    """Return [artist familiarity]."""
    return [hdf5_getters.get_artist_familiarity(h5)]
def getArtistHotttnesss(h5):
    """Return [artist hotttnesss]."""
    return [hdf5_getters.get_artist_hotttnesss(h5)]
def getSongHotttnesss(h5):
    """Return [song hotttnesss]."""
    return [hdf5_getters.get_song_hotttnesss(h5)]
def getYear(h5):
    """Return [release year]; a year of 0 is treated as missing ("nan")."""
    year = hdf5_getters.get_year(h5)
    if year == 0:
        return ["nan"]
    return [year]
def getSegmentPitchesMean(h5):
    """
    Returns the mean of all pitches from all segments
    :param h5: input song file
    :return: 12 dimensional list for all pitches
    """
    pitches = hdf5_getters.get_segments_pitches(h5)
    confidence = hdf5_getters.get_segments_confidence(h5)
    # Keep only segments the analyzer is reasonably sure about (> 0.5).
    is_confident = numpy.array(confidence)>0.5
    confident_feature = pitches[is_confident,:]
    # NOTE(review): if no segment passes the filter, numpy.mean over the
    # empty slice yields NaNs (with a RuntimeWarning) — confirm downstream
    # handles that case.
    expanded_feat_mean = numpy.mean(confident_feature, axis=0)
    return expanded_feat_mean.tolist()
def getSegmentPitchesVar(h5):
    """
    Returns the variance of all pitches from all segments
    :param h5: input song file
    :return: 12 dimensional list for all pitches
    """
    pitches = hdf5_getters.get_segments_pitches(h5)
    confidence = hdf5_getters.get_segments_confidence(h5)
    # Keep only segments the analyzer is reasonably sure about (> 0.5).
    is_confident = numpy.array(confidence)>0.5
    confident_feature = pitches[is_confident,:]
    # NOTE(review): empty slice -> NaNs, as in getSegmentPitchesMean.
    expanded_feat_var = numpy.var(confident_feature, axis=0)
    return expanded_feat_var.tolist()
def getSegmentTimbreMean(h5):
    """
    Returns the mean of each timbre from all segments
    :param h5: input song file
    :return: 12 dimensional list
    """
    # Unlike the pitch features, no confidence filtering is applied here.
    timbre = hdf5_getters.get_segments_timbre(h5)
    expanded_feat_mean = numpy.mean(timbre, axis=0)
    return expanded_feat_mean.tolist()
def getSegmentTimbreVar(h5):
    """
    Returns the variance of each timbre from all segments
    :param h5: input song file
    :return: 12 dimensional list
    """
    timbre = hdf5_getters.get_segments_timbre(h5)
    expanded_feat_var = numpy.var(timbre, axis=0)
    return expanded_feat_var.tolist()
#Categorical Feature Functions:
#TODO: -1 No Value Issues
def getMode(h5):
    """
    Return the song mode (major/minor) one-hot encoded.

    Categorical feature: mode 0 -> [0, 1], mode 1 -> [1, 0].
    Any other value means the mode is unavailable and ["nan"] is returned.

    :param h5: input song file
    :return: 2 dimensional list, or ["nan"] when unknown
    """
    encoding = {0: [0, 1], 1: [1, 0]}
    return encoding.get(hdf5_getters.get_mode(h5), ["nan"])
def getKey(h5):
    """
    Return the song key one-hot encoded as a 12 dimensional list.

    The '1' is placed at the index equal to the key value (keys run
    0..11), e.g. key 0 -> [1,0,0,0,0,0,0,0,0,0,0,0] and
    key 11 -> [0,0,0,0,0,0,0,0,0,0,0,1].
    Returns ["nan"] when the key information is unavailable (-1).

    :param h5: input song file
    :return: 12 dimensional list, or ["nan"] when unknown
    """
    key = hdf5_getters.get_key(h5)
    if key == -1:
        # key info not available
        return ["nan"]
    return [1 if position == key else 0 for position in range(12)]
def getTimeSignature(h5):
    """
    Return the time signature one-hot encoded as a 6 dimensional list.

    Raw values range over 3..7 (meters of 3/4 to 7/4) plus the special
    value 1, which marks a complex or changing time signature -- six
    categories in total:
        1 -> [1,0,0,0,0,0]
        3 -> [0,1,0,0,0,0]
        4 -> [0,0,1,0,0,0]
        ...
        7 -> [0,0,0,0,0,1]
    Returns ["nan"] when the information is unavailable (-1).

    :param h5: input song file
    :return: six dimensional list, or ["nan"] when unknown
    """
    raw_signature = hdf5_getters.get_time_signature(h5)
    if raw_signature == -1:
        return ["nan"]
    # slot 0 is reserved for the "complex" value 1; 3..7 map to 1..5
    slot = 0 if raw_signature == 1 else raw_signature - 2
    encoded = [0] * 6
    encoded[slot] = 1
    return encoded
| {
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/functions/featureExtractionFunctions.py",
"copies": "1",
"size": "5079",
"license": "mit",
"hash": -3977255454482863000,
"line_mean": 28.1896551724,
"line_max": 83,
"alpha_frac": 0.6544595393,
"autogenerated": false,
"ratio": 3.3152741514360313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44697336907360313,
"avg_score": null,
"num_lines": null
} |
__author__ = "Can Ozbek"
import pandas as pd
import numpy as np
import pylab
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import ml_aux_functions as ml_aux
# Read the pickled Million Song Dataset splits.
# NOTE(review): absolute, user-specific paths -- this script only runs on
# the original author's machine as written; parameterize before reuse.
df = pd.read_pickle("/Users/ahmetcanozbek/Desktop/660Stuff/msd.pkl")
df_train = pd.read_pickle("/Users/ahmetcanozbek/Desktop/660Stuff/msd_train.pkl") # 80%
df_test = pd.read_pickle("/Users/ahmetcanozbek/Desktop/660Stuff/msd_test.pkl") # 20%
df_train_t1 = pd.read_pickle("/Users/ahmetcanozbek/Desktop/660Stuff/msd_train_t1.pkl")
df_train_t2 = pd.read_pickle("/Users/ahmetcanozbek/Desktop/660Stuff/msd_train_t2.pkl")
df_train_t3 = pd.read_pickle("/Users/ahmetcanozbek/Desktop/660Stuff/msd_train_t3.pkl")
df_train_t4 = pd.read_pickle("/Users/ahmetcanozbek/Desktop/660Stuff/msd_train_t4.pkl")
df_train_t5 = pd.read_pickle("/Users/ahmetcanozbek/Desktop/660Stuff/msd_train_t5.pkl")
print "Reading Done."
# Show the genre distribution of the first training subset, both as a
# printed count dictionary and as a histogram plot (ml_aux helper).
print "Histogram: "
print ml_aux.getUniqueCount(df_train_t1["Genre"])
ml_aux.plot_histogram(ml_aux.getUniqueCount(df_train_t1["Genre"]))
plt.show()
| {
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/Visualizations & Insights/visualize_data.py",
"copies": "1",
"size": "1056",
"license": "mit",
"hash": 5542411333617014000,
"line_mean": 38.1111111111,
"line_max": 86,
"alpha_frac": 0.7604166667,
"autogenerated": false,
"ratio": 2.550724637681159,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.38111413043811593,
"avg_score": null,
"num_lines": null
} |
__author__ = "Can Ozbek"
import pandas as pd
import numpy as np
import pylab
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
def getUniqueCount(df_column):
    """
    Count how many times each distinct value occurs in a column.

    :param df_column: pandas Series (a single dataframe column)
    :return: dict mapping each unique value to its occurrence count
    """
    return {value: sum(df_column == value)
            for value in df_column.unique().tolist()}
def plot_confusion_matrix(y_true, y_predicted, title='Confusion matrix', cmap=plt.cm.GnBu):
    """
    Draw a labelled confusion-matrix heatmap and return the matrix.

    :param y_true: pandas Series of ground-truth class labels
    :param y_predicted: predicted labels, aligned with y_true
    :param title: figure title
    :param cmap: matplotlib colormap used for the heatmap
    :return: confusion matrix (rows = true labels, columns = predictions)
    """
    # class names in first-seen order define both axes
    labels = y_true.unique().tolist()
    cmatrix = confusion_matrix(y_true, y_predicted, labels)
    plt.imshow(cmatrix, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(labels))
    plt.xticks(ticks, labels, rotation=45)
    plt.yticks(ticks, labels)
    # annotate every cell with its raw count
    for row in range(cmatrix.shape[0]):
        for col in range(cmatrix.shape[1]):
            plt.text(col, row, cmatrix[row][col],
                     horizontalalignment='center',
                     verticalalignment='center')
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return cmatrix
# Load the '|'-separated extracted feature matrix.
# NOTE(review): absolute, user-specific paths throughout this script.
df = pd.read_csv("/Users/ahmetcanozbek/Desktop/EE660/660Project/Extracted_Data/featureMatrix.bin",
                 header = 0, delimiter = "|")
#Getting the headers (Track ID, Song ID, ...)
#Returns a numpy array, to access the elements --> headers[0]
headers = df.columns.values
#Print out the info in original
print "***Original Dataset Shape: ", df.shape
#Clean the dataset: drop every row with any missing feature
df_clean = df.dropna()
print "***Cleaned dataset Shape: ", df_clean.shape
#Read the Genre and Year True Labels
col_meta = ['Song ID', 'Track ID', 'Genre', 'Year']
df_meta = pd.read_csv('/Users/ahmetcanozbek/Desktop/EE660/660Project/Extracted_Data/msd_data_extract_2.bin',
                      header=None, delimiter = "|", names=col_meta)
#Delete Year From small file
df_meta = df_meta.drop('Year', axis=1)
#Merge the data and the Genre and Year labels on the two ID columns
df_merged = pd.merge(df_clean, df_meta, how='left', on=['Track ID', 'Song ID'])
#Getting rid of UNCAT (uncategorized) tracks
df_merged = df_merged[df_merged["Genre"] != 'UNCAT']
#Start SVM for Genre Classification
print ""
print "*Start SVM Classification"
from sklearn.svm import SVC
# NOTE(review): gamma=1000 is an unusually large RBF-kernel value and
# looks prone to overfitting -- confirm against the validation results.
svm_model = SVC(kernel = 'rbf', gamma = 1000)
y = df_merged["Genre"]
# features = everything except the label and the two ID columns
X = df_merged.drop(["Genre","Song ID","Track ID"], axis = 1)
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
svm_model.fit(X_train, y_train)
y_train_predicted = svm_model.predict(X_train)
y_test_predicted = svm_model.predict(X_test)
print "Number of Train Samples: ", (y_train.shape[0])
print "Number of Test Samples: ", (y_test.shape[0])
print "Train Classification Rate: ", (sum(y_train_predicted == y_train)) / float(y_train.shape[0])
print "Test Classification Rate: ", (sum(y_test_predicted == y_test)) / float(y_test.shape[0])
plt.figure()
plot_confusion_matrix(y_train,y_train_predicted,"C")
plt.show()
# --- commented-out experimentation preserved from the original source ---
# cm_train = confusion_matrix(y_train, y_train_predicted, y.unique().tolist())
# print "Confusion Matrix Train: "
# print cm_train
# cm_test = confusion_matrix(y_test, y_test_predicted, y.unique().tolist())
# print "Confusion Matrix Test: "
# print cm_test
# print y.unique().tolist()
# plt.figure()
# plot_confusion_matrix(cm_train,y.unique().tolist(),"CM train")
# plt.show()
# plt.figure()
# plot_confusion_matrix(cm_test,y.unique().tolist(),"CM test")
# plt.show()
# print "Type (df_input): ", type(df_input)
#
# df_input = df_input.dropna()
#
# df_input_data = df_input[range(1, 70)].as_matrix()
# df_input_target = df_input[list(range(0, 1))].as_matrix()
#
# # splitting the data into training and testing sets
# from sklearn.cross_validation import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(df_input_data, df_input_target.tolist())
#
# # Import the random forest package
# from sklearn.svm import SVC
# svc = SVC(probability=True, max_iter=10000, kernel='rbf') # kernel='rbf'
# # svc = SVC(kernel='linear', probability=True, max_iter=10000)
# # svc = SVC(kernel='poly', probability=True, max_iter=10000)
# # svc = SVC(kernel='precomputed', probability=True, max_iter=10000)
# # svc = SVC(kernel='sigmoid', probability=True, max_iter=10000) #results best for sigmoid
# svc.fit(X_train[:],numpy.ravel(y_train[:]))
# predicted = svc.predict(X_test)
#
# # Prediction Performance Measurement
# matches = (predicted == [item for sublist in y_test for item in sublist])
# print matches.sum()
# print len(matches)
#
# print matches[10:50], len(matches[10:50])
#
# print "Accuracy : ", (matches.sum() / float(len(matches)))
| {
"repo_name": "nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015",
"path": "Code/Machine_Learning_Algos/10k_Tests/ml_classification_svm2.py",
"copies": "1",
"size": "4905",
"license": "mit",
"hash": 1026919178911309600,
"line_mean": 32.8275862069,
"line_max": 108,
"alpha_frac": 0.6862385321,
"autogenerated": false,
"ratio": 3.100505689001264,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4286744221101264,
"avg_score": null,
"num_lines": null
} |
"""
Things left to do:
- Get date to increment by interval
- Add recurisive call
"""
from google_flight import google_flight_api
import datetime
def findBestRoute(array, start):
    """
    Greedy cheapest-next-hop search over the given airport codes.

    Queries the Google Flights API for a one-adult fare from the current
    `cheapest` airport to every other airport in `array` on date `start`,
    then records the cheapest destination in the module-level
    `cheapestRoute` dict.

    NOTE(review): mutates the global `cheapestRoute` rather than
    returning a value, and never increments the travel date (see the
    file-level TODO).  Indentation below is reconstructed from a
    whitespace-mangled source -- confirm that the min() selection really
    sits after the loop.
    """
    g = google_flight_api.GoogleFlight('')
    temp = {}
    end = {}  # NOTE(review): never used
    cheapest = array[0]
    for i in range(0,len(array)):
        if(cheapest != array[i]):
            # one-way, single-adult fare request for this candidate leg
            data = {
                "request": {
                    "slice": [
                        {
                            "origin": cheapest,
                            "destination": array[i],
                            "date": start
                        }
                    ],
                    "passengers": {
                        "adultCount": 1,
                        "infantInLapCount": 0,
                        "infantInSeatCount": 0,
                        "childCount": 0,
                        "seniorCount": 0
                    },
                    "solutions": 1,
                    "refundable": 'false'
                }
            }
            g.get(data)
            lowestCost = g.getCost()
            print (lowestCost)
            temp.update({array[i]: str(lowestCost)})
            print(temp)
    # pick the destination with the lowest quoted fare
    cheapest = min(temp, key = temp.get)
    print (cheapest)
    cheapestRoute.update({cheapest : temp[cheapest]})
    temp.clear()
    #missing recursive call here
# Interactive driver: collect a start date and a list of airport codes,
# then run the greedy route search.  Python 2 syntax (print statements,
# raw_input).
desiredAirports = []
cheapestRoute = {}  # filled in by findBestRoute (global side effect)
print "Enter the first date of travel"
year = int(input('Enter a year: '))
month = int(input('Enter a month: '))
day = int(input('Enter a day: '))
startDate = datetime.date(year, month, day)
print startDate
#startDate = datetime.date(year, month, day)
# NOTE(review): Python 2 input() evaluates the typed text; dateInterval
# is also never used afterwards (see the file-level TODO about
# incrementing the date).
dateInterval = input("How many days will you likely spend in each place? ")
getAirports = True
print "Below please input the airport codes you wish to visit, type \"DONE\" when finished: "
# read airport codes until the sentinel "DONE"
while getAirports == True:
    airCode = raw_input()
    if (airCode == "DONE"):
        getAirports = False
    else:
        desiredAirports.append(airCode)
print(desiredAirports)
findBestRoute(desiredAirports,str(startDate))
# report the cheapest legs accumulated by findBestRoute
for key, value in cheapestRoute.items():
    print key, value
| {
"repo_name": "caoimheharvey/Backpacking_Solution",
"path": "tsp.py",
"copies": "1",
"size": "2341",
"license": "mit",
"hash": -3767526770407553500,
"line_mean": 28.6329113924,
"line_max": 93,
"alpha_frac": 0.4882528834,
"autogenerated": false,
"ratio": 4.195340501792114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012013528184493591,
"num_lines": 79
} |
__author__ = 'caoxudong'
"""
Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
getMin() -- Retrieve the minimum element in the stack.
https://oj.leetcode.com/problems/min-stack/
"""
from collections import OrderedDict
class MinStack:
    """Stack supporting push, pop, top and getMin, each in O(1).

    Every entry is stored as a pair (value, minimum of the stack up to
    and including that entry), so the current minimum is always readable
    from the top pair without scanning.
    """

    def __init__(self):
        # list of (value, running_min) tuples
        self.stack = []

    # @param x, an integer
    # @return nothing
    def push(self, x):
        running_min = self.getMin()
        if running_min is None or x < running_min:
            running_min = x
        self.stack.append((x, running_min))

    # @return nothing
    def pop(self):
        value, _ = self.stack.pop()
        return value

    # @return an integer
    def top(self):
        return self.stack[-1][0]

    # @return an integer
    def getMin(self):
        return self.stack[-1][1] if self.stack else None
"repo_name": "caoxudong/code_practice",
"path": "leetcode/155_MinStack.py",
"copies": "1",
"size": "1094",
"license": "mit",
"hash": 1319830387036717800,
"line_mean": 22.9090909091,
"line_max": 97,
"alpha_frac": 0.5639853748,
"autogenerated": false,
"ratio": 3.8118466898954706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48758320646954706,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CarbonBlack, byt3smith'
# stdlib imports
import re
import sys
import time
import urllib.request, urllib.parse, urllib.error
import json
import optparse
import socket
import base64
import hashlib
# cb imports
sys.path.insert(0, "../../")
from .cbfeeds.feed import CbReport
from .cbfeeds.feed import CbFeed
from .cbfeeds.feed import CbFeedInfo
#pypi
from colorama import Fore, Back, Style, init
# Initialize colorama
init(autoreset=True)
def gen_report_id(iocs):
    """
    Build a unique, deterministic id for a report.

    Because generate_feed_from_raw may be run repeatedly on the same
    data, the id must be deterministic: the indicators are sorted and
    hashed in order, so any ordering of the same IOC set yields the
    same id.

    :param iocs: iterable of indicator strings
    :return: hex md5 digest string
    """
    md5 = hashlib.md5()
    # sorted() instead of list.sort(): same deterministic ordering, but
    # without mutating the caller's list (the original sorted in place).
    for ioc in sorted(iocs):
        md5.update(ioc.strip().encode('utf-8'))
    return md5.hexdigest()
def build_reports(options):
    """
    Parse an IOC file and wrap its indicators in a single CbReport.

    Each non-empty line of options['ioc_file'] is classified as an IPv4
    address, an md5 hash, or a DNS name; everything else is silently
    dropped.

    :param options: dict with 'ioc_file', 'feed_link' and 'report_name'
    :return: list containing one CbReport with all parsed indicators
    """
    reports = []
    ips = []
    domains = []
    md5s = []
    # read all of the lines (of text) from the provided
    # input file (of IOCs)
    #
    iocs = options['ioc_file']
    try:
        raw_iocs = open(iocs).readlines()
    # NOTE(review): bare except hides the real error, the file handle is
    # never closed, and exit(0) reports success on failure.
    except:
        print((Fore.RED + '\n[-]' + Fore.RESET), end=' ')
        print('Could not open %s' % iocs)
        exit(0)
    # iterate over each of the lines
    # attempt to determine if each line is a suitable
    # ipv4 address, dns name, or md5
    #
    for raw_ioc in raw_iocs:
        # strip off any leading or trailing whitespace
        # skip any empty lines
        #
        raw_ioc = raw_ioc.strip()
        if len(raw_ioc) == 0:
            continue
        try:
            # attempt to parse the line as an ipv4 address
            #
            socket.inet_aton(raw_ioc)
            # parsed as an ipv4 address!
            #
            ips.append(raw_ioc)
        except Exception as e:
            # attept to parse the line as a md5 and, if that fails,
            # as a domain. use trivial parsing
            #
            if 32 == len(raw_ioc) and \
                re.findall(r"([a-fA-F\d]{32})", raw_ioc):
                md5s.append(raw_ioc)
            elif -1 != raw_ioc.find("."):
                domains.append(raw_ioc)
    # assemble the single report; score is fixed at 100
    fields = {'iocs': {
        },
        'timestamp': int(time.mktime(time.gmtime())),
        'link': options['feed_link'],
        'title': options['report_name'],
        'id': gen_report_id(ips + domains + md5s),
        'score': 100}
    # only include IOC categories that actually matched something
    if len(ips) > 0:
        fields['iocs']['ipv4'] = ips
    if len(domains) > 0:
        fields['iocs']['dns'] = domains
    if len(md5s) > 0:
        fields['iocs']['md5'] = md5s
    reports.append(CbReport(**fields))
    return reports
def create_feed(options):
    """
    Build and serialize a Carbon Black feed from JSON feed metadata.

    :param options: JSON string with the feed metadata fields (name,
        display_name, provider_url, summary, tech_data, icon) plus the
        report fields consumed by build_reports.
    :return: the serialized feed (feed.dump())
    """
    feed_meta = json.loads(options)
    # generate the required feed information fields
    # based on command-line arguments
    #
    feedinfo = {'name': feed_meta['name'],
                'display_name': feed_meta['display_name'],
                'provider_url': feed_meta['provider_url'],
                'summary': feed_meta['summary'],
                'tech_data': feed_meta['tech_data']}
    # if an icon was provided, encode as base64 and
    # include in the feed information
    #
    if feed_meta['icon']:
        try:
            # BUGFIX: the icon is binary data and must be opened in 'rb'
            # mode -- base64.b64encode() rejects str in Python 3.  The
            # with-block also guarantees the handle is closed (the
            # original leaked it), and the except is narrowed from a
            # bare except to OSError.
            with open(feed_meta['icon'], 'rb') as icon_file:
                feedinfo['icon'] = base64.b64encode(icon_file.read())
        except OSError:
            print((Fore.RED + '\n[-]' + Fore.RESET), end=' ')
            print('Could not open %s. Make sure file still exists.\n' % feed_meta['icon'])
    # build a CbFeedInfo instance
    # this does field validation
    #
    feedinfo = CbFeedInfo(**feedinfo)
    # build a list of reports (always one report in this
    # case). the single report will include all the IOCs
    #
    reports = build_reports(feed_meta)
    # build a CbFeed instance
    # this does field validation (including on the report data)
    #
    feed = CbFeed(feedinfo, reports)
    return feed.dump()
| {
"repo_name": "byt3smith/Forager",
"path": "forager/cb/generate_feed.py",
"copies": "1",
"size": "4088",
"license": "mit",
"hash": 3923757986954018000,
"line_mean": 25.8947368421,
"line_max": 92,
"alpha_frac": 0.5741193738,
"autogenerated": false,
"ratio": 3.6895306859205776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4763650059720578,
"avg_score": null,
"num_lines": null
} |
from simpleai.search import SearchProblem, hill_climbing_random_restarts
import sys
import pdb
import random
import time
class KnapsackProblem(SearchProblem):
    """
    0/1 knapsack problem formulated for simpleai's local/genetic search.

    A state is expected to be a sequence of truthy/falsy flags, one per
    object, marking which objects are packed.
    """

    def __init__(self, numObjects, maxWeight, weights, values):
        # BUGFIX: the original line 'super(KnapsackProblem, self)' only
        # built the proxy object and never invoked the base initializer.
        super(KnapsackProblem, self).__init__()
        self.weights = weights          # per-object weights
        self.values = values            # per-object values
        self.maxWeight = maxWeight      # knapsack capacity
        self.numObjects = numObjects    # is it not better compute num?
        # How can I set these as the initial values if not passed in?
        # self.weights = [4,6,5,5,3,2,4,8,1,5,3,7,2,5,6,3,8,4,7,2]
        # self.values = [5,6,2,8,6,5,8,2,7,6,1,3,4,4,1,5,6,2,5,3]
        # self.maximum = 35
        # self.num_objects = len(self.weights)

    def generate_random_state(self):
        '''
        Create a random DNA string (...from existing, probably).
        TODO: not implemented in the original source (returns None).
        '''
        return

    def crossover(self):
        # BUGFIX: this def had no body at all in the original, which is
        # a SyntaxError.  Stubbed with pass; NOTE(review): simpleai's
        # genetic search presumably expects crossover(state1, state2).
        pass

    def mutate(self):
        # BUGFIX: empty body in the original (SyntaxError); stubbed out.
        # NOTE(review): simpleai presumably expects mutate(state).
        pass

    def value(self, s):
        '''
        Determine the sequence's fitness.
        TODO: not implemented in the original source (returns None).
        '''
        return

    def _weight(self, s):
        '''
        Calculate the entire weight of the knapsack given s[x] == Truthy.
        '''
        total = 0
        for pos in range(0, len(s)):
            if s[pos] == True:
                total += self.weights[pos]
        return total

    def _valid(self, s):
        '''Return True when the packed weight fits within maxWeight.'''
        # BUGFIX: the original called '_weight(self, s)' as a bare name,
        # which raises NameError -- _weight is a method on self.
        return self._weight(s) <= self.maxWeight
# Problem instance: 20 objects, capacity 35.
weights = [4,6,5,5,3,2,4,8,1,5,3,7,2,5,6,3,8,4,7,2]
values = [5,6,2,8,6,5,8,2,7,6,1,3,4,4,1,5,6,2,5,3]
maximum = 35
num_objects = len(weights)
starttime = time.time()
problem = KnapsackProblem( num_objects,maximum,weights,values )
# NOTE(review): 'genetic' is never imported -- the top-of-file import
# only brings in hill_climbing_random_restarts, so this line raises
# NameError as written.
result = genetic(problem, iterations_limit=100, population_size=16,mutation_chance=0.10)
endtime = time.time()
# print each state along the solution path (Python 2 print statements)
for i in result.path():
    print i
print('It took ' + str(endtime - starttime) + ' to compute this answer.')
| {
"repo_name": "carmonc/assignment5",
"path": "CarmoneyHwk5.py",
"copies": "1",
"size": "2080",
"license": "mit",
"hash": -1284401116163399200,
"line_mean": 26.7333333333,
"line_max": 89,
"alpha_frac": 0.6048076923,
"autogenerated": false,
"ratio": 3.0498533724340176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41546610647340176,
"avg_score": null,
"num_lines": null
} |
class Precise:
    """
    Arbitrary-precision decimal arithmetic over (integer, decimals) pairs.

    A value is represented as integer * 10**(-decimals); e.g. "1.25" is
    stored as integer=125, decimals=2.  All operations return new
    Precise instances, and reduce() keeps values canonical by stripping
    trailing zero digits.
    """

    def __init__(self, number, decimals=0):
        """
        Build from an int (with an explicit decimals count) or from a
        decimal string, optionally in scientific notation ("1.2e-3").

        :raises RuntimeError: if number is neither str nor int, or if
            decimals is given together with a string.
        """
        is_string = isinstance(number, str)
        is_int = isinstance(number, int)
        if not (is_string or is_int):
            raise RuntimeError('Precise class initiated with something other than a string or int')
        if is_int:
            self.integer = number
            self.decimals = decimals
        else:
            if decimals:
                raise RuntimeError('Cannot set decimals when initializing with a string')
            modifier = 0
            number = number.lower()
            # scientific notation: split off the exponent first
            if 'e' in number:
                number, modifier = number.split('e')
                modifier = int(modifier)
            decimal_index = number.find('.')
            # digits after the decimal point (0 if there is no point)
            self.decimals = len(number) - decimal_index - 1 if decimal_index > -1 else 0
            integer_string = number.replace('.', '')
            self.integer = int(integer_string)
            # the exponent shifts the implied decimal point
            self.decimals = self.decimals - modifier
        self.base = 10
        self.reduce()

    def mul(self, other):
        """Return self * other (integers multiply, decimals add)."""
        integer_result = self.integer * other.integer
        return Precise(integer_result, self.decimals + other.decimals)

    def div(self, other, precision=18):
        """
        Return self / other truncated (rounded toward zero) to
        `precision` decimal places.
        """
        # scale the numerator so the quotient carries `precision` decimals
        distance = precision - self.decimals + other.decimals
        if distance == 0:
            numerator = self.integer
        elif distance < 0:
            exponent = self.base ** -distance
            numerator = self.integer // exponent
        else:
            exponent = self.base ** distance
            numerator = self.integer * exponent
        result, mod = divmod(numerator, other.integer)
        # python floors negative numbers down instead of truncating
        # if mod is zero it will be floored to itself so we do not add one
        result = result + 1 if result < 0 and mod else result
        return Precise(result, precision)

    def add(self, other):
        """Return self + other, aligning decimal places first."""
        if self.decimals == other.decimals:
            integer_result = self.integer + other.integer
            return Precise(integer_result, self.decimals)
        else:
            # scale the coarser value up to the finer one's precision
            smaller, bigger = [other, self] if self.decimals > other.decimals else [self, other]
            exponent = bigger.decimals - smaller.decimals
            normalised = smaller.integer * (self.base ** exponent)
            result = normalised + bigger.integer
            return Precise(result, bigger.decimals)

    def sub(self, other):
        """Return self - other (implemented as self + (-other))."""
        negative = Precise(-other.integer, other.decimals)
        return self.add(negative)

    def abs(self):
        """Return the absolute value."""
        return Precise(abs(self.integer), self.decimals)

    def neg(self):
        """Return the negated value."""
        return Precise(-self.integer, self.decimals)

    def mod(self, other):
        """Return self % other on the decimal-aligned representations."""
        rationizerNumberator = max(-self.decimals + other.decimals, 0)
        numerator = self.integer * (self.base ** rationizerNumberator)
        rationizerDenominator = max(-other.decimals + self.decimals, 0)
        denominator = other.integer * (self.base ** rationizerDenominator)
        result = numerator % denominator
        return Precise(result, rationizerDenominator + other.decimals)

    def pow(self, other):
        """Return self ** other; other must hold an integer exponent."""
        result = self.integer ** other.integer
        return Precise(result, self.decimals * other.integer)

    def reduce(self):
        """Strip trailing zero digits in place and return self."""
        if self.integer == 0:
            self.decimals = 0
            return self
        div, mod = divmod(self.integer, self.base)
        while mod == 0:
            self.integer = div
            self.decimals -= 1
            div, mod = divmod(self.integer, self.base)
        return self

    def __str__(self):
        """Render as a plain decimal string (never exponent notation)."""
        sign = '-' if self.integer < 0 else ''
        # left-pad with zeros so a pure fraction has all its digits
        integer_array = list(str(abs(self.integer)).rjust(self.decimals, '0'))
        index = len(integer_array) - self.decimals
        if index == 0:
            item = '0.'                  # pure fraction: prefix "0."
        elif self.decimals < 0:
            item = '0' * (-self.decimals)  # negative decimals: append zeros
        elif self.decimals == 0:
            item = ''                    # integer: nothing to insert
        else:
            item = '.'
        integer_array.insert(index, item)
        return sign + ''.join(integer_array)

    @staticmethod
    def string_mul(string1, string2):
        """None-propagating string multiply."""
        if string1 is None or string2 is None:
            return None
        return str(Precise(string1).mul(Precise(string2)))

    @staticmethod
    def string_div(string1, string2, precision=18):
        """None-propagating string divide (truncated to `precision`)."""
        if string1 is None or string2 is None:
            return None
        return str(Precise(string1).div(Precise(string2), precision))

    @staticmethod
    def string_add(string1, string2):
        """String add; a single None operand acts as the identity."""
        if string1 is None and string2 is None:
            return None
        if string1 is None:
            return string2
        elif string2 is None:
            return string1
        return str(Precise(string1).add(Precise(string2)))

    @staticmethod
    def string_sub(string1, string2):
        """None-propagating string subtract."""
        if string1 is None or string2 is None:
            return None
        return str(Precise(string1).sub(Precise(string2)))

    @staticmethod
    def string_abs(string):
        """None-propagating string absolute value."""
        if string is None:
            return None
        return str(Precise(string).abs())

    @staticmethod
    def string_neg(string):
        """None-propagating string negation."""
        if string is None:
            return None
        return str(Precise(string).neg())

    @staticmethod
    def string_mod(string1, string2):
        """None-propagating string modulo."""
        if string1 is None or string2 is None:
            return None
        return str(Precise(string1).mod(Precise(string2)))

    @staticmethod
    def string_pow(string1, string2):
        """None-propagating string power."""
        if string1 is None or string2 is None:
            return None
        return str(Precise(string1).pow(Precise(string2)))
| {
"repo_name": "ccxt/ccxt",
"path": "python/ccxt/base/precise.py",
"copies": "1",
"size": "5743",
"license": "mit",
"hash": 7739194745821787000,
"line_mean": 33.0773809524,
"line_max": 99,
"alpha_frac": 0.5930131004,
"autogenerated": false,
"ratio": 4.034531360112755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002539068126059902,
"num_lines": 168
} |
#Imports
from emokit import emotiv
from pykeyboard import PyKeyboard
from sklearn.cross_validation import train_test_split
from time import sleep
from winsound import Beep as beep
import gevent
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import platform
import pyttsx
import time
if platform.system() == "Windows":
import socket
#Constants
VERY_LOW_SOUND = 400       # beep frequency in Hz (winsound.Beep)
LOW_SOUND = 1000           # beep frequency in Hz
HIGH_SOUND = 2000          # beep frequency in Hz
STANDART_TIME = 500        # beep duration in ms (per winsound.Beep docs)
STANDART_SLEEP = 0.5       # pause between cues, in seconds
SECTION_SLEEP = 2          # pause between sections, in seconds
FILE_NAME = 'EEG_XX.csv'   # output file for the recorded EEG samples
#Functions
def start_action(action_command):
    # Press (and hold) the key for this action; k is the global
    # PyKeyboard instance created in the main block.
    k.press_key(action_command)
def stop_action(action_command):
    # Release the key previously pressed by start_action.
    k.release_key(action_command)
def init_experiment_sound():
    """Play the session-start cue: three low beeps, then one high beep,
    each followed by the standard pause.

    Idiom fix: the original spelled out the beep/sleep pair four times;
    the loop produces the identical sequence.
    """
    for frequency in (LOW_SOUND, LOW_SOUND, LOW_SOUND, HIGH_SOUND):
        beep(frequency, STANDART_TIME)
        sleep(STANDART_SLEEP)
def init_experiment_section():
    # High beep marks the start of a recording section.
    beep(HIGH_SOUND, STANDART_TIME)
def end_experiment_section():
    # Low beep marks the end of a recording section.
    beep(LOW_SOUND, STANDART_TIME)
def say(text):
    # Speak `text` aloud and block until done.
    # NOTE(review): `reader` is never defined in this script --
    # presumably a pyttsx engine (pyttsx.init()) was meant to be created
    # at module level; as written this raises NameError.
    reader.say(text)
    reader.runAndWait()
#Main
if __name__ == "__main__":
    #Headset setup
    headset = emotiv.Emotiv(display_output=False)
    gevent.spawn(headset.setup)
    gevent.sleep(0)
    # key sent for each intention class while its section is recorded
    intentions = {0:'.',1:'w',2:'s',3:'q',4:'e'}
    readings_array = []
    readings = 0
    number_of_readings = 150   # NOTE(review): unused below
    experiment = 0             # NOTE(review): unused below
    iteration = 0              # NOTE(review): unused below
    intention = 0              # NOTE(review): unused below
    Class_index = 0
    iters = 1000               # samples recorded per class
    k = PyKeyboard()
    init_experiment_sound()
    print('Calibrating')
    epoc_init = 0
    # Calibration pass: read 1000 packets; the collected rows are
    # discarded afterwards (readings_array is reset below).
    try:
        while epoc_init < 1000:
            packet = headset.dequeue()
            readings_array.append([packet.AF3[0], packet.F7[0], packet.F3[0], packet.FC5[0], packet.T7[0], packet.P7[0], packet.O1[0], packet.O2[0], packet.P8[0], packet.T8[0], packet.FC6[0], packet.F4[0], packet.F8[0], packet.AF4[0], Class_index])
            epoc_init += 1
    # NOTE(review): closing the headset here but continuing into the
    # experiment below looks unintended -- confirm.
    except KeyboardInterrupt:
        headset.close()
    readings_array = []
    try:
        print('Real Experiment')
        gevent.sleep(0.5)
        time_record = time.clock()
        init_experiment_section()
        k.press_key(intentions[Class_index])
        while True:
            #Get the package from the headset
            packet = headset.dequeue()
            # One row = 14 electrode readings + the current class label
            readings_array.append([packet.AF3[0], packet.F7[0], packet.F3[0], packet.FC5[0], packet.T7[0], packet.P7[0], packet.O1[0], packet.O2[0], packet.P8[0], packet.T8[0], packet.FC6[0], packet.F4[0], packet.F8[0], packet.AF4[0], Class_index])
            readings += 1
            #Optional delay for reading packages
            #gevent.sleep(0)
            # Every `iters` samples: close this class section and move
            # on to the next intention class.
            if(readings % iters == 0):
                k.release_key(intentions[Class_index])
                end_experiment_section()
                print('Time: {} Class: {}'.format(time.clock() - time_record,Class_index))
                Class_index += 1
                if Class_index < 5:
                    time_record = time.clock()
                    init_experiment_section()
                    k.press_key(intentions[Class_index])
                # stop once all 5 classes have been recorded
                if(readings % (iters * 5) == 0):
                    break
    #Close headset connection
    except KeyboardInterrupt:
        time.sleep(1)
        headset.close()
    finally:
        # Always persist whatever was recorded and release the headset.
        time.sleep(1)
        data = pd.DataFrame(readings_array, columns=['AF3', 'F7', 'F3', 'FC5', 'T7', 'P7', 'O1', 'O2', 'P8', 'T8', 'FC6', 'F4', 'F8', 'AF4', 'Class'])
        data.to_csv(FILE_NAME, index=False)
        headset.close()
| {
"repo_name": "camm0991/ThesisProject",
"path": "Scripts/01 Signal recording/EEG sampling procedure.py",
"copies": "1",
"size": "3912",
"license": "mit",
"hash": -4394811692026668500,
"line_mean": 29.0923076923,
"line_max": 248,
"alpha_frac": 0.6124744376,
"autogenerated": false,
"ratio": 3.172749391727494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9197960159039831,
"avg_score": 0.017452734057532577,
"num_lines": 130
} |
import re, sys, math
from bs4 import BeautifulSoup
# Stop words excluded from the vocabulary.  Stored as a frozenset
# because membership ('word in STOP_WORDS') is tested once per token in
# the hot parsing/classification loops: O(1) per lookup instead of O(n)
# over a ~300-element list.  All uses in this file are membership tests,
# and the duplicates in the original list ("above", "the") collapse
# harmlessly.
STOP_WORDS = frozenset([
    "a", "about", "above", "across", "after", "afterwards", "again",
    "against", "all", "almost", "alone", "along", "already", "also",
    "although", "always", "am", "among", "amongst", "amoungst", "amount",
    "an", "and", "another", "any", "anyhow", "anyone", "anything",
    "anyway", "anywhere", "are", "around", "as", "at", "back", "be",
    "became", "because", "become", "becomes", "becoming", "been",
    "before", "beforehand", "behind", "being", "below", "beside",
    "besides", "between", "beyond", "bill", "both", "bottom", "but",
    "by", "call", "can", "cannot", "cant", "co", "con", "could",
    "couldnt", "cry", "de", "describe", "detail", "do", "done", "down",
    "due", "during", "each", "eg", "eight", "either", "eleven", "else",
    "elsewhere", "empty", "enough", "etc", "even", "ever", "every",
    "everyone", "everything", "everywhere", "except", "few", "fifteen",
    "fify", "fill", "find", "fire", "first", "five", "for", "former",
    "formerly", "forty", "found", "four", "from", "front", "full",
    "further", "get", "give", "go", "had", "has", "hasnt", "have", "he",
    "hence", "her", "here", "hereafter", "hereby", "herein", "hereupon",
    "hers", "herself", "him", "himself", "his", "how", "however",
    "hundred", "ie", "if", "in", "inc", "indeed", "interest", "into",
    "is", "it", "its", "itself", "keep", "last", "latter", "latterly",
    "least", "less", "ltd", "made", "many", "may", "me", "meanwhile",
    "might", "mill", "mine", "more", "moreover", "most", "mostly",
    "move", "much", "must", "my", "myself", "name", "namely", "neither",
    "never", "nevertheless", "next", "nine", "no", "nobody", "none",
    "noone", "nor", "not", "nothing", "now", "nowhere", "of", "off",
    "often", "on", "once", "one", "only", "onto", "or", "other",
    "others", "otherwise", "our", "ours", "ourselves", "out", "over",
    "own", "part", "per", "perhaps", "please", "put", "rather", "re",
    "same", "see", "seem", "seemed", "seeming", "seems", "serious",
    "several", "she", "should", "show", "side", "since", "sincere",
    "six", "sixty", "so", "some", "somehow", "someone", "something",
    "sometime", "sometimes", "somewhere", "still", "such", "system",
    "take", "ten", "than", "that", "the", "their", "them", "themselves",
    "then", "thence", "there", "thereafter", "thereby", "therefore",
    "therein", "thereupon", "these", "they", "thickv", "thin", "third",
    "this", "those", "though", "three", "through", "throughout", "thru",
    "thus", "to", "together", "too", "top", "toward", "towards",
    "twelve", "twenty", "two", "un", "under", "until", "up", "upon",
    "us", "very", "via", "was", "we", "well", "were", "what", "whatever",
    "when", "whence", "whenever", "where", "whereafter", "whereas",
    "whereby", "wherein", "whereupon", "wherever", "whether", "which",
    "while", "whither", "who", "whoever", "whole", "whom", "whose",
    "why", "will", "with", "within", "without", "would", "yet", "you",
    "your", "yours", "yourself", "yourselves", "reuter", "s"])
#A Naive Bayes learner and classifier
#for the Reuters-21578 dataset
######################################
class NaiveBayes():
def __init__(self):
self.vocabulary = {} #stores the number of times each vocab appears
self.V = {} #stores the number of times each class appears
self.prob_v = {} #a priori probability of a certain class v
self.prob_w = {} #conditional probability of a certain word w given v
self.docs = 0 #number of documents in training set
self.n = {} #number of distinct classes
self.files = [] #files to be processed
self.total = 0 #number of docs to classify in test dataset
self.hits = 0 #number of hits in test dataset
self.found = {} #stores the number of docs per class in test ds
self.hit = {} #keeps the number of hits per class in test ds
def read_files(self, filenames):
for filename in filenames:
with open(filename, "r") as f:
self.files.append(f.read())
def learn(self):
print("TRAINING...")
i = ""
for f in self.files:
self._process_file(f)
i += "|"
print('\r[{0:22}] complete'.format(i), end="", flush=True)
print("\ndone!")
self._calculate_probabilities()
def classify(self):
if not self.prob_v:
print("You have to train the classifier with examples first!")
sys.exit()
print("CLASSIFYING... (this may take a while)")
i = ""
for f in self.files:
self._classify_file(f)
i += "|"
print('\r[{0:22}] complete'.format(i), end="", flush=True)
print("\nDISPLAYING RESULTS...")
self._show_results()
def _process_file(self, file_d):
soup = BeautifulSoup(file_d, "xml")
for r in soup.find_all('REUTERS'):
if r['LEWISSPLIT'] == 'TRAIN' and r.BODY and r.TOPICS.D:
self.docs += 1
topic = []
#extracting topics
for t in r.TOPICS:
topic.append(t.string)
if t.string not in self.V.keys():
self.V[t.string] = 0
self.n[t.string] = set()
self.V[t.string] += 1
#extracting vocabulary
words = re.findall('[A-Za-z]+', r.BODY.string)
for w in words:
word = w.lower()
if word not in STOP_WORDS:
if word not in self.vocabulary.keys():
self.vocabulary[word] = {}
for t in topic:
if t not in self.vocabulary[word].keys():
self.vocabulary[word][t] = 0
self.vocabulary[word][t] += 1
self.n[t].add(word)
def _calculate_probabilities(self):
for w in self.vocabulary.keys():
self.prob_w[w] = {}
for v in self.V.keys():
vsize = len(self.vocabulary.keys())
nsize = len(self.n[v])
self.prob_v[v] = math.log(self.V[v]/self.docs)
if v not in self.vocabulary[w].keys():
self.vocabulary[w][v] = 0
self.prob_w[w][v] = math.log((self.vocabulary[w][v]+1)/(nsize+vsize))
def _get_argmax(self, word_list):
argmax = "not found"
maxval = -sys.maxsize
for v in self.V.keys():
accum = 0.0
for w in word_list:
word = w.lower()
if word not in STOP_WORDS and word in self.vocabulary.keys():
accum += self.prob_w[word][v]
accum += self.prob_v[v]
if accum > maxval:
maxval = accum
argmax = v
return argmax
    def _classify_file(self, file_d):
        """Classify every TEST-split article in one SGML file and tally hits.

        A prediction counts as a hit when the argmax topic appears anywhere
        in the article's gold TOPICS list; the hit is then credited to every
        gold topic of that article.
        """
        soup = BeautifulSoup(file_d, "xml")
        for r in soup.find_all('REUTERS'):
            if r['LEWISSPLIT'] == 'TEST' and r.BODY and r.TOPICS.D:
                self.total += 1
                words = re.findall('[A-Za-z]+', r.BODY.string)
                v_max = self._get_argmax(words)
                topics = []
                for t in r.TOPICS:
                    if t.string not in self.found.keys():
                        self.found[t.string] = 0
                        self.hit[t.string] = 0
                    self.found[t.string] += 1
                    topics.append(t.string)
                if v_max in topics:
                    for t in topics:
                        self.hit[t] += 1
                    self.hits += 1
def _show_results(self):
results = sorted(self.found.items(), key=lambda x: x[1], reverse=True)
print(" class | found | hits | rate ")
print("--------------------------------------------------")
for r in results:
f = r[0]
found = r[1]
hits = self.hit[f]
rate = self.hit[f]/found
print('{0:17}|{1:7d}|{2:8}|{3:15}'.format(f, found, hits, rate))
#main program
#############
def main():
    """Entry point: train and evaluate the classifier on Reuters-21578.

    The corpus files are expected at reuters21578/reut2-000.sgm ... reut2-021.sgm.
    """
    filenames = ["reuters21578/reut2-{:03d}.sgm".format(i) for i in range(22)]
    nb = NaiveBayes()
    nb.read_files(filenames)
    nb.learn()
    nb.classify()


if __name__ == "__main__":
    main()
| {
"repo_name": "elmadjian/pcs5735",
"path": "aula3/exercise03.py",
"copies": "1",
"size": "9424",
"license": "mpl-2.0",
"hash": -6139875195463779000,
"line_mean": 45.6534653465,
"line_max": 85,
"alpha_frac": 0.4644524618,
"autogenerated": false,
"ratio": 3.6148830072880704,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45793354690880705,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
import numpy as np
import sys
import matplotlib.pyplot as plt
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.utils import np_utils
from keras.callbacks import Callback
#Necessary to recover accuracy data from tests
#---------------------------------------------
class TestCallback(Callback):
    """Keras callback that records test-set accuracy after every epoch.

    The collected values are available in self.accuracy, one entry per epoch.
    """

    def __init__(self, testing_set, testing_target):
        self.testing_set = testing_set
        self.testing_target = testing_target
        self.accuracy = []

    def on_epoch_end(self, epoch, logs={}):
        loss, acc = self.model.evaluate(self.testing_set, self.testing_target, verbose=0)
        self.accuracy.append(acc)
#initial setup
#--------------
def main():
    """Train a small dense network on a 24-feature binary dataset and plot results.

    Expects one command-line argument: the whitespace-separated numeric
    dataset file (see load_dataset). The last 25 rows are held out as test.
    """
    if len(sys.argv) != 2:
        print("usage: <this_program> <dataset_file>")
        sys.exit()
    #initial settings
    np.random.seed(7)  # fixed seed for reproducible runs
    nb_epoch = 250
    #loading data
    print('Loading data...')
    dataset, target = load_dataset(sys.argv[1])
    # last 25 rows form the test split
    training_set, training_target = dataset[:-25], target[:-25]
    testing_set, testing_target = dataset[-25:], target[-25:]
    print(len(training_set), 'train sequences')
    print(len(testing_set), 'test sequences')
    nb_classes = 2
    print(nb_classes, 'classes')
    #normalizing classes
    print('Convert class vector to binary class matrix (for use with categorical_crossentropy)')
    training_target = np_utils.to_categorical(training_target, nb_classes)
    testing_target = np_utils.to_categorical(testing_target, nb_classes)
    #building the network
    print('Building model...')
    model = Sequential()
    model.add(Dense(24, input_dim=24))
    # NOTE(review): softmax on a hidden layer is unusual; presumably
    # intentional for this exercise, but relu/sigmoid would be conventional.
    model.add(Activation('softmax'))
    model.add(Dense(12))
    model.add(Activation('relu'))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    tester = TestCallback(testing_set, testing_target)
    history = model.fit(training_set, training_target,
                        nb_epoch=nb_epoch, batch_size=12,
                        callbacks=[tester],
                        verbose=1, validation_split=0.1)
    score = model.evaluate(testing_set, testing_target,
                           batch_size=25, verbose=1)
    #show the results
    print('\n\nTest score:', score[0])
    print('Test accuracy:', score[1])
    p1, = plt.plot(history.history['loss'], 'r-')
    # NOTE(review): the 'acc' history key is old-Keras style; newer Keras
    # uses 'accuracy' -- confirm against the installed version.
    p2, = plt.plot(history.history['acc'], 'b-')
    p3, = plt.plot(tester.accuracy, 'g-')
    plt.legend([p1, p2, p3], ['perda', 'acurácia', 'dados de teste'])
    plt.show()
#Expects the "german data numeric" file as parameter
#---------------------------------------------------
def load_dataset(filename):
    """Parse a whitespace-separated numeric dataset file.

    Each row's last column is the class label, stored shifted down by one
    (labels 1/2 become 0.0/1.0); the remaining columns are the features.

    Returns:
        (dataset, target): list of feature rows and list of float labels.
    """
    dataset = []
    target = []
    with open(filename, 'r') as source:
        for row in source:
            numbers = [float(token) for token in row.split()]
            dataset.append(numbers[:-1])
            target.append(numbers[-1] - 1)
    return dataset, target
#-----------------------
# Run the experiment only when executed as a script.
if __name__=="__main__":
    main()
| {
"repo_name": "elmadjian/pcs5735",
"path": "aula2/exercise04.py",
"copies": "1",
"size": "3344",
"license": "mpl-2.0",
"hash": -6357290127504135000,
"line_mean": 32.42,
"line_max": 96,
"alpha_frac": 0.6029323758,
"autogenerated": false,
"ratio": 3.793416572077185,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4896348947877185,
"avg_score": null,
"num_lines": null
} |
import numpy as np
#Class to model a single neuron
#------------------------------
class Neuron():
    """A single sigmoid-style unit wired into a feed-forward network.

    Attributes:
        idx:     1-based slot of this neuron within its layer; forward links
                 use it to index their weight/input lists (slot 0 is the bias).
        eta:     learning rate used by update_weights.
        weight:  weights, one per input plus the bias weight at index 0.
        input:   input values; slot 0 stays 1.0 as the bias input.
        f_links: neurons in the next layer (forward links).
        b_links: neurons in the previous layer (backward links).
        output:  last activation, None until calculate_output runs.
        delta:   last error term, None until calculate_delta runs.
    """
    def __init__(self, idx, eta, inputs):
        self.idx = idx
        self.eta = eta
        self.weight = [0.0 for i in range(inputs+1)]
        self.input = [1.0 for i in range(inputs+1)]
        self.f_links = []
        self.b_links = []
        self.output = None
        self.delta = None
    def set_input(self, idx, x):
        # Store input x at slot idx (slot 0 is reserved for the bias).
        self.input[idx] = x
    def set_weight(self, idx, w):
        self.weight[idx] = w
    def set_weight_list(self, new_list):
        # Replace the whole weight vector (used to seed the fixed exercise weights).
        self.weight = new_list
    def set_b_links(self, p):
        self.b_links.append(p)
    def set_f_links(self, p):
        self.f_links.append(p)
    def get_param(self):
        # Back-propagated error signal: sum over forward links of
        # (that link's weight from this neuron) * (that link's delta).
        sum = 0
        for f in self.f_links:
            sum += f.weight[self.idx] * f.delta
        return sum
    def calculate_delta(self, param):
        # Sigmoid derivative, output * (1 - output), times the error signal.
        self.delta = self.output * (1.0 - self.output) * param
    def calculate_output(self):
        y = np.dot(self.weight, self.input)
        # NOTE(review): this computes 1/(1+exp(+y)) == sigmoid(-y); the
        # standard logistic activation is 1/(1+exp(-y)). Possibly intentional
        # to reproduce the exercise's expected numbers -- confirm before changing.
        self.output = 1/(1 + np.exp(y))
    def propagate_output(self):
        # Push this activation into slot self.idx of each forward link.
        for l in self.f_links:
            l.set_input(self.idx, self.output)
    def update_weights(self):
        # Gradient step: w_i += eta * delta * x_i.
        for i in range(len(self.weight)):
            self.weight[i] += self.eta * self.delta * self.input[i]
#Calculates the BPG algorithm for exercise 01
#--------------------------------------------
def backpropagation(training_set, n_in, n_out, n_hidden, eta=0.05, epochs=1):
    """Run backpropagation on a fixed two-layer network and print final weights.

    Args:
        training_set: list of [input_vector, target_vector] pairs.
        n_in:     number of input features.
        n_out:    number of output neurons.
        n_hidden: number of hidden neurons.
        eta:      learning rate.
        epochs:   number of passes over the training set.

    The initial weights are hard-coded below so the exercise reproduces a
    known worked example (index 0 of each list is the bias weight).
    """
    P_hidden = [Neuron(i+1, eta, n_in) for i in range(n_hidden)]
    P_out = [Neuron(i+1, eta, n_hidden) for i in range(n_out)]
    #setting links
    for pi in P_hidden:
        for pj in P_out:
            pj.set_b_links(pi)
            pi.set_f_links(pj)
    #manually setting weights
    P_out[0].set_weight_list([-0.1, -0.4, 0.1, 0.6])
    P_out[1].set_weight_list([0.6, 0.2, -0.1, -0.2])
    P_hidden[0].set_weight_list([0.1, -0.2, 0.0, 0.2])
    P_hidden[1].set_weight_list([0.2, -0.2, 0.1, 0.3])
    P_hidden[2].set_weight_list([0.5, 0.3, -0.4, 0.2])
    #training
    for i in range(epochs):
        for sample in training_set:
            #forwarding the input
            X = [1.0] + sample[0]  # prepend the constant bias input
            Y = sample[1]
            for p in P_hidden:
                p.input = X
                p.calculate_output()
                p.propagate_output()
            for p in P_out:
                p.calculate_output()
            #backwards propagation
            # NOTE(review): this inner 'i' shadows the epoch counter above;
            # harmless with the current loop order, but fragile.
            for i in range(len(P_out)):
                P_out[i].calculate_delta(Y[i] - P_out[i].output)
            for p in P_hidden:
                p.calculate_delta(p.get_param())
            #updating network weights
            [p.update_weights() for p in P_out]
            [p.update_weights() for p in P_hidden]
    print("printing weights after %d epochs:" % epochs)
    print("\nP_hidden:")
    [print(['%.4f' % w for w in p.weight]) for p in P_hidden]
    print("\nP_out:")
    [print(['%.4f' % w for w in p.weight]) for p in P_out]
#initial setup
#--------------
def main():
    """Exercise driver: one backprop epoch over two hand-crafted samples."""
    samples = [[[0.6, 0.1, 0.2], [1, 0]], [[0.1, 0.5, 0.6], [0, 1]]]
    backpropagation(samples, 3, 2, 3, epochs=1)


#-----------------------
if __name__ == "__main__":
    main()
| {
"repo_name": "elmadjian/pcs5735",
"path": "aula2/exercise01.py",
"copies": "1",
"size": "3304",
"license": "mpl-2.0",
"hash": 6453538485390030000,
"line_mean": 28.2300884956,
"line_max": 77,
"alpha_frac": 0.5165001514,
"autogenerated": false,
"ratio": 3.0754189944134076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40919191458134074,
"avg_score": null,
"num_lines": null
} |
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
theta_list = []  # model parameters (module-level; see NOTE in main)
x_list = []      # feature rows, each ending with a 1.0 bias term
y_list = []      # labels: 1.0 for "yes" rows, 0.0 otherwise
def main():
    """Load a CSV of examples, fit a logistic model by SGD, and plot the fit.

    Expects one command-line argument: the CSV file. The header row names
    the columns; the last data column is a yes/no label.
    """
    if len(sys.argv) != 2:
        print("modo de usar: <este_programa> <arquivo_csv>")
        sys.exit()
    csv_file = sys.argv[1]
    with open(csv_file, "r") as arquivo:
        classes = arquivo.readline().split(",")
        # NOTE(review): this binds a LOCAL theta_list shadowing the module
        # global; one theta per column (the label column's slot serves the bias).
        theta_list = [0.0 for i in range(len(classes))]
        for line in arquivo:
            values = line.split(",")
            curr_x = [float(i) for i in values[:-1]]
            curr_x.append(1.0)  # bias term
            x_list.append(curr_x)
            y_list.append(1.0) if values[-1].startswith("yes") else y_list.append(0.0)
    logistic_regression(theta_list, x_list, y_list, 0.0005, 0.0000001)
    plot(theta_list, x_list, y_list)
#The logistic regression algorithm using SGD
#-------------------------------------------
def logistic_regression(theta_list, x_list, y_list, alpha, epsilon):
    """Fit theta_list in place by stochastic gradient descent.

    Sweeps over the data until the cost J changes by at most epsilon
    between sweeps, giving up after 10000 sweeps.
    """
    cost_before, cost_after = 0, J(theta_list, x_list, y_list)
    sweeps = 0
    while abs(cost_after - cost_before) > epsilon:
        if sweeps == 10000:
            print("too much iterations")
            break
        sweeps += 1
        for j, _ in enumerate(theta_list):
            for sample, label in zip(x_list, y_list):
                error = h_theta(theta_list, sample) - label
                theta_list[j] -= alpha * error * sample[j]
        cost_before, cost_after = cost_after, J(theta_list, x_list, y_list)
#Calculates the minimum cost function
#------------------------------------
def J(theta_list, x_list, y_list):
    """Half the sum of squared prediction errors over the dataset."""
    total = 0
    for sample, label in zip(x_list, y_list):
        total += (h_theta(theta_list, sample) - label) ** 2
    return total / 2
#Calculates h_theta
#-------------------
def h_theta(theta, x):
    """Logistic hypothesis: the sigmoid of the dot product theta·x."""
    z = np.dot(theta, x)
    return 1.0 / (1.0 + np.exp(-z))
#Binary classifier
#------------------
def predict(theta, x, y):
    """Bernoulli likelihood of label y (0.0 or 1.0) under the logistic model."""
    p = h_theta(theta, x)
    return (p ** y) * ((1.0 - p) ** (1.0 - y))
#DEBUG: Plot our findings
#------------------------
def plot(theta_list, x_list, y_list):
    """Scatter z = theta·x against each label, colour-coded by whether the
    model's prediction matched, then print the hit rate and show the figure."""
    new_x_list = [i[0] for i in x_list]  # NOTE(review): unused
    new_y_list = [i[1] for i in x_list]  # NOTE(review): unused
    hit, p1, p2, p3, p4 = 0, 0, 0, 0, 0
    for i in range(len(y_list)):
        if y_list[i] == 1.0:
            if predict(theta_list, x_list[i], y_list[i]) >= 0.5:
                p1, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'go')
                hit += 1
            else:
                p2, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'gx')
        elif y_list[i] == 0.0 :
            if predict(theta_list, x_list[i], y_list[i]) >= 0.5:
                p3, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'ro')
                hit += 1
            else:
                p4, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'rx')
    # NOTE(review): if any of the four cases never occurs, the corresponding
    # p1..p4 stays the int 0 and plt.legend below may misbehave -- confirm.
    plt.title("Regressão logística sobre os dados de 'students.csv'")
    plt.xlabel("z")
    plt.ylabel("g(z)")
    hit_true = 'P(y=admitido) = admitido'
    hit_false = 'P(y=admitido) = não admitido'
    miss_true = 'P(y=não admitido) = não admitido'
    miss_false ='P(y=não admitido) = admitido'
    plt.legend([p1,p2,p3,p4],[hit_true, hit_false, miss_true, miss_false])
    print("hit rate:", hit/len(y_list))
    plt.show()
#-----------------------
# Run only when executed as a script.
if __name__=="__main__":
    main()
| {
"repo_name": "elmadjian/pcs5735",
"path": "aula1/logistic_regression.py",
"copies": "1",
"size": "3369",
"license": "mpl-2.0",
"hash": -1257968575261042400,
"line_mean": 30.7169811321,
"line_max": 86,
"alpha_frac": 0.5214158239,
"autogenerated": false,
"ratio": 2.8491525423728814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3870568366272881,
"avg_score": null,
"num_lines": null
} |
import sys
import matplotlib.pyplot as plt
import numpy as np
theta_list = []  # model parameters (module-level; see NOTE in main)
x_list = []      # feature rows; the last slot is overwritten with the 1.0 bias
y_list = []      # regression targets (last CSV column)
def main():
    """Load a CSV dataset, fit a linear model, and plot the result.

    Expects one command-line argument: the CSV file. The header row is used
    only to size theta; the last data column is both the target and, after
    being overwritten with 1.0, the bias slot of each feature row.
    """
    if len(sys.argv) != 2:
        print("modo de usar: <este_programa> <arquivo_csv>")
        sys.exit()
    csv_file = sys.argv[1]
    with open(csv_file, "r") as arquivo:
        classes = arquivo.readline().split(",")
        # NOTE(review): this binds a LOCAL theta_list shadowing the module
        # global; it is replaced again by normal_equations below.
        theta_list = [0.0 for i in range(len(classes))]
        for line in arquivo:
            values = line.split(",")
            curr_x = [float(i) for i in values]
            curr_x[-1] = 1.0  # replace the target column with the bias term
            x_list.append(curr_x)
            y_list.append(float(values[-1]))
    # Alternative solvers kept for the exercise:
    #batch_gradient_descent(theta_list, x_list, y_list, 0.000005, 0.00001)
    #stochastic_gradient_descent(theta_list, x_list, y_list, 0.000005, 0.00001)
    theta_list = normal_equations(x_list, y_list)
    plot(theta_list, x_list, y_list)
#Minimum cost function
#----------------------
def J(theta_list, x_list, y_list):
    """Half the sum of squared residuals of the linear hypothesis."""
    total = 0
    for features, target in zip(x_list, y_list):
        total += (h_theta(theta_list, features) - target) ** 2
    return total / 2
#The h_theta function, used by J
#--------------------------------
def h_theta(theta_list, x_list_i):
    """Linear hypothesis: the dot product theta·x."""
    prediction = np.dot(theta_list, x_list_i)
    return prediction
#BGD algorithm for linear regression
#------------------------------------
def batch_gradient_descent(theta_list, x_list, y_list, alpha, epsilon):
    """Fit theta_list in place by batch gradient descent.

    Stops when the cost J changes by at most epsilon between iterations,
    or after 10000 iterations; prints the iteration count either way.
    """
    previous_cost, current_cost = 0, J(theta_list, x_list, y_list)
    iterations = 0
    while (abs(current_cost - previous_cost) > epsilon):
        iterations += 1
        if iterations > 10000:
            print("too much iterations")
            break
        for j, _ in enumerate(theta_list):
            gradient = 0
            for features, target in zip(x_list, y_list):
                prediction = h_theta(theta_list, features)
                gradient += (prediction - target) * features[j]
            theta_list[j] -= alpha * gradient
        previous_cost, current_cost = current_cost, J(theta_list, x_list, y_list)
    print("iterations:", iterations)
#SGD algorithm for linear regression
#-----------------------------------
def stochastic_gradient_descent(theta_list, x_list, y_list, alpha, epsilon):
    """Fit theta_list in place by stochastic gradient descent.

    Stops when the cost J changes by at most epsilon between sweeps,
    or after 10000 sweeps; prints the sweep count either way.
    """
    previous_cost, current_cost = 0, J(theta_list, x_list, y_list)
    sweeps = 0
    while (abs(current_cost - previous_cost) > epsilon):
        sweeps += 1
        if sweeps > 10000:
            print("too much iterations")
            break
        for j, _ in enumerate(theta_list):
            for features, target in zip(x_list, y_list):
                residual = h_theta(theta_list, features) - target
                theta_list[j] -= alpha * residual * features[j]
        previous_cost, current_cost = current_cost, J(theta_list, x_list, y_list)
    print("iterations:", sweeps)
#Normal equations method for linear regression
#---------------------------------------------
def normal_equations(x_list, y_list):
    """Solve the least-squares normal equations: theta = (XᵀX)⁻¹ Xᵀ y.

    Improvement: uses np.linalg.solve on (XᵀX) theta = Xᵀy instead of
    forming the explicit inverse -- same mathematical result, but cheaper
    and numerically more stable.

    Raises:
        numpy.linalg.LinAlgError: if XᵀX is singular (as before).
    """
    X = np.array(x_list)
    y = np.array(y_list)
    return np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))
#DEBUG: Plot our findings. This code was not meant to be generic
#---------------------------------------------------------------
def plot(theta_list, x_list, y_list):
    """Scatter the data and overlay the fitted line.

    Hard-wired to the Iris exercise: feature column 1 goes on the x axis,
    and only theta/feature slots 1 and 3 feed the fitted curve.
    """
    new_x_list = [i[1] for i in x_list]
    plt.plot(new_x_list, y_list, 'ro')
    # Partial prediction using only components 1 and 3 of theta and x.
    plt.plot(new_x_list, [np.dot((theta_list[1], theta_list[3]), (i[1], i[3])) for i in x_list])
    plt.title("Regressão SGD sobre os dados em 'Iris Dataset'")
    plt.xlabel("Largura da sépala")
    plt.ylabel("Largura da pétala")
    plt.show()
#-----------------------
# Run only when executed as a script.
if __name__=="__main__":
    main()
| {
"repo_name": "elmadjian/pcs5735",
"path": "aula1/linear_regression.py",
"copies": "1",
"size": "3543",
"license": "mpl-2.0",
"hash": -2575507641784892000,
"line_mean": 31.4678899083,
"line_max": 96,
"alpha_frac": 0.5289629839,
"autogenerated": false,
"ratio": 3.1071115013169446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9099602387292778,
"avg_score": 0.0072944195848332494,
"num_lines": 109
} |
__author__ = 'Carlos'
from django.db import models
from django.core.management.base import BaseCommand, CommandError
from boe_api.state_documents.models import Diario ,Documento, Departamento, Rango, Origen_legislativo, DocumentoAnuncio, \
Modalidad, Tipo, Tramitacion, Precio, Procedimiento, DocumentoBORME
from boe_api.state_documents.models import Estado_consolidacion, Nota, Materia, Alerta, Palabra, Referencia
from boe_api.state.models import Legislatura
import os
import sys
import locale
import redis
import re
from datetime import datetime
from lxml import etree, objectify
from bs4 import BeautifulSoup
from pattern.web import URL
class ProcessDocument():
url_a_pattern = "http://www.boe.es/diario_boe/xml.php?id={0}"
url_a_html_pattern = "http://www.boe.es/diario_boe/txt.php?id={0}"
xmlDoc = None
rootXML = None
doc = Documento()
metadatos = None
    def __init__(self, url_xml):
        """Download, parse and persist the BOE document at *url_xml*.

        The whole pipeline runs inside the constructor: fetch the XML,
        objectify it, cache the <metadatos>/<analisis> nodes, then build
        and save the corresponding Documento model.
        """
        self.last_legislatura = Legislatura.objects.get_or_none(final__isnull = True)
        self.url = url_xml
        self.downloadXML()
        self.xmlToObject()
        self.getMetadatos()
        self.getAnalisis()
        self.createDocument()
def saveDoc(self):
try:
self.doc.save()
except:
raise Exception
def isDocumentoAnuncio(self):
seccion = self.getElement(self.metadatos, 'seccion')
subseccion = self.getElement(self.metadatos, 'subseccion')
return seccion == '5' and subseccion == 'A'
def isBORME(self):
diario_id, diario = self.getElementCodigoTitulo(self.metadatos, 'diario')
if (diario_id == "BORME"):
return True
return False
def processReferencias(self, doc):
if self.existElement(self.analisis, 'referencias'):
ref = self.analisis.referencias
if self.existElement(ref, 'anteriores'):
if self.existElement(ref.anteriores, 'anterior'):
ref_ant = []
for anterior in ref.anteriores.anterior:
referencia = anterior.get('referencia')
doc_ref = self.get_or_create(Documento, identificador=referencia)
palabra_codigo = anterior.palabra.get('codigo')
palabra_texto = anterior.palabra.text
texto = anterior.texto.text
palabra = self.get_or_create(Palabra, codigo=palabra_codigo, titulo=palabra_texto)
busqueda = dict(referencia=doc_ref, palabra=palabra)
insert = dict(texto=texto)
ref = self.get_or_create(Referencia, busqueda=busqueda, insert=insert)
ref_ant.append(ref)
doc.referencias_anteriores = ref_ant
if self.existElement(ref, 'posteriores'):
if self.existElement(ref.posteriores, 'posterior'):
ref_post = []
for posterior in ref.posteriores.posterior:
referencia = posterior.get('referencia')
doc_ref = self.get_or_create(Documento, identificador=referencia)
palabra_codigo = posterior.palabra.get('codigo')
palabra_texto = posterior.palabra.text
texto = posterior.texto.text
palabra = self.get_or_create(Palabra, codigo=palabra_codigo, titulo=palabra_texto)
busqueda = dict(referencia=doc_ref, palabra=palabra)
insert = dict(texto=texto)
ref = self.get_or_create(Referencia, busqueda=busqueda, insert=insert)
ref_post.append(ref)
doc.referenicas_posteriores = ref_post
    def createDocument(self):
        """Build and save the right Documento subtype from the parsed XML.

        Chooses DocumentoAnuncio (section 5A), DocumentoBORME (BORME diary)
        or plain Documento; fills the subtype-specific fields first, then
        the fields common to all document types, and finally saves.
        """
        identificador = self.getElement(self.metadatos, 'identificador')
        if not identificador:
            raise Exception
        if self.isDocumentoAnuncio():
            # --- section 5A announcement: tender/contract specific fields ---
            self.doc = self.get_or_create(DocumentoAnuncio, identificador=identificador)
            mod_codigo, mod_titulo = self.getElementCodigoTitulo(self.analisis, 'modalidad')
            self.doc.modalidad = self.get_or_create(Modalidad, codigo=mod_codigo, titulo=mod_titulo)
            tipo_codigo, tipo_titulo = self.getElementCodigoTitulo(self.analisis, 'tipo')
            self.doc.tipo = self.get_or_create(Tipo, codigo=tipo_codigo, titulo=tipo_titulo)
            tram_codigo, tram_titulo = self.getElementCodigoTitulo(self.analisis, 'tramitacion')
            self.doc.tramitacion = self.get_or_create(Tramitacion, codigo=tram_codigo, titulo=tram_titulo)
            proc_codigo, proc_titulo = self.getElementCodigoTitulo(self.analisis, 'procedimiento')
            self.doc.procedimiento = self.get_or_create(Procedimiento, codigo=proc_codigo, titulo=proc_titulo)
            self.doc.fecha_presentacion_ofertas = self.getElement(self.analisis, 'fecha_presentacion_ofertas')
            self.doc.fecha_apertura_ofertas = self.getElement(self.analisis, 'fecha_apertura_ofertas')
            precio_codigo, precio_titulo = self.getElementCodigoTitulo(self.analisis, 'precio')
            self.doc.precio = self.get_or_create(Precio, codigo=precio_codigo, titulo=precio_titulo)
            importe = self.getElement(self.analisis, 'importe')
            if isinstance(importe, str):
                self.doc.importe = self.stringToFloat(importe)
            self.doc.ambito_geografico = self.getElement(self.analisis, 'ambito_geografico')
            self.doc.materias_anuncio = self.getElement(self.analisis, 'materias')
            self.doc.materias_cpv = self.getElement(self.analisis, 'materias_cpv')
            self.doc.observaciones = self.getElement(self.analisis, 'observaciones')
            if self.doc.materias_cpv:
                # CPV subject lines look like "<numeric code> <title>", one per line.
                materias = []
                for materia in self.doc.materias_cpv.split('\n'):
                    m = re.match('(\d+)\ (.*)', materia)
                    if m:
                        code = int(m.group(1))
                        titulo = m.group(2)
                        mat = self.get_or_create(Materia, codigo=code, titulo=titulo)
                        materias.append(mat)
                self.doc.materias_licitaciones = materias
        elif self.isBORME():
            # --- companies-registry gazette document ---
            self.doc = self.get_or_create(DocumentoBORME, identificador=identificador)
            self.doc.departamento_anuncio = self.getElement(self.metadatos, 'departamento')
            self.doc.num_anuncio = self.getElement(self.metadatos, 'numero_anuncio')
            self.doc.anuncio_id = self.getElement(self.metadatos, 'id_anuncio')
        else:
            self.doc = self.get_or_create(Documento, identificador=identificador)
        # --- fields common to every document type ---
        doc = self.doc
        doc.seccion = self.getElement(self.metadatos, 'seccion')
        doc.subseccion = self.getElement(self.metadatos, 'subseccion')
        doc.titulo = self.getElement(self.metadatos, 'titulo')
        diario_codigo, diario_titulo = self.getElementCodigoTitulo(self.metadatos, 'diario')
        doc.diario = self.get_or_create(Diario, codigo=diario_codigo, titulo=diario_titulo)
        doc.diario_numero = self.getElement(self.metadatos, 'diario_numero')
        dep_codigo, dep_titulo = self.getElementCodigoTitulo(self.metadatos, 'departamento')
        if dep_codigo:
            doc.departamento = self.get_or_create(Departamento, codigo=dep_codigo, titulo=dep_titulo)
        rango_codigo, rango_titulo = self.getElementCodigoTitulo(self.metadatos, 'rango')
        # NOTE(review): with `or` this condition is always True (None passes
        # the first test, '' passes the second) -- `and` was probably meant.
        # The bare try/except below swallows the resulting int() failures.
        if rango_codigo != '' or rango_codigo is not None:
            try:
                doc.rango = self.get_or_create(Rango, codigo=int(rango_codigo), titulo=rango_titulo)
            except:
                pass
        doc.numero_oficial = self.getElement(self.metadatos, 'numero_oficial')
        doc.fecha_disposicion = self.textToDate(self.getElement(self.metadatos, 'fecha_disposicion'))
        if doc.fecha_disposicion:
            # Attach the parliamentary term: the open-ended current one when
            # the date falls inside it, otherwise the closed term containing it.
            if self.last_legislatura and doc.fecha_disposicion.date() >= self.last_legislatura.inicio:
                doc.legislatura = self.last_legislatura
                print doc.legislatura
            else:
                legislatura = Legislatura.objects.get_or_none(inicio__lte = doc.fecha_disposicion, final__gt = doc.fecha_disposicion)
                print legislatura
                if legislatura is not None:
                    print legislatura
                    doc.legislatura = legislatura
        doc.fecha_publicacion = self.textToDate(self.getElement(self.metadatos, 'fecha_publicacion'))
        doc.fecha_vigencia = self.textToDate(self.getElement(self.metadatos, 'fecha_vigencia'))
        doc.fecha_derogacion = self.textToDate(self.getElement(self.metadatos, 'fecha_derogacion'))
        doc.letra_imagen = self.getElement(self.metadatos, 'letra_imagen')
        doc.pagina_inicial = int(self.getElement(self.metadatos, 'pagina_inicial'))
        doc.pagina_final = int(self.getElement(self.metadatos, 'pagina_final'))
        doc.suplemento_pagina_inicial = self.getElement(self.metadatos, 'suplemento_pagina_inicial')
        doc.suplemento_pagina_final = self.getElement(self.metadatos, 'suplemento_pagina_final')
        doc.estatus_legislativo = self.getElement(self.metadatos, 'estatus_legislativo')
        origen_leg_cod, origen_leg_titulo = self.getElementCodigoTitulo(self.metadatos, 'origen_legislativo')
        doc.origen_legislativo = self.get_or_create(Origen_legislativo, codigo=origen_leg_cod, titulo=origen_leg_titulo)
        est_cons_cod, est_cons_titulo = self.getElementCodigoTitulo(self.metadatos, 'estado_consolidacion')
        if est_cons_cod != None and est_cons_cod != '':
            doc.estado_consolidacion = self.get_or_create(Estado_consolidacion, codigo=int(est_cons_cod), titulo=est_cons_titulo)
        doc.judicialmente_anulada = self.SiNoToBool(self.getElement(self.metadatos, 'judicialmente_anulada'))
        doc.vigencia_agotada = self.SiNoToBool(self.getElement(self.metadatos, 'vigencia_agotada'))
        doc.estatus_derogacion = self.SiNoToBool(self.getElement(self.metadatos, 'estatus_derogacion'))
        # Canonical URLs for the various renderings of this document.
        doc.url_htm = self.url_a_html_pattern.format(doc.identificador)
        doc.url_xml = self.url_a_pattern.format(doc.identificador)
        doc.url_epub = self.getElement(self.metadatos, 'url_epub')
        doc.url_pdf = self.getElement(self.metadatos, 'url_pdf')
        doc.url_pdf_catalan = self.getElement(self.metadatos, 'url_pdf_catalan')
        doc.url_pdf_euskera = self.getElement(self.metadatos, 'url_pdf_euskera')
        doc.url_pdf_gallego = self.getElement(self.metadatos, 'url_pdf_gallego')
        doc.url_pdf_valenciano = self.getElement(self.metadatos, 'url_pdf_valenciano')
        doc.notas = self.getArrayOfElements(self.analisis, 'notas', 'nota', Nota)
        doc.materias = self.getArrayOfElements(self.analisis, 'materias', 'materia', Materia)
        doc.alertas = self.getArrayOfElements(self.analisis, 'alertas', 'alerta', Alerta)
        self.processReferencias(doc)
        # Keep the raw <texto> body as pretty-printed XML.
        doc.texto = etree.tostring(self.rootXML.texto, pretty_print=True)
        doc.save()
def getArrayOfElements(self, origin, element, subelement, model):
if self.existElement(origin, element):
subel = getattr(origin, element)
if self.existElement(subel, subelement):
elements = []
for el in getattr(subel, subelement):
codigo = el.get('codigo')
titulo = el.text
if codigo:
ob = self.get_or_create(model, codigo=codigo, titulo=titulo)
elements.append(ob)
return elements
return []
# codigo, titulo = self.getElementCodigoTitulo()
def getElementCodigoTitulo(self, origin, element):
codigo = self.getAttribute(origin, element, 'codigo')
titulo = self.getElement(origin, element)
return codigo, titulo
def getAttribute(self, origin, element, attribute):
if self.existElement(origin, element):
return getattr(origin,element).get(attribute)
return None
    def downloadXML(self):
        """Fetch the raw XML document from self.url (network I/O)."""
        url_xml = URL(self.url)
        self.xmlDoc = url_xml.download()
    def xmlToObject(self):
        """Parse the downloaded XML string into an lxml objectify tree."""
        self.rootXML = objectify.fromstring(self.xmlDoc)
    def getMetadatos(self):
        """Cache the <metadatos> node of the parsed document."""
        self.metadatos = self.rootXML.metadatos
    def getAnalisis(self):
        """Cache the <analisis> node of the parsed document."""
        self.analisis = self.rootXML.analisis
    def existElement(self, origin, element):
        """True if *origin* exposes a child/attribute named *element*."""
        return hasattr(origin, element)
def getElement(self,origin, element):
if hasattr(origin, element):
return getattr(getattr(origin,element), 'text')
@staticmethod
def get_or_create(model, **kwargs):
len_items = len(kwargs)
count_items = 0
for k, v in kwargs.items():
if v is None or v is '':
count_items += 1
if len_items == count_items:
return None
objeto = None
try:
if kwargs.has_key('busqueda'):
objeto = model.objects.get(**kwargs['busqueda'])
else:
objeto = model.objects.get(**kwargs)
except:
# print kwargs
if kwargs.has_key('busqueda') and kwargs.has_key('insert'):
insert = dict(kwargs['busqueda'].items() + kwargs['insert'].items())
objeto = model(**insert)
# print objeto
else:
objeto = model(**kwargs)
objeto.save()
return objeto
@staticmethod
def stringToFloat(value):
# Remove anything not a digit, comma or period
no_cruft = re.sub(r'[^\d,.-]', '', value)
# Split the result into parts consisting purely of digits
parts = re.split(r'[,.]', no_cruft)
# ...and sew them back together
try:
if len(parts) == 1:
# No delimeters found
float_str = parts[0]
elif len(parts[-1]) != 2:
# >= 1 delimeters found. If the length of last part is not equal to 2, assume it is not a decimal part
float_str = ''.join(parts)
else:
float_str = '%s%s%s' % (''.join(parts[0:-1]),
locale.localeconv()['decimal_point'],
parts[-1])
# Convert to float
return float(float_str)
except:
return None
@staticmethod
def textToDate(texto):
regex = re.compile("(\d{4})(\d{2})(\d{2})")
if texto is not None:
match = re.match(regex, texto)
if match != None:
year = int(match.group(1))
month = int(match.group(2))
day = int(match.group(3))
d = datetime(year,month, day)
return d
return None
@staticmethod
def SiNoToBool(character):
return character == 'S' | {
"repo_name": "BOE-API/new_boe_api",
"path": "boe_api/state_documents/processDocument.py",
"copies": "1",
"size": "15113",
"license": "mit",
"hash": 3265050563054032400,
"line_mean": 46.6782334385,
"line_max": 133,
"alpha_frac": 0.6115265004,
"autogenerated": false,
"ratio": 3.325924295774648,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4437450796174648,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Carlos'
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from principal.forms import NewForm
from django.template import RequestContext
from django.contrib.auth.decorators import permission_required
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from principal.services import SubjectService, NewService
from principal.models import Noticia, Asignatura
from django.shortcuts import get_object_or_404
@login_required()
def news(request, new_id=None, method=None):
    """List news and, for lecturers, create/edit/delete them.

    *method* selects an action on *new_id*: 'rm' deletes, 'ed' pre-fills the
    form for editing, and no method shows an empty creation form. Students
    see their own subjects read-only; other roles see every subject.
    """
    if request.method == 'POST':
        # NOTE(review): access control via assert is stripped under `python -O`
        # -- confirm whether an explicit permission check was intended.
        assert request.user.has_perm('principal.profesor')
        form = NewForm(request.POST, lecturer_id=request.user.id)
        if form.is_valid():
            NewService.save(form, request.user.id)
            messages.add_message(request, messages.SUCCESS, _('Action completed successfully.'))
            return HttpResponseRedirect('/news')
    if request.user.has_perm('principal.alumno'):
        subjects = SubjectService.get_student_subjects(request.user.id)
        form = None
    elif request.user.has_perm('principal.profesor'):
        subjects = SubjectService.get_lecturer_subjects(request.user.id)
        if method == 'rm':
            new = get_object_or_404(Noticia, id=new_id)
            # Lecturers may only delete their own news items.
            if new.profesor.id == request.user.id:
                NewService.delete(new_id)
                messages.success(request, _('New was deleted successfully.'))
            else:
                messages.error(request, _('You cannot delete this new.'))
            return HttpResponseRedirect('/news')
        elif method == 'ed':
            new = get_object_or_404(Noticia, id=new_id)
            # Lecturers may only edit their own news items.
            if new.profesor.id == request.user.id:
                initial_data = NewService.build_initial(new_id)
                form = NewForm(lecturer_id=request.user.id, initial=initial_data)
            else:
                messages.error(request, _('You cannot edit this new.'))
                return HttpResponseRedirect('/news')
        elif not method:
            # NOTE(review): after an invalid POST this recreates an unbound
            # form, discarding the validation errors -- confirm intent.
            form = NewForm(lecturer_id=request.user.id)
    else:
        subjects = Asignatura.objects.all()
        form = None
    return render_to_response('new/list.html', {'subjects': subjects, 'form': form},
                              context_instance=RequestContext(request))
@permission_required('principal.profesor')
def edit_new(request, new_id):
    """Render the edit form for a news item (saving is not implemented yet).

    Bug fix: the template context was written as the set literal
    {'form', ls}, which referenced an undefined name `ls` and would raise
    NameError at render time; it is now the intended {'form': form} dict.
    """
    form = None
    if request.method == 'POST':
        form = NewForm(request.POST)
        if form.is_valid():
            pass
            # save
    else:
        initial_data = NewService.build_initial(new_id)
        form = NewForm(initial=initial_data)
    return render_to_response('new/edit.html', {'form': form}, context_instance=RequestContext(request))
"repo_name": "carborgar/gestionalumnostfg",
"path": "principal/views/NewViews.py",
"copies": "1",
"size": "2853",
"license": "mit",
"hash": -595759007780091900,
"line_mean": 37.5675675676,
"line_max": 102,
"alpha_frac": 0.6498422713,
"autogenerated": false,
"ratio": 4.046808510638298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000728066882720849,
"num_lines": 74
} |
__author__ = 'Carlos'
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from principal.forms import StudentProfileForm, AddressForm
from principal.models import Alumno, Profesor
from django.template import RequestContext
from django.contrib.auth.decorators import permission_required
from django.forms.formsets import formset_factory
from principal.services import ProfileService
from django.contrib import messages
from django.utils.translation import ugettext as _
from principal.services import SubjectService
from principal.forms import RemarkForm, LecturerStudentFilterForm
from principal.services import RemarkService
from django.http import HttpResponseRedirect
from principal.forms import LecturerProfileForm
@login_required()
def view_profile(request, student_id):
    """Show a student's profile; lecturers can attach remarks via POST.

    Students always see their own profile regardless of *student_id*;
    lecturers see the requested student plus a remark form scoped to the
    subjects they teach.
    """
    template_vars = {}
    form = None
    if request.method == 'POST':
        student = Alumno.objects.get(id=student_id)
        lecturer_subjects = SubjectService.get_lecturer_subjects(request.user.id)
        template_vars['lecturer_subjects'] = lecturer_subjects
        # Save remark from form -> logged user is a lecturer
        form = RemarkForm(request.POST, lecturer_id=request.user.id)
        if form.is_valid():
            RemarkService.save(form)
            messages.success(request, _('Remark was saved successfully.'))
            # Redirect so a browser refresh does not re-submit the remark.
            return HttpResponseRedirect('/profile/view/%s' % form.cleaned_data['student_id'])
    else:
        if request.user.has_perm('principal.alumno'):
            # Students can only ever view their own profile.
            student = Alumno.objects.get(id=request.user.id)
        else:
            student = Alumno.objects.get(id=student_id)
        if request.user.has_perm('principal.profesor'):
            lecturer_subjects = SubjectService.get_lecturer_subjects(request.user.id)
            form = RemarkForm(lecturer_id=request.user.id, initial={'student_id': student.id})
            template_vars['lecturer_subjects'] = lecturer_subjects
    template_vars['student'] = student
    template_vars['form'] = form
    return render_to_response('profile/details.html', template_vars, context_instance=RequestContext(request))
@permission_required('principal.alumno')
def edit_profile(request):
    """Let the logged-in student edit the profile form plus two addresses.

    The address formset holds exactly two forms; per ProfileService, index 0
    is the residence address and index 1 the address while studying.
    """
    address_formset = formset_factory(AddressForm, max_num=2, extra=2)
    student = Alumno.objects.get(id=request.user.id)
    if request.method == 'POST':
        form = StudentProfileForm(request.POST, request.FILES, student_id=student.id)
        formset = address_formset(request.POST)
        if form.is_valid() and formset.is_valid():
            ProfileService.reconstruct_and_save(form, formset, request.user.id)
            messages.success(request, _('Information has been saved successfully.'))
        elif len(request.FILES) > 0:
            # Browsers do not re-post uploaded files on a failed validation
            # round-trip, so ask the user to pick the photo again.
            messages.warning(request, _('Please, select the profile photo again.'))
    else:
        form = StudentProfileForm(initial=ProfileService.get_form_data(student), student_id=student.id)
        formset = address_formset(initial=ProfileService.get_formset_data(student))
    return render_to_response('profile/edit.html', {'form': form, 'formset': formset, 'student': student},
                              context_instance=RequestContext(request))
@permission_required('principal.profesor')
def lecturer_students(request):
    """List the lecturer's students, optionally filtered by subject.

    POST with a valid filter form narrows the list to one subject; any other
    case (GET, or an invalid filter) shows all of the lecturer's students.
    """
    students = None
    if request.method == 'POST':
        form = LecturerStudentFilterForm(request.POST, lecturer_id=request.user.id)
        if form.is_valid():
            subject = form.cleaned_data['subject']
            students = SubjectService.subject_students(subject, request.user.id)
    else:
        form = LecturerStudentFilterForm(lecturer_id=request.user.id)
    if students is None:
        # Bug fix: previously an invalid POSTed form left 'students' unassigned
        # and rendering raised NameError. Fall back to the unfiltered list.
        students = SubjectService.lecturer_students(request.user.id)
    return render_to_response('student/list.html', {'students': students, 'form': form},
                              context_instance=RequestContext(request))
@permission_required('principal.profesor')
def edit_lecturer(request):
    """Show and process the logged-in lecturer's profile-edit form."""
    lecturer = Profesor.objects.get(id=request.user.id)
    if request.method != 'POST':
        # Pre-fill the form from the lecturer's stored profile.
        form = LecturerProfileForm(initial=ProfileService.build_initial_profile_data(lecturer))
    else:
        form = LecturerProfileForm(request.POST)
        if form.is_valid():
            ProfileService.update_profile(lecturer, form)
            messages.success(request, _('Information was updated successfully'))
    return render_to_response('lecturer/edit.html', {'form': form}, context_instance=RequestContext(request))
| {
"repo_name": "carborgar/gestionalumnostfg",
"path": "principal/views/ProfileViews.py",
"copies": "1",
"size": "4601",
"license": "mit",
"hash": -420105140865261440,
"line_mean": 42,
"line_max": 110,
"alpha_frac": 0.7007172354,
"autogenerated": false,
"ratio": 3.9324786324786323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5133195867878632,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Carlos'
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import permission_required
from django.utils.translation import ugettext as _
from principal.services import PeticionCitaService
from principal.models import Profesor
from principal.forms import PeticionCitaForm
from principal.models import Peticioncita
from django.shortcuts import get_object_or_404
from principal.models import Tutoria
from django.contrib import messages
from principal.forms import TutorialForm
from principal.services import TutorialService
from django.contrib.auth.decorators import login_required
from django.http.response import JsonResponse
import datetime
import time
from calendar import timegm
@login_required
def view_tutorials(request, lecturer_id=None, tutorial_id=None):
    """Show a lecturer's tutorial slots and, for lecturers, an add/edit form.

    GET: renders the details page for `lecturer_id` (or the logged-in user).
    A lecturer with `tutorial_id` set gets the form pre-filled to edit that
    slot, provided the slot belongs to them.
    POST: a lecturer saves (creates or updates) a tutorial slot.
    """
    if request.method == 'POST':
        lecturer = Profesor.objects.get(id=request.user.id)
        form = TutorialForm(request.POST, lecturer_id=lecturer.id)
        if form.is_valid():
            # Lecturer as parameter (lecturer_id is in the form, but this way we save one DB query)
            TutorialService.update(form, lecturer)
            messages.success(request, _('Tutorial was updated successfully') if form.cleaned_data['tutorial_id'] else _(
                'Tutorial was added successfully.'))
            return HttpResponseRedirect('/lecturer/details/')
        # Invalid form: fall through and re-render with the bound form + errors.
    else:
        lecturer = get_object_or_404(Profesor, id=lecturer_id if lecturer_id else request.user.id)
        initial_data = {}
        if request.user.has_perm('principal.profesor'):
            # Edit tutorial
            if tutorial_id:
                tutorial_to_edit = get_object_or_404(Tutoria, id=tutorial_id)
                if not tutorial_to_edit.profesor == lecturer:
                    # Viewing someone else's slot: warn and leave the form empty.
                    messages.warning(request, _('You cannot edit this tutorial.'))
                else:
                    initial_data['tutorial_id'] = tutorial_to_edit.id
                    initial_data['start_hour'] = tutorial_to_edit.horainicio.strftime('%H:%M')
                    initial_data['finish_hour'] = tutorial_to_edit.horafin.strftime('%H:%M')
                    initial_data['day'] = tutorial_to_edit.dia
            form = TutorialForm(lecturer_id=request.user.id, initial=initial_data)
    # NOTE(review): a GET by a non-lecturer reaches this render with 'form'
    # unassigned (NameError) — confirm routing guarantees can't hit that path.
    return render_to_response('lecturer/details.html', {'lecturer': lecturer, 'form': form},
                              context_instance=RequestContext(request))
@permission_required('principal.alumno')
def create_tutorial(request):
    """Let a student request a tutorial appointment with a lecturer.

    On a valid POST the request is saved only if the student does not
    already hold two appointments with the same lecturer on the same day.
    """
    if request.method == 'POST':
        student_id = request.user.alumno.id
        form = PeticionCitaForm(request.POST)
        if form.is_valid():
            tutorial_request = PeticionCitaService.create(form, student_id)
            # Business rule: a student cannot request more than two appointments
            # for the same lecturer and day. Delegated to the service instead of
            # duplicating its raw SQL query here (consistency with can_request).
            if PeticionCitaService.can_request(tutorial_request):
                tutorial_request.save()
                messages.add_message(request, messages.SUCCESS, _('The request has been sent successfully.'))
                return HttpResponseRedirect('/student/tutorial/create/')
            else:
                messages.add_message(request, messages.ERROR, _(
                    'Sorry, but you cannot request more than two appointments for the same teacher in the same day.'))
    else:
        form = PeticionCitaForm()
    return render_to_response('tutorial/edit.html', {'form': form}, context_instance=RequestContext(request))
# def edit_tutorial(request, tutorial_id):
# if request.method == 'POST':
# pass
# else:
# form = TutorialForm
@permission_required('principal.profesor')
def delete_tutorial(request, tutorial_id):
    """Delete one of the logged-in lecturer's tutorial slots.

    The profesor filter ensures a lecturer can only delete his/her own slot.
    """
    # get_object_or_404 turns a missing/foreign slot into a 404 instead of an
    # unhandled DoesNotExist (500) as the plain .get() did.
    tutorial = get_object_or_404(Tutoria, id=tutorial_id, profesor__id=request.user.id)
    tutorial.delete()
    messages.success(request, _('The tutorial has been removed.'))
    return HttpResponseRedirect('/lecturer/details/')
@permission_required('principal.profesor')
def enable_tutorials(request):
    """Turn the logged-in lecturer's tutorials back on (POST only)."""
    if request.method == 'POST':
        lecturer = Profesor.objects.get(id=request.user.id)
        TutorialService.enable_tutorials(lecturer)
        messages.add_message(request, messages.SUCCESS, _('Tutorials have been enabled.'))
    # Bug fix: indexing request.POST['return_url'] raised
    # MultiValueDictKeyError on a GET (empty POST dict); use a safe default.
    return HttpResponseRedirect(request.POST.get('return_url', '/lecturer/details/'))
@permission_required('principal.profesor')
def disable_tutorials(request):
    """Turn the logged-in lecturer's tutorials off, requiring a reason.

    A blank or whitespace-only motivation is rejected with an error message.
    """
    if request.method == 'POST':
        # .get avoids MultiValueDictKeyError when the field is missing.
        motivation = request.POST.get('motivation', '')
        # strip() collapses the original triple check (None / '' / isspace).
        if motivation.strip():
            lecturer = Profesor.objects.get(id=request.user.id)
            TutorialService.disable_tutorials(lecturer, motivation)
            messages.success(request, _('Tutorials have been disabled.'))
        else:
            messages.error(request, _('You must enter a reason to disable tutorials.'))
    # Safe default: indexing request.POST['return_url'] raised on GET.
    return HttpResponseRedirect(request.POST.get('return_url', '/lecturer/details/'))
@permission_required('principal.profesor')
def tutorials_json(request, timestamp_from, timestamp_to, utc_offset):
    """Return the lecturer's accepted ('AC') appointments between two
    epoch-millisecond timestamps as JSON for a calendar widget.

    (utc_offset is accepted for URL compatibility but not used.)
    """

    def to_epoch_ms(dt):
        # Round-trip through the ISO string so the conversion matches the
        # original strftime/strptime behavior exactly.
        iso = dt.strftime('%Y-%m-%dT%H:%M:%S')
        return timegm(time.strptime(iso, '%Y-%m-%dT%H:%M:%S')) * 1000

    lecturer = Profesor.objects.get(id=request.user.id)
    window_start = datetime.datetime.fromtimestamp(int(timestamp_from) / 1000)
    window_end = datetime.datetime.fromtimestamp(int(timestamp_to) / 1000)
    accepted = lecturer.peticioncita_set.filter(estado='AC', fechacita__gte=window_start,
                                                fechacita__lt=window_end)
    events = []
    for req in accepted:
        full_name = req.alumno.first_name + ' ' + req.alumno.last_name
        events.append({
            'title': str(req),
            'start': to_epoch_ms(req.fechacita),
            'end': to_epoch_ms(req.fechacitafin),
            'short_title': full_name,
            'motivation': req.motivo,
        })
    return JsonResponse({'success': 1, 'result': events}, safe=False)
| {
"repo_name": "carborgar/gestionalumnostfg",
"path": "principal/views/TutorialViews.py",
"copies": "1",
"size": "7256",
"license": "mit",
"hash": 1658312349691194000,
"line_mean": 40.4628571429,
"line_max": 150,
"alpha_frac": 0.6491179713,
"autogenerated": false,
"ratio": 3.7929952953476214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49421132666476214,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Carlos'
from principal.models import Alumno, Profesor, Peticioncita
from datetime import timedelta
import hashlib
from principal.views import EmailViews
def create(form, student_id):
    """Build (without saving) a pending ('EC') 30-minute tutorial request
    from a validated PeticionCitaForm for the given student."""
    data = form.cleaned_data
    start = data['date']
    return Peticioncita(
        alumno=Alumno.objects.get(id=student_id),
        profesor=Profesor.objects.get(id=data['lecturer']),
        fechacita=start,
        fechacitafin=start + timedelta(minutes=30),  # fixed-length slot
        motivo=data['motivation'],
        estado='EC'
    )
def request_by_hour(lecturer, request_to_compare):
    """Pending ('EC') requests that clash with `request_to_compare`:
    same lecturer, same datetime, excluding the request itself.

    These are the candidates for automatic cancellation once one request
    for the slot is accepted.
    """
    clashes = Peticioncita.objects.filter(profesor=lecturer,
                                          fechacita=request_to_compare.fechacita,
                                          estado='EC')
    return clashes.exclude(idcita=request_to_compare.idcita)
def can_accept(request_to_accept):
    """True when no request is already accepted ('AC') for the same
    lecturer and datetime."""
    accepted_clashes = Peticioncita.objects.filter(profesor=request_to_accept.profesor,
                                                   fechacita=request_to_accept.fechacita,
                                                   estado='AC')
    return accepted_clashes.count() == 0
def accept(request_to_accept):
    """Mark the request accepted ('AC'), first auto-cancelling every other
    pending request that occupies the same slot."""
    cancel_concurrent_requests(request_to_accept)
    request_to_accept.estado = 'AC'
    request_to_accept.save()
def cancel_concurrent_requests(request):
    """Auto-cancel every pending request that clashes with `request`."""
    for clashing in request_by_hour(request.profesor, request):
        auto_cancel(clashing)
def auto_cancel(request_to_cancel):
    """Cancel ('CA') a request that lost its slot to an accepted one,
    record the standard reason, and notify the student by e-mail."""
    request_to_cancel.motivocancelacion = 'There is another accepted request for this date.'
    request_to_cancel.estado = 'CA'
    request_to_cancel.save()
    EmailViews.send_tutorial_rejected_mail(request_to_cancel)
def cancel(deny_form):
    """Deny ('DE') the request referenced by the validated deny form, store
    the lecturer's motivation, and notify the student by e-mail."""
    data = deny_form.cleaned_data
    tutorial_request = Peticioncita.objects.get(idcita=data['request_id'])
    tutorial_request.estado = 'DE'
    tutorial_request.motivocancelacion = data['motivation']
    tutorial_request.save()
    EmailViews.send_tutorial_rejected_mail(tutorial_request)
def lecturer_requests(lecturer_id, state):
    """A lecturer's tutorial requests in the given state, newest first."""
    matching = Peticioncita.objects.filter(profesor__id=lecturer_id, estado=state)
    return matching.order_by('-fechacita')
def student_requests(student_id, state=None):
    """A student's tutorial requests, optionally restricted to one state.

    With a (truthy) state the result is ordered newest first; without one
    the student's whole unordered related set is returned.
    """
    if not state:
        return Alumno.objects.get(id=student_id).peticioncita_set.all()
    return Peticioncita.objects.filter(alumno__id=student_id, estado=state).order_by('-fechacita')
def cancel_all(tutorial_request_list, motivation):
    """Deny ('DE') every request in the list with one shared motivation and
    e-mail each affected student."""
    for pending in tutorial_request_list:
        pending.estado = 'DE'
        pending.motivocancelacion = motivation
        pending.save()
        EmailViews.send_tutorial_rejected_mail(pending)
def can_request(tutorial_request):
    """Business rule: a student cannot request more than two appointments
    for the same lecturer and day.

    Returns True when fewer than two such requests already exist.
    """
    when = tutorial_request.fechacita.date()
    parameters = [when.year, when.month, when.day,
                  tutorial_request.profesor.id, tutorial_request.alumno.id]
    query = '''SELECT idCita FROM peticioncita
                WHERE YEAR(fechaCita)=%s AND MONTH(fechaCita)=%s
                AND DAY(fechaCita)=%s AND profesor=%s AND alumno=%s'''
    same_day_same_lecturer = len(list(Peticioncita.objects.raw(query, parameters)))
    return same_day_same_lecturer < 2
def auto_assign(auto_assign_tutorial_form, lecturer):
    """Create and persist an already-accepted ('AC') 30-minute appointment
    that the lecturer assigns directly to a student."""
    data = auto_assign_tutorial_form.cleaned_data
    start = data['date']
    return Peticioncita.objects.create(
        alumno=data['student'],
        profesor=lecturer,
        fechacita=start,
        fechacitafin=start + timedelta(minutes=30),  # fixed-length slot
        motivo=data['motivation'],
        estado='AC'
    )
# True when `token` equals the SHA-512 hex digest of the request's
# activation hash. NOTE(review): a plain == comparison is not constant-time;
# consider hmac.compare_digest for token checks.
def is_token_valid(tutorial_request, token):
    return token == hashlib.sha512(str(tutorial_request.activation_hash).encode('utf-8')).hexdigest() | {
"repo_name": "carborgar/gestionalumnostfg",
"path": "principal/services/PeticionCitaService.py",
"copies": "1",
"size": "4412",
"license": "mit",
"hash": 1180575532143875600,
"line_mean": 36.7179487179,
"line_max": 120,
"alpha_frac": 0.6883499547,
"autogenerated": false,
"ratio": 3.404320987654321,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45926709423543205,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Carlos'
from principal.models import Ficha, Alumno, Profesor
from principal.services import AddressService
def reconstruct_and_save(form, formset, student_id):
    """Persist a student's profile form plus the two-address formset.

    Reuses the student's existing Ficha record when present, otherwise
    creates one; then links it to the student and updates the auth_user
    fields. Order matters: the Ficha must be saved before it is assigned
    to student.ficha and the student is saved.
    """
    student = Alumno.objects.get(id=student_id)
    if student.ficha:
        # Use existing data
        data = student.ficha
    else:
        data = Ficha()
    data.telefono = form.cleaned_data['phone1']
    # data.apellidos = form.cleaned_data['surname']
    data.movil = form.cleaned_data['phone2']
    data.fecha_nacimiento = form.cleaned_data['birth_date']
    form_photo = form.cleaned_data['photo']
    # Keep the previous photo when the form did not upload a new one.
    data.foto = data.foto if not form_photo else form_photo
    # Returns array -> [residence address, address while studying]
    addresses = AddressService.reconstruct_and_save(formset, student)
    data.direccion_residencia = addresses[0]
    data.direccion_estudios = addresses[1]
    # Save the student data
    data.save()
    # Update student data, id number and first_name (auth_user table)
    student.ficha = data
    student.dni = form.cleaned_data['id_number']
    student.first_name = form.cleaned_data['name']
    student.last_name = form.cleaned_data['surname']
    student.email = form.cleaned_data['email']
    student.save()
def get_form_data(student):
    """Flatten a student's profile into a dict of initial form values.

    Name and surname always come from the auth user row; the remaining
    fields are included only when the optional 'ficha' profile exists.
    """
    data = {'name': student.first_name, 'surname': student.last_name}
    profile = student.ficha
    if profile:
        data.update({
            'photo': profile.foto,
            'email': student.email,
            'id_number': student.dni,
            'birth_date': profile.fecha_nacimiento,
            'phone1': profile.telefono,
            'phone2': profile.movil,
        })
    return data
def get_formset_data(student):
    """Build initial data for the two-address formset.

    Returns a two-element list: index 0 is the residence address, index 1
    the address while studying (matching reconstruct_and_save). Returns an
    empty dict (no initial data) when the student has no profile yet.
    """
    if not student.ficha:
        return {}
    # Bug fix: the residence entry previously read 'direccion_estudios',
    # so both forms were pre-filled with the studies address.
    residence_address = student.ficha.direccion_residencia
    studies_address = student.ficha.direccion_estudios

    def _address_initial(address):
        # Initial values for one AddressForm.
        return {
            'address': address.direccion,
            'province': address.provincia,
            'location': address.localizacion,
            'postal_code': address.codigo_postal,
            'country': address.pais,
        }

    # The first form is the residence address and the second form is the
    # address while studying.
    return [_address_initial(residence_address), _address_initial(studies_address)]
def build_initial_profile_data(lecturer):
    """Map a lecturer's stored profile onto the profile form's initial keys."""
    return {
        'name': lecturer.first_name,
        'surname': lecturer.last_name,
        'phone': lecturer.telefono,
        'office': lecturer.despacho,
        'web': lecturer.web,
        'category': lecturer.categoria,
        'email': lecturer.email,
    }
def update_profile(lecturer, form):
    """Copy the validated profile form fields onto the lecturer and save."""
    values = form.cleaned_data
    lecturer.first_name = values['name']
    lecturer.last_name = values['surname']
    lecturer.telefono = values['phone']
    lecturer.despacho = values['office']
    lecturer.web = values['web']
    lecturer.categoria = values['category']
    lecturer.email = values['email']
    lecturer.save()
| {
"repo_name": "carborgar/gestionalumnostfg",
"path": "principal/services/ProfileService.py",
"copies": "1",
"size": "3280",
"license": "mit",
"hash": 3959899477551156000,
"line_mean": 33.8936170213,
"line_max": 118,
"alpha_frac": 0.6533536585,
"autogenerated": false,
"ratio": 3.4453781512605044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4598731809760504,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Carlos'
from principal.models import Tutoria
def update(tutorial_form, lecturer):
    """Create or edit one of the lecturer's tutorial slots.

    A non-empty 'tutorial_id' in the validated form means edit-in-place;
    otherwise a new Tutoria row is created for the lecturer.
    """
    old_id = tutorial_form.cleaned_data['tutorial_id']
    if old_id:
        # Edit the current tutorial
        old_tutorial = Tutoria.objects.get(id=old_id)
        # NOTE(review): 'assert' is stripped under 'python -O'; ownership
        # should be enforced with an explicit check/exception instead.
        assert old_tutorial.profesor == lecturer
        old_tutorial.dia = tutorial_form.cleaned_data['day']
        old_tutorial.horainicio = tutorial_form.cleaned_data['start_hour']
        old_tutorial.horafin = tutorial_form.cleaned_data['finish_hour']
        old_tutorial.save()
    else:
        Tutoria.objects.create(
            horainicio=tutorial_form.cleaned_data['start_hour'],
            horafin=tutorial_form.cleaned_data['finish_hour'],
            dia=tutorial_form.cleaned_data['day'],
            profesor=lecturer
        )
def enable_tutorials(lecturer):
    """Flag the lecturer as currently offering tutorials and persist."""
    lecturer.tutoriaactivada = True
    lecturer.save()
def disable_tutorials(lecturer, motivation):
    """Turn off the lecturer's tutorials, recording why, and persist."""
    lecturer.motivotutorias = motivation
    lecturer.tutoriaactivada = False
    lecturer.save()
| {
"repo_name": "carborgar/gestionalumnostfg",
"path": "principal/services/TutorialService.py",
"copies": "1",
"size": "1040",
"license": "mit",
"hash": -7851903776225163000,
"line_mean": 31.5,
"line_max": 74,
"alpha_frac": 0.6653846154,
"autogenerated": false,
"ratio": 3.25,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44153846154,
"avg_score": null,
"num_lines": null
} |
__author__ = 'carlpearson'
import csv
def seeds(count=5):
    """Read the first `count` integer seeds from rdat.csv.

    The file has a header row; each following row's second column holds one
    seed value. `count` generalizes the previously hard-coded limit of 5.
    """
    res = []
    with open("rdat.csv", newline="") as csvfile:
        lines_reader = csv.reader(csvfile)
        next(lines_reader)  # skip the header row (next() over __next__())
        for _ in range(count):
            row = next(lines_reader)
            res.append(int(row[1].strip()))
    return res
def color_list(count=3):
    """Read the first `count` color names from cdat.csv.

    The file has a header row; each following row's second column holds one
    color name. `count` generalizes the previously hard-coded limit of 3.
    """
    res = []
    with open("cdat.csv", newline="") as csvfile:
        lines_reader = csv.reader(csvfile)
        next(lines_reader)  # skip the header row (next() over __next__())
        for _ in range(count):
            row = next(lines_reader)
            res.append(row[1].strip())
    return res
def shape_list(count=3):
    """Read the first `count` shape names from sdat.csv.

    The file has a header row; each following row's second column holds one
    shape name. `count` generalizes the previously hard-coded limit of 3.
    """
    res = []
    with open("sdat.csv", newline="") as csvfile:
        lines_reader = csv.reader(csvfile)
        next(lines_reader)  # skip the header row (next() over __next__())
        for _ in range(count):
            row = next(lines_reader)
            res.append(row[1].strip())
    return res
import random
# Draw 40 random (color, shape) pairs per seed, write each run to its own
# cuts<seed>.out CSV, and print per-seed plus overall frequency tables.
if __name__ == "__main__":
    colors = color_list()
    shapes = shape_list()
    tot_res = {}  # overall color -> shape -> count, across all seeds
    for s in seeds():
        random.seed(s)  # reproducible draws for this seed
        print("seed", s, sep=" ")
        with open("cuts"+str(s)+".out", mode="w") as tarfile:
            writer = csv.writer(tarfile)
            res = {}  # per-seed color -> shape -> count
            for x in range(0, 40):
                c = random.choice(colors)
                # NOTE(review): 's' is rebound here from the seed to a shape;
                # harmless since the seed is only used above, but worth renaming.
                s = random.choice(shapes)
                writer.writerow([c, s])
                m = res.get(c, {})
                m[s] = m.get(s, 0) + 1
                res[c] = m
                m = tot_res.get(c, {})
                m[s] = m.get(s, 0) + 1
                tot_res[c] = m
            print(res)
    print("totals:")
    print(tot_res) | {
"repo_name": "AIMS-Ghana/cams",
"path": "disorganized/dostuff.py",
"copies": "2",
"size": "1634",
"license": "cc0-1.0",
"hash": -3806814497113424000,
"line_mean": 26.7118644068,
"line_max": 61,
"alpha_frac": 0.4663402693,
"autogenerated": false,
"ratio": 3.536796536796537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5003136806096536,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Caroline Beyne'
from PyQt4 import QtGui, QtCore
import sys
from FrameLayout import FrameLayout
# Demo: a QMainWindow holding two collapsable FrameLayout sections, one with
# three buttons and one with a populated 6x3 QTableWidget. (Python 2 / PyQt4.)
if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    win = QtGui.QMainWindow()
    w = QtGui.QWidget()
    w.setMinimumWidth(350)
    win.setCentralWidget(w)
    l = QtGui.QVBoxLayout()
    l.setSpacing(0)
    l.setAlignment(QtCore.Qt.AlignTop)
    w.setLayout(l)
    # First collapsable section: three push buttons.
    t = FrameLayout(title="Buttons")
    t.addWidget(QtGui.QPushButton('a'))
    t.addWidget(QtGui.QPushButton('b'))
    t.addWidget(QtGui.QPushButton('c'))
    # Second collapsable section: a table filled column by column.
    f = FrameLayout(title="TableWidget")
    rows, cols = (6, 3)
    data = {'col1': ['1', '2', '3', '4', '5', '6'],
            'col2': ['7', '8', '9', '10', '11', '12'],
            'col3': ['13', '14', '15', '16', '17', '18']}
    table = QtGui.QTableWidget(rows, cols)
    headers = []
    for n, key in enumerate(sorted(data.keys())):
        headers.append(key)
        for m, item in enumerate(data[key]):
            newitem = QtGui.QTableWidgetItem(item)
            table.setItem(m, n, newitem)
    table.setHorizontalHeaderLabels(headers)
    f.addWidget(table)
    l.addWidget(t)
    l.addWidget(f)
    win.show()
    win.raise_()
    print "Finish"
    sys.exit(app.exec_())
| {
"repo_name": "By0ute/pyqt-collapsable-widget",
"path": "code/main.py",
"copies": "1",
"size": "1244",
"license": "mit",
"hash": -791179006238507400,
"line_mean": 25.4680851064,
"line_max": 57,
"alpha_frac": 0.5844051447,
"autogenerated": false,
"ratio": 3.214470284237726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4298875428937726,
"avg_score": null,
"num_lines": null
} |
__author__ = 'carolinux'
"""Functionality that requires knowledge of the lastFM api"""
import json
import requests
from datetime import datetime
class LastFmException(Exception):
    """Raised when a lastFM API response cannot be parsed as expected."""
    pass
def create_url(user_name, api_key, page, to_date):
    """Build the user.getrecenttracks REST URL for one page of results."""
    base = "http://ws.audioscrobbler.com/2.0/"
    query = "?method=user.getrecenttracks&user={}&api_key={}&format=json&page={}&to={}"
    return base + query.format(user_name, api_key, page, to_date)
def get_tracks(user_name, api_key, page, to_date):
    """Get all tracks from one result page.

    to_date must be in epoch timestamp format (integer). Raises
    LastFmException carrying the raw response text when the payload is not
    the expected JSON structure.
    """
    url = create_url(user_name, api_key, page, to_date)
    # print() form is valid on both Python 2 and 3 (was a py2-only statement).
    print("Loading stuff from url: {}".format(url))
    text = requests.get(url).text
    try:
        tracks = json.loads(text)["recenttracks"]["track"]
    except (ValueError, KeyError, TypeError):
        # Narrowed from a bare 'except:' (which also swallowed e.g.
        # KeyboardInterrupt): ValueError = not JSON; KeyError/TypeError =
        # unexpected payload structure.
        raise LastFmException(text)
    return tracks
def clean_track_info(track):
    """Create a more compact record out of a lastFM track JSON object.

    Returns {'artist', 'song', 'date_listened'}; a track flagged as
    "now playing" gets the current UTC time as its listen date.
    """
    artist = track["artist"]["#text"]
    song = track["name"]
    # .get chains avoid a KeyError when '@attr' exists without 'nowplaying'
    # (the original indexed it directly); 'in track' replaces '.keys()'.
    if track.get("@attr", {}).get("nowplaying") == "true":
        date_listened = datetime.utcnow()
    else:
        date_str = track["date"]["#text"]
        date_listened = datetime.strptime(date_str, "%d %b %Y, %H:%M")
    return {"artist": artist, "song": song, "date_listened": date_listened}
| {
"repo_name": "carolinux/lastfm-fetch",
"path": "lastfm.py",
"copies": "1",
"size": "1332",
"license": "mit",
"hash": 8853960768056567000,
"line_mean": 29.976744186,
"line_max": 95,
"alpha_frac": 0.6403903904,
"autogenerated": false,
"ratio": 3.3979591836734695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9526030373846583,
"avg_score": 0.002463840045377198,
"num_lines": 43
} |
__author__ = 'carolinux'
import abc
import os
import pandas as pd
import numpy as np
from datetime import datetime
"""Classes to help store and load the user song data"""
class DataStore:
    """Abstract base class to define interface for functionality.
    Could extend this to a number of concrete implementations
    (csv, sqlite, pickles -if we must- etc)"""
    # NOTE(review): '__metaclass__' only takes effect on Python 2; on
    # Python 3 this should be 'class DataStore(metaclass=abc.ABCMeta)'.
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def user_exists(self, username):
        # True when the store already holds data for this user.
        pass
    @abc.abstractmethod
    def get_date_range(self, username):
        # (oldest, newest) listen datetimes recorded for the user.
        pass
    @abc.abstractmethod
    def get_songs_as_df(self, username):
        # The user's full listening history as a pandas DataFrame.
        pass
    @abc.abstractmethod
    def add_songs_df(self, username, df, mode):
        """add a songdata dataframe to the store"""
        pass
class CSVDataStore(DataStore):
    """DataStore backed by one pipe-separated CSV file per user."""
    SEP = "|"  # song names unlikely to have pipes in the name
    def __init__(self, folder):
        # Keep user files under `folder`, creating it on first use.
        if not os.path.exists(folder):
            os.mkdir(folder)
        self.folder = folder
        self.header = ["artist", "date_listened", "song"]
    def _user_file(self, username):
        # Path of the per-user CSV inside the store folder.
        return os.path.join(self.folder, username + ".csv")
    def user_exists(self, user):
        return os.path.exists(self._user_file(user))
    def get_songs_as_df(self, user):
        """Load the user's history; an empty schema-only frame for unknown users."""
        if not self.user_exists(user):
            return self._get_empty_df()
        frame = pd.read_csv(self._user_file(user), encoding='utf-8', sep=self.SEP)
        frame.date_listened = frame.date_listened.apply(
            lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"))
        return frame
    def get_date_range(self, user):
        """Return (oldest, newest) listen datetimes for the user."""
        dates = self.get_songs_as_df(user).date_listened
        return dates.min(), dates.max()
    def _get_empty_df(self):
        # Schema-only frame so callers can treat "no data" uniformly.
        empty = pd.DataFrame(columns=["artist", "date_listened", "song"])
        empty.date_listened = empty.date_listened.astype(np.datetime64)
        return empty
    def add_songs_df(self, user, df, mode):
        """Persist `df`; mode is 'append' (merge with existing) or 'overwrite'."""
        if mode == "overwrite":
            all_songs = df
        elif mode == "append":
            all_songs = self.get_songs_as_df(user).append(df)
        else:
            raise Exception("Invalid mode")
        # check schema
        all_songs.to_csv(self._user_file(user), index=False, encoding='utf-8', sep=self.SEP)
| {
"repo_name": "carolinux/lastfm-fetch",
"path": "datastore.py",
"copies": "1",
"size": "2479",
"license": "mit",
"hash": 974635955201682400,
"line_mean": 29.9875,
"line_max": 95,
"alpha_frac": 0.6135538524,
"autogenerated": false,
"ratio": 3.556671449067432,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46702253014674316,
"avg_score": null,
"num_lines": null
} |
__author__ = 'carol'
import os
import sys
import tempfile
import mimetypes
import webbrowser
# Import the email modules we'll need
from email import policy
from email.parser import BytesParser
# An imaginary module that would make this work and be safe.
from imaginary import magic_html_parser
# In a real program you'd get the filename from the arguments.
with open('outgoing.msg', 'rb') as fp:
    msg = BytesParser(policy=policy.default).parse(fp)
# Now the header items can be accessed as a dictionary, and any non-ASCII will
# be converted to unicode:
print('To:', msg['to'])
print('From:', msg['from'])
print('Subject:', msg['subject'])
# If we want to print a preview of the message content, we can extract whatever
# the least formatted payload is and print the first three lines.  Of course,
# if the message has no plain text part printing the first three lines of html
# is probably useless, but this is just a conceptual example.
simplest = msg.get_body(preferencelist=('plain', 'html'))
print()
print(''.join(simplest.get_content().splitlines(keepends=True)[:3]))
ans = input("View full message?")
# Bug fix: ans.lower()[0] raised IndexError when the user just pressed
# Enter; startswith handles the empty answer safely.
if ans.lower().startswith('n'):
    sys.exit()
# We can extract the richest alternative in order to display it:
richest = msg.get_body()
partfiles = {}
if richest['content-type'].maintype == 'text':
    if richest['content-type'].subtype == 'plain':
        for line in richest.get_content().splitlines():
            print(line)
        sys.exit()
    elif richest['content-type'].subtype == 'html':
        body = richest
    else:
        print("Don't know how to display {}".format(richest.get_content_type()))
        sys.exit()
elif richest['content-type'].content_type == 'multipart/related':
    # Bug fix: ('html') is just the string 'html', which get_body would
    # iterate character by character; preferencelist needs a real tuple.
    body = richest.get_body(preferencelist=('html',))
    # Dump every related part (inline images etc.) to a temp file so the
    # rewritten html can reference them by filename.
    for part in richest.iter_attachments():
        fn = part.get_filename()
        if fn:
            extension = os.path.splitext(part.get_filename())[1]
        else:
            extension = mimetypes.guess_extension(part.get_content_type())
        with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as f:
            f.write(part.get_content())
            # again strip the <> to go from email form of cid to html form.
            partfiles[part['content-id'][1:-1]] = f.name
else:
    print("Don't know how to display {}".format(richest.get_content_type()))
    sys.exit()
with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
    # The magic_html_parser has to rewrite the href="cid:...." attributes to
    # point to the filenames in partfiles. It also has to do a safety-sanitize
    # of the html. It could be written using html.parser.
    f.write(magic_html_parser(body.get_content(), partfiles))
webbrowser.open(f.name)
os.remove(f.name)
for fn in partfiles.values():
    os.remove(fn)
# Of course, there are lots of email messages that could break this simple
# minded program, but it will handle the most common ones.
| {
"repo_name": "willingc/tone-tuner",
"path": "emailprocessor.py",
"copies": "1",
"size": "2928",
"license": "mit",
"hash": 6593710908728064000,
"line_mean": 37.025974026,
"line_max": 80,
"alpha_frac": 0.6854508197,
"autogenerated": false,
"ratio": 3.655430711610487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9840079863842153,
"avg_score": 0.00016033349366682698,
"num_lines": 77
} |
__author__ = 'carol'
#!/usr/bin/env python3
import smtplib
from email.message import EmailMessage
from email.headerregistry import Address
from email.utils import make_msgid
# Create the base text message.
msg = EmailMessage()
msg['Subject'] = "Ayons asperges pour le déjeuner"
msg['From'] = Address("Pepé Le Pew", "pepe@example.com")
msg['To'] = (Address("Penelope Pussycat", "penelope@example.com"),
             Address("Fabrette Pussycat", "fabrette@example.com"))
msg.set_content("""\
Salut!
Cela ressemble à un excellent recipie[1] déjeuner.
[1] http://www.yummly.com/recipe/Roasted-Asparagus-Epicurious-203718
--Pepé
""")
# Add the html version. This converts the message into a multipart/alternative
# container, with the original text message as the first part and the new html
# message as the second part.
# Bug fixes in the html payload below: '<\p>' -> '</p>', the href attribute
# was missing its closing quote, and the img tag's '\>' -> '/>'.
asparagus_cid = make_msgid()
msg.add_alternative("""\
<html>
<head></head>
<body>
<p>Salut!</p>
<p>Cela ressemble à un excellent
<a href="http://www.yummly.com/recipe/Roasted-Asparagus-Epicurious-203718">
recipie
</a> déjeuner.
</p>
<img src="cid:{asparagus_cid}" />
</body>
</html>
""".format(asparagus_cid=asparagus_cid[1:-1]), subtype='html')
# note that we needed to peel the <> off the msgid for use in the html.
# Now add the related image to the html part.
with open("roasted-asparagus.jpg", 'rb') as img:
    msg.get_payload()[1].add_related(img.read(), 'image', 'jpeg',
                                     cid=asparagus_cid)
# Make a local copy of what we are going to send.
with open('outgoing.msg', 'wb') as f:
    f.write(bytes(msg))
# Send the message via local SMTP server.
with smtplib.SMTP('localhost') as s:
    s.send_message(msg)
| {
"repo_name": "willingc/tone-tuner",
"path": "emailcreator.py",
"copies": "1",
"size": "1735",
"license": "mit",
"hash": -6102754864440518000,
"line_mean": 28.7931034483,
"line_max": 82,
"alpha_frac": 0.6724537037,
"autogenerated": false,
"ratio": 2.918918918918919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9079527984802549,
"avg_score": 0.002368927563273954,
"num_lines": 58
} |
__author__ = 'carpedm20'
__date__ = '2014.07.25'
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
# http://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=cnt&date=20050207&tg=0
from scrapy.item import Item, Field
class Movie(Item):
    """One entry of Naver's daily movie ranking chart."""
    name = Field()  # movie title text
    url = Field()  # link to the movie detail page
    rank = Field()  # 1-based chart position
    date = Field()  # chart date string, YYYYMMDD
#tgs = range(20)
#tgs.remove(9)
def make_urls(tg):
    """Build one ranking-page URL per day from 2005-02-07 through today.

    tg selects the genre tab of the Naver movie ranking page.
    """
    url = "http://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=cnt&date=%s&tg=%s"
    urls = []
    from datetime import date, timedelta
    current_date = date(2005, 2, 7)
    end_date = date.today()
    delta = timedelta(days=1)
    while current_date <= end_date:
        urls.append(url % (current_date.strftime("%Y%m%d"), tg))
        current_date += delta
    # print() form works on both Python 2 and 3 (was a py2-only statement).
    print("[*] length of urls : %s" % len(urls))
    return urls
import urlparse
class RankSpider(BaseSpider):
    """Crawls Naver's daily movie ranking pages and yields Movie items."""
    name = "rank"
    allowed_domains = ["movie.naver.com"]
    start_urls = None
    def __init__(self, tg='0'):
        # tg selects the genre tab; one URL per day is generated up front.
        self.tg = tg
        self.start_urls = make_urls(self.tg)
    def parse(self, response):
        # The chart date is recovered from the requested URL's query string.
        parsed = urlparse.urlparse(response.url)
        date = urlparse.parse_qs(parsed.query)['date']
        hxs = HtmlXPathSelector(response)
        items = []
        hrefs = hxs.xpath("//tbody/tr/td/div/a/@href").extract()
        titles = hxs.xpath("//tbody/tr/td/div/a/text()").extract()
        for index, href in enumerate(hrefs):
            movie = Movie()
            movie['url'] = href
            movie['name'] = titles[index]
            movie['rank'] = index + 1  # list order equals chart position
            movie['date'] = date[0]
            items.append(movie)
        return items
| {
"repo_name": "carpedm20/voxoffice",
"path": "scrapy/tutorial/spiders/spider.py",
"copies": "1",
"size": "1665",
"license": "bsd-3-clause",
"hash": 7751475743897971000,
"line_mean": 23.8507462687,
"line_max": 82,
"alpha_frac": 0.590990991,
"autogenerated": false,
"ratio": 3.2905138339920947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43815048249920946,
"avg_score": null,
"num_lines": null
} |
__author__ = 'carpedm20'
__date__ = '2014.07.25'
from scrapy.spider import BaseSpider
# http://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=cnt&date=20050207&tg=0
from scrapy.item import Item, Field
class Movie(Item):
    # Scrapy item holding one row of a Naver ranking table.
    name = Field()  # title text of the ranked entry
    url = Field()   # href of the detail page
    rank = Field()  # 1-based position in the table
    date = Field()  # ranking date taken from the request URL's 'date' query param
#tgs = range(20)
#tgs.remove(9)
def make_urls():
    """Build one "people" ranking URL per day from 2005-02-07 through today.

    :return: list of daily ranking URLs, oldest first.
    """
    url = "http://movie.naver.com/movie/sdb/rank/rpeople.nhn?date=%s"
    urls = []
    from datetime import date, timedelta
    current_date = date(2005, 2, 7)  # earliest date the chart is available
    end_date = date.today()
    delta = timedelta(days=1)
    while current_date <= end_date:
        urls.append(url % (current_date.strftime("%Y%m%d")))
        current_date += delta
    # Parenthesized single-arg print behaves identically on Python 2
    # and is valid Python 3 (the bare print statement is py2-only).
    print("[*] length of urls : %s" % len(urls))
    return urls
import urlparse
class RankSpider(BaseSpider):
    """Crawls the daily Naver "people" ranking pages and yields Movie items."""
    name = "people"
    allowed_domains = ["movie.naver.com"]
    start_urls = None
    def __init__(self):
        # NOTE(review): BaseSpider.__init__ is never invoked; confirm the
        # scrapy version in use tolerates this.
        self.start_urls = make_urls()
    def parse(self, response):
        # Ranking date travels in the request URL's query string.
        parsed = urlparse.urlparse(response.url)
        date = urlparse.parse_qs(parsed.query)['date']
        items = []
        hrefs = response.xpath("//tbody/tr/td[@class='title']/a/@href").extract()
        titles = response.xpath("//tbody/tr/td[@class='title']/a/text()").extract()
        # hrefs and titles are assumed parallel: one anchor per table row.
        for index, href in enumerate(hrefs):
            movie = Movie()
            movie['url'] = href
            movie['name'] = titles[index]
            movie['rank'] = index + 1  # rows appear in rank order
            movie['date'] = date[0]    # parse_qs yields a list per key
            items.append(movie)
        return items
| {
"repo_name": "carpedm20/voxoffice",
"path": "scrapy/tutorial/spiders/people.py",
"copies": "1",
"size": "1642",
"license": "bsd-3-clause",
"hash": -559398472010503700,
"line_mean": 24.2615384615,
"line_max": 83,
"alpha_frac": 0.5889159562,
"autogenerated": false,
"ratio": 3.277445109780439,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4366361065980439,
"avg_score": null,
"num_lines": null
} |
__author__ = 'carpedm20'
__date__ = '2014.07.25'
from scrapy.spider import BaseSpider
# http://music.naver.com/listen/history/index.nhn?type=TOTAL&year=2008&month=01&week=3
from scrapy.item import Item, Field
class Music(Item):
    # Scrapy item for one row of a weekly Naver music chart.
    name = Field()       # track title
    artist = Field()     # artist display name
    artist_id = Field()  # numeric artist id, or -1 when absent from the markup
    track_id = Field()   # numeric track id parsed from the track link
    album_id = Field()   # numeric album id parsed from the album link
    rank = Field()       # 1-based chart position
    date = Field()       # concatenated year+month+week from the request URL
#tgs = range(20)
#tgs.remove(9)
def make_urls(mode="TOTAL"):
    """Build weekly chart URLs for every week from 2008 through 2014.

    :param mode: chart type -- one of TOTAL, DOMESTIC, OVERSEA.
    :return: list of chart URLs ordered by year, then month, then week.
    """
    base = "http://music.naver.com/listen/history/index.nhn?type=%s&year=%s&month=%s&week=%s"
    # range() replaces the Python-2-only xrange(); in a for-loop context
    # the behavior is identical, and this keeps the function py3-valid.
    # NOTE(review): only weeks 1-4 are generated; a month with a 5th chart
    # week would be skipped -- confirm against the site.
    urls = [base % (mode, year, month, week)
            for year in range(2008, 2015)
            for month in range(1, 13)
            for week in range(1, 5)]
    # Parenthesized single-arg print works identically on Python 2 and 3.
    print("[*] length of urls : %s" % len(urls))
    return urls
import re
import urlparse
class RankSpider(BaseSpider):
    """Crawls weekly Naver music charts and yields Music items."""
    name = "music"
    # NOTE(review): allowed_domains says movie.naver.com but the crawled
    # URLs are on music.naver.com -- confirm offsite filtering is not
    # dropping every request.
    allowed_domains = ["movie.naver.com"]
    start_urls = None
    def __init__(self, mode="TOTAL"):
        self.start_urls = make_urls(mode)
    def parse(self, response):
        parsed = urlparse.urlparse(response.url)
        dic = urlparse.parse_qs(parsed.query)
        # The chart "date" is the concatenated year/month/week from the URL.
        date = dic['year'][0] + dic['month'][0] + dic['week'][0]
        items = []
        # NOTE(review): hrefs/titles below are computed but never used.
        hrefs = response.xpath("//tbody/tr/td[@class='title']/a/@href").extract()
        titles = response.xpath("//tbody/tr/td[@class='name']//span[@class='ellipsis']/text()").extract()
        # First table row is skipped ([1:]); presumably a header row -- confirm.
        for idx, elem in enumerate(response.xpath("//tbody/tr")[1:]):
            music = elem.xpath("./td[@class='name']")
            # First link in the cell carries the album id, the last the track id.
            href = music.xpath("./a/@href")[0].extract()
            album_id = int(re.findall(r'\d+', href)[0])
            href = music.xpath("./a/@href")[-1].extract()
            track_id = int(re.findall(r'\d+', href)[0])
            artist = elem.xpath("./td[@class='_artist artist']")
            if len(artist) == 0:
                # Alternate markup variant; no artist id is available here.
                artist = elem.xpath("./td[@class='_artist artist no_ell2']")
                artist_id = -1
                artist_name = artist.xpath("./a/text()")[0].extract()
            else:
                try:
                    href = artist.xpath("./a/@href")[0].extract()
                    artist_id = int(href[href.find('artistId=') + 9:])
                    artist_name = artist.xpath("./a/span/text()")[0].extract().strip()
                except:
                    # Bare except: any markup mismatch falls back to id -1.
                    artist_name = artist.xpath("./span/span/text()")[0].extract().strip()
                    artist_id = -1
            try:
                music_name = music.xpath("./a/span/text()")[0].extract()
            except:
                # Some rows render the title in nested spans instead of a link.
                music_name = music.xpath("./span/span/text()")[0].extract()
            # 'music' is re-bound from the selector above to the output item.
            music = Music()
            music['name'] = music_name
            music['artist'] = artist_name
            music['artist_id'] = artist_id
            music['track_id'] = track_id
            music['album_id'] = album_id
            music['rank'] = idx + 1
            music['date'] = date
            items.append(music)
        return items
| {
"repo_name": "carpedm20/voxoffice",
"path": "scrapy/tutorial/spiders/music.py",
"copies": "1",
"size": "3090",
"license": "bsd-3-clause",
"hash": 4313880998598796300,
"line_mean": 29,
"line_max": 105,
"alpha_frac": 0.5187702265,
"autogenerated": false,
"ratio": 3.5930232558139537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46117934823139534,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Casey Bajema'
from jcudc24ingesterapi import typed, APIDomainObject, ValidationError
from jcudc24ingesterapi.models.data_sources import _DataSource
from jcudc24ingesterapi.models.locations import LocationOffset
class Dataset(APIDomainObject):
    """
    Represents a single dataset and contains the information required to ingest the data as well as location
    metadata.
    """
    __xmlrpc_class__ = "dataset"
    # Typed descriptors: each declares the backing attribute name, the
    # accepted type(s) and, optionally, a description string.
    id = typed("_id", int)
    version = typed("_version", int)
    location = typed("_location", int, "ID of location for dataset")
    schema = typed("_schema", int, "ID of schema for dataset")
    data_source = typed("_data_source", _DataSource, "Data source used for ingesting")
    location_offset = typed("_location_offset", LocationOffset, "Offset from the locations frame of reference")
    redbox_uri = typed("_redbox_uri", (str, unicode), "Redbox URI")
    enabled = typed("_enabled", bool, "Dataset enabled")
    running = typed("_running", bool, "Dataset running")
    description = typed("_description", (str, unicode), "Description of dataset")
    repository_id = typed("_repository_id", (str))
    def __init__(self, dataset_id=None, location=None, schema=None, data_source=None, redbox_uri=None, location_offset=None, enabled=False):
        """Build a dataset; all arguments are optional and default to unset.

        NOTE(review): 'running' and 'repository_id' are never initialised
        here -- confirm they are populated elsewhere before being read.
        """
        self.id = dataset_id
        self.location = location
        self.schema = schema  # subclass of DataType
        self.data_source = data_source
        self.redbox_uri = redbox_uri  # URL to the ReDBox collection.
        self.enabled = enabled
        self.description = None
        self.location_offset = location_offset
    def validate(self):
        """Return a list of ValidationErrors; empty when the dataset is valid."""
        valid = []
        if self.location == None:
            valid.append(ValidationError("location", "Location must be set"))
        if self.schema == None:
            valid.append(ValidationError("schema", "Schema must be set"))
        return valid
| {
"repo_name": "jcu-eresearch/jcu.dc24.ingesterapi",
"path": "jcudc24ingesterapi/models/dataset.py",
"copies": "1",
"size": "1941",
"license": "bsd-3-clause",
"hash": 8533389889003021000,
"line_mean": 47.525,
"line_max": 140,
"alpha_frac": 0.658423493,
"autogenerated": false,
"ratio": 4.086315789473685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5244739282473685,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Casey Bajema'
from jcudc24ingesterapi import typed, APIDomainObject, ValidationError
class Region(APIDomainObject):
    """
    Represents a 2D area on earth, possible a sub-region of another regions.
    An example would be that Queensland is a sub-region of Australia
    """
    __xmlrpc_class__ = "region"
    id = typed("_id", int)
    version = typed("_version", int)
    name = typed("_name", (str, unicode))
    region_points = typed("_region_points", (tuple, list))
    parent_region = typed("_parent_region", int)
    def __init__(self, region_name=None, region_points=None, parent_regions=None, region_id=None):
        """
        :param region_name: A human recognisable string naming the region
        :param region_points: A 2D array of latitude/longitude points ((lat, long), (lat, long),...), the
                            last point joins the first point to close the region.
        :param parent_regions: A region object containing the parent region.
        :return: The initialised region.
        """
        self.id = region_id
        self.name = region_name
        self.region_points = region_points
        # NOTE(review): the descriptor declares parent_region as an int ID,
        # but the docstring says a Region object -- confirm which is intended.
        self.parent_region = parent_regions
class Location(APIDomainObject):
    """
    A 3D point on Earth.
    """
    __xmlrpc_class__ = "location"
    id = typed("_id", int)
    version = typed("_version", int)
    name = typed("_name", (str, unicode))
    latitude = typed("_latitude", float)
    longitude = typed("_longitude", float)
    elevation = typed("_elevation", (int, float))
    region = typed("_region", int, "ID of region")
    repository_id = typed("_repository_id", (str))
    def __init__(self, latitude=None, longitude=None, location_name=None, elevation=None, region=None):
        """
        :param latitude: Double value indicating the latitude (WGS84 assumed, metadata should be attached otherwise)
        :param longitude: Double value representing the longitude (WGS84 assumed, metadata should be attached otherwise)
        :param location_name: Human identifiable string naming this location
        :param elevation: Height of the location (Height above mean sea level assumed, attach metadata otherwise)
        :param region: A region object that this location is associated with, the location should be within
                        the regions area.
        :return: Initialised Location object.
        """
        # NOTE(review): id is always reset to None -- the constructor offers
        # no way to rehydrate an existing location; confirm that is intended.
        self.id = None
        self.name = location_name  # String
        self.latitude = latitude  # double
        self.longitude = longitude  # double
        self.elevation = elevation  # double
        self.region = region
    def validate(self):
        """Return a list of ValidationErrors; empty when the location is valid."""
        valid = []
        if self.name == None:
            valid.append(ValidationError("name", "Name must be set"))
        return valid
class LocationOffset(APIDomainObject):
    """An offset from a frame of reference.
    """
    __xmlrpc_class__ = "offset"
    # Cartesian components; ints or floats accepted, units not specified here.
    x = typed("_x", (int, float))
    y = typed("_y", (int, float))
    z = typed("_z", (int, float))
    def __init__(self, x=None, y=None, z=None):
        """Create an offset; unspecified axes default to None (unset)."""
        self.x = x
        self.y = y
        self.z = z
| {
"repo_name": "jcu-eresearch/jcu.dc24.ingesterapi",
"path": "jcudc24ingesterapi/models/locations.py",
"copies": "1",
"size": "3232",
"license": "bsd-3-clause",
"hash": -1037607897714154000,
"line_mean": 40.4358974359,
"line_max": 120,
"alpha_frac": 0.603960396,
"autogenerated": false,
"ratio": 4.1811125485122895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5285072944512289,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Casey Bajema'
import logging
from jcudc24ingesterapi import typed, APIDomainObject, ValidationError
from jcudc24ingesterapi.schemas.data_types import DataType
logger = logging.getLogger(__name__)
class TypedList(list):
    """A list that only accepts instances of a single type via append().

    NOTE(review): only append() is guarded; extend(), insert() and slice
    assignment still bypass the check -- callers in this package only use
    append(), confirm before relying on stricter guarantees.
    """
    def __init__(self, valid_type):
        # Type (or tuple of types) every appended item must satisfy.
        self.valid_type = valid_type
    def append(self, item):
        """Append item, raising TypeError if it is not a valid_type instance."""
        if not isinstance(item, self.valid_type):
            # Call-form raise is valid on both Python 2 and 3; the original
            # "raise TypeError, msg" statement form is Python-2-only syntax.
            raise TypeError('item is not of type %s' % self.valid_type)
        super(TypedList, self).append(item)  # append the item to itself (the list)
class SchemaAttrDict(dict):
    """Dict whose values must carry a ``name`` attribute equal to their key."""
    def __init__(self, *args, **kwargs):
        # Route construction through update() so every entry is validated.
        self.update(*args, **kwargs)
    def __setitem__(self, key, value):
        # Reject entries whose key disagrees with the value's own name.
        if value.name != key:
            raise ValueError("The provided key and the fields name do not match")
        super(SchemaAttrDict, self).__setitem__(key, value)
    def update(self, *args, **kwargs):
        """Validated re-implementation of dict.update (one positional mapping
        plus keyword entries); every assignment goes through __setitem__."""
        if len(args) > 1:
            raise TypeError("update expected at most 1 arguments, got %d" % len(args))
        if args:
            for key, value in dict(args[0]).items():
                self[key] = value
        for key, value in kwargs.items():
            self[key] = value
class Schema(APIDomainObject):
    """
    Base class for all calibration schemas that provide a known type.
    All calibration schemas that will be used need to be setup when creating the dataset.
    Each calibration schema has a 1:1 relationship with each data_entry. This means that there can only
    be 1 QualitySchema calibration for a specific data_entry but there may be a different calibration
    (sub-classed from _CalibrationSchema) added to the same data_entry. Sending a duplicate calibration
    will overwrite previous values.
    """
    id = typed("_id", int)
    version = typed("_version", int)
    name = typed("_name", (str, unicode))
    repository_id = typed("_repository_id", (str))
    def __init__(self, name=None):
        self.name = name
        # Private (name-mangled) stores, exposed read-only via the
        # properties below; values are validated on insertion.
        self.__attrs = SchemaAttrDict()
        self.__extends = TypedList(int)
    def addAttr(self, data_type):
        """Add a DataType attribute, keyed by its own name."""
        if not isinstance(data_type, DataType):
            raise ValueError("Not a subclass of DataType")
        self.attrs[data_type.name] = data_type
    @property
    def attrs(self):
        # Mapping of attribute name -> DataType (key/name match enforced
        # by SchemaAttrDict).
        return self.__attrs
    @property
    def extends(self):
        # IDs (ints) of the schemas this schema extends.
        return self.__extends
    @extends.setter
    def extends(self, values):
        """Check that the list is valid before replacing it"""
        tmp = TypedList(int)
        for v in values:
            tmp.append(v)
        self.__extends = tmp
    def validate(self):
        """Return a list of ValidationErrors; empty when the schema is valid."""
        valid = []
        if self.name == None:
            valid.append(ValidationError("name", "Name must be set"))
        return valid
class ConcreteSchema(object):
    """The concrete schema composites all the individual schemas into
    a domain object.
    """
    def __init__(self, schemas=None):
        """Composite zero or more Schema objects into one attribute map."""
        # Avoid a shared mutable default by normalising None to a fresh list.
        if schemas == None: schemas = []
        self.__attrs = SchemaAttrDict()
        # Add all the passed schemas to the concrete schema
        for schema in schemas:
            self.add(schema)
    def add(self, schema):
        """Add all the attributes to the concrete schema's list"""
        # Attribute names must stay unique across every composited schema.
        for attr in schema.attrs:
            if attr in self.__attrs: raise ValueError("Duplicate attributes: " + attr)
            self.__attrs[attr] = schema.attrs[attr]
    @property
    def attrs(self):
        return self.__attrs | {
"repo_name": "jcu-eresearch/jcu.dc24.ingesterapi",
"path": "jcudc24ingesterapi/schemas/__init__.py",
"copies": "1",
"size": "3547",
"license": "bsd-3-clause",
"hash": 6785203315810967000,
"line_mean": 32.7904761905,
"line_max": 104,
"alpha_frac": 0.6143219622,
"autogenerated": false,
"ratio": 4.182783018867925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5297104981067925,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Casey Bajema'
import re
from jcudc24ingesterapi import typed
RE_ATTR_NAME = re.compile("^[A-Za-z][A-Za-z0-9_]*$")
class DataType(object):
    """
    Base data type schema defines an empty dictionary that can have fields added to it dynamically,
    these fields will then be used by the ingester platform to setup the required table.
    Field names map to table column names
    Note: ForeignKey or other table links are not supported, only single, flat tables are supported.
    """
    # NOTE(review): description only accepts str while name/units also accept
    # unicode -- confirm whether that asymmetry is intentional.
    description = typed("_description", str, "Description of the field")
    name = typed("_name", (str, unicode), "Name of the field")
    units = typed("_units", (str, unicode), "Units of the field")
    def __init__(self, name, description=None, units=None):
        """Create a field definition; name must match RE_ATTR_NAME
        (identifier-like: letter followed by letters/digits/underscores).

        :raises ValueError: if name is None or not identifier-like.
        """
        if name is None or RE_ATTR_NAME.match(name) == None:
            raise ValueError("Name is not valid")
        self.name = name
        self.description = description
        self.units = units
class FileDataType(DataType):
    """
    This schema extends the base _DataType schema and additionally defines that data will be stored as a
    flat file and each data_entry will provide the file mime_type and the file handle.
    Ingesters that want to index additional data should add fields to this schema and provide
    a custom processing script to extract that data.
    """
    # Tag consumed by the XML-RPC marshalling layer to identify this type.
    __xmlrpc_class__ = "file"
# Marker subclasses: each scalar schema type differs from DataType only by
# the __xmlrpc_class__ tag used by the XML-RPC marshalling layer.
class String(DataType):
    __xmlrpc_class__ = "string"
class Integer(DataType):
    __xmlrpc_class__ = "integer"
class Double(DataType):
    __xmlrpc_class__ = "double"
class DateTime(DataType):
    __xmlrpc_class__ = "datetime"
class Boolean(DataType):
    __xmlrpc_class__ = "boolean"
| {
"repo_name": "jcu-eresearch/jcu.dc24.ingesterapi",
"path": "jcudc24ingesterapi/schemas/data_types.py",
"copies": "1",
"size": "1690",
"license": "bsd-3-clause",
"hash": -2948329365884264400,
"line_mean": 30.8867924528,
"line_max": 104,
"alpha_frac": 0.674556213,
"autogenerated": false,
"ratio": 3.976470588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5151026801235294,
"avg_score": null,
"num_lines": null
} |
__author__ = 'casey'
from collections import Iterable
import numpy as np
from coverage_model.coverage import AbstractCoverage, ComplexCoverageType, SimplexCoverage
from coverage_model.coverages.aggregate_coverage import AggregateCoverage
from coverage_model.coverages.coverage_extents import ReferenceCoverageExtents, ExtentsDict
from coverage_model.parameter_data import NumpyDictParameterData
class ComplexCoverage(AggregateCoverage):
    """
    References 1-n coverages
    """
    def __init__(self, root_dir, persistence_guid, name=None, reference_coverage_locs=None, reference_coverage_extents=None, parameter_dictionary=None,
                 mode=None, complex_type=ComplexCoverageType.PARAMETRIC_STRICT, temporal_domain=None, spatial_domain=None):
        """Create or reopen a complex coverage composed of referenced coverages."""
        # Sets h5py file operation mode to 'w' if not specified
        # 'w' = Create file, truncate if exists
        if mode is None:
            mode = 'w'
        reference_coverage_extents = ExtentsDict(reference_coverage_extents)
        # Initializes base class with proper mode.
        super(ComplexCoverage, self).__init__(root_dir, persistence_guid, name, reference_coverage_locs, reference_coverage_extents, parameter_dictionary,
                                              mode, complex_type, temporal_domain, spatial_domain)
    def num_timesteps(self):
        """Total number of records, derived from the merged 'time' parameter."""
        return self.get_parameter_values('time').get_data()['time'].shape[0]
    def get_parameter_values(self, param_names=None, time_segment=None, time=None,
                             sort_parameter=None, stride_length=None, return_value=None, fill_empty_params=False,
                             function_params=None, as_record_array=False, remove_duplicate_records=False):
        '''
        Obtain the value set for a given parameter over a specified domain
        '''
        if param_names is None:
            param_names = self.list_parameters()
        # Normalise a single parameter name to a one-element list.
        if not isinstance(param_names, Iterable) or isinstance(param_names, basestring):
            param_names = [param_names]
        cov_value_list = []
        # Tracks parameters that produced no data from any referenced coverage.
        all_empty = set(param_names)
        for coverage in self._reference_covs.values():
            fill_params = set()
            # Only simplex (leaf) coverages are queried directly.
            if isinstance(coverage, SimplexCoverage):
                for param_name in param_names:
                    if param_name not in self.list_parameters():
                        raise KeyError('No parameter named %s in Complex Coverage' % param_name)
                    if param_name in coverage.list_parameters():
                        # NOTE(review): fill-value mismatches are only reported
                        # (Python-2 print statement), not reconciled.
                        if coverage.get_parameter_context(param_name).fill_value != self.get_parameter_context(param_name).fill_value:
                            print 'different fill values - Handle it'
                    else:
                        # Parameter absent from this coverage: synthesise fill data later.
                        fill_params.add(param_name)
                if param_names is not None:
                    # Restrict the query to parameters this coverage actually has.
                    this_param_names = set(param_names)
                    this_param_names = this_param_names.intersection(set(coverage.list_parameters()))
                    this_param_names = list(this_param_names)
                extent_segments = None
                if coverage.persistence_guid in self._persistence_layer.rcov_extents.data:
                    extent_segments = list(self._persistence_layer.rcov_extents.data[coverage.persistence_guid])
                if extent_segments is not None:
                    # Query each recorded extent segment that overlaps the request.
                    for extents in extent_segments:
                        from coverage_model.util.extent_utils import get_overlap
                        if isinstance(extents, ReferenceCoverageExtents):
                            extents = extents.time_extents
                        current_time_segment = None
                        try:
                            current_time_segment = get_overlap(extents, time_segment)
                        except RuntimeError:
                            # No overlap with the requested window; skip this segment.
                            continue
                        params = coverage.get_parameter_values(this_param_names, current_time_segment, time, sort_parameter,
                                                               return_value, fill_empty_params, function_params, as_record_array=False,
                                                               remove_duplicate_records=remove_duplicate_records)
                        # if len(params.get_data()) == 1 and coverage.temporal_parameter_name in params.get_data():
                        #     continue
                        cov_dict = params.get_data()
                        for param_name in param_names:
                            if param_name not in fill_params and param_name not in cov_dict:
                                fill_params.add(param_name)
                            elif param_name in cov_dict and param_name in all_empty:
                                all_empty.remove(param_name)
                        size = cov_dict[coverage.temporal_parameter_name].size
                        self._add_filled_arrays(fill_params, cov_dict, size)
                        self._add_coverage_array(cov_dict, size, coverage.persistence_guid)
                        if time is not None and time_segment is None and len(cov_value_list) > 0:
                            # Point-in-time query: keep only the candidate whose
                            # first timestamp lies closest to the requested time.
                            new = cov_dict[coverage.temporal_parameter_name][0]
                            old = cov_value_list[0][0][coverage.temporal_parameter_name][0]
                            if abs(new-time) < abs(old-time):
                                cov_value_list = [(cov_dict, coverage)]
                        else:
                            cov_value_list.append((cov_dict, coverage))
        combined_data = self._merge_value_dicts(cov_value_list, stride_length=stride_length)
        if not fill_empty_params:
            # Strip parameters that were all-fill, keeping the time axis.
            for param_name in all_empty:
                if param_name in combined_data and param_name != self.temporal_parameter_name:
                    combined_data.pop(param_name)
        if sort_parameter is None:
            sort_parameter = self.temporal_parameter_name
        if sort_parameter not in combined_data:
            sort_parameter = None
        return NumpyDictParameterData(combined_data, alignment_key=sort_parameter, as_rec_array=as_record_array)
    def _add_filled_arrays(self, params, cov_dict, size):
        """Insert a fill-valued array of length size for each named parameter."""
        new_arrays = {}
        for param in params:
            pc = self.get_parameter_context(param)
            arr = np.empty(size, dtype=pc.param_type.value_encoding)
            arr[:] = pc.fill_value
            new_arrays[param] = arr
        cov_dict.update(new_arrays)
    def append_parameter(self, parameter_context):
        # Dad doesn't store it so go to granddad
        # (AggregateCoverage.append_parameter raises NotImplementedError).
        AbstractCoverage.append_parameter(self, parameter_context)
    def append_reference_coverage(self, path, extents=None, **kwargs):
        """Reference another coverage and record its extents."""
        super(ComplexCoverage, self).append_reference_coverage(path, **kwargs)
        rcov = AbstractCoverage.load(path)
        self.set_reference_coverage_extents(rcov.persistence_guid, extents, append=True)
    def set_reference_coverage_extents(self, coverage_id, extents, append=True):
        """Append (append=True) or replace the extents recorded for a coverage.

        :raises ValueError: if extents is None or references another coverage.
        :raises TypeError: if an extent is not a ReferenceCoverageExtents.
        """
        if extents is None:
            raise ValueError("Extents must be specified when appending reference coverages")
        if not isinstance(extents, (list, tuple)):
            extents = [extents]
        # Check that the extents are proper Extents and that they reference the associated coverage
        for extent in extents:
            if not isinstance(extent, ReferenceCoverageExtents):
                raise TypeError('Extents must be of type %s' % ReferenceCoverageExtents.__name__)
            if extent.cov_id != coverage_id:
                raise ValueError('Extent coverage_id, %s, does not match requested coverage id %s' % (extent.cov_id, coverage_id))
        # Make a new one
        if self._persistence_layer.rcov_extents is None:
            self._persistence_layer.rcov_extents = ExtentsDict()
        if append:
            self._persistence_layer.rcov_extents.add_extents(coverage_id, extents)
        else:
            self._persistence_layer.rcov_extents.replace_extents(coverage_id, extents)
    def get_reference_coverage_extents(self, coverage_id):
        """Return the extents recorded for the given coverage id."""
        return self._persistence_layer.rcov_extents[coverage_id]
| {
"repo_name": "ooici/coverage-model",
"path": "coverage_model/coverages/complex_coverage.py",
"copies": "1",
"size": "8186",
"license": "bsd-2-clause",
"hash": -5764268990815822000,
"line_mean": 52.8552631579,
"line_max": 154,
"alpha_frac": 0.5973613486,
"autogenerated": false,
"ratio": 4.420086393088553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5517447741688553,
"avg_score": null,
"num_lines": null
} |
__author__ = 'casey'
from coverage_model.storage.span_storage import SpanStorage
class InMemoryStorage(SpanStorage):
    """Span storage backed by a plain in-process dict (no persistence)."""
    def __init__(self):
        # coverage id -> list of spans, in insertion order
        self.coverage_dict = {}
    def write_span(self, span):
        # Create the per-coverage bucket lazily, then record the span.
        self.coverage_dict.setdefault(span.coverage_id, []).append(span)
    def get_spans(self, span_ids=None, coverage_ids=None, params=None, start_time=None, stop_time=None, decompressors=None):
        """Return spans for the requested coverages, optionally filtered by span id."""
        # Normalise the coverage filter: None means every coverage, a bare
        # string means exactly one coverage.
        if coverage_ids is None:
            coverage_ids = self.coverage_dict.keys()
        elif isinstance(coverage_ids, basestring):
            coverage_ids = [coverage_ids]
        matched = []
        for cov_id in coverage_ids:
            if cov_id not in self.coverage_dict:
                raise KeyError('%s is not a valid coverage' % cov_id)
            for candidate in self.coverage_dict[cov_id]:
                if span_ids is None or candidate.id in span_ids:
                    matched.append(candidate)
        return matched
    def get_span_hash(self, span_id):
        # Streamed spans never get a hash computed.
        raise NotImplementedError("Hash not calculated for incoming stream")
| {
"repo_name": "ooici/coverage-model",
"path": "coverage_model/storage/in_memory_storage.py",
"copies": "1",
"size": "1185",
"license": "bsd-2-clause",
"hash": -8144909106374799000,
"line_mean": 37.2258064516,
"line_max": 124,
"alpha_frac": 0.6312236287,
"autogenerated": false,
"ratio": 3.872549019607843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5003772648307843,
"avg_score": null,
"num_lines": null
} |
__author__ = 'casey'
from ooi.logging import log
from coverage_model.coverage import *
from coverage_model.parameter import ParameterDictionary
from coverage_model.parameter_data import NumpyDictParameterData
from coverage_model.parameter_values import get_value_class
from coverage_model.persistence import is_persisted
from coverage_model.storage.parameter_persisted_storage import PostgresPersistenceLayer, PostgresPersistedStorage
from coverage_model.util.numpy_utils import NumpyUtils
from coverage_model.utils import Interval
class AggregateCoverage(AbstractCoverage):
"""
References 1-n coverages
"""
def __init__(self, root_dir, persistence_guid, name=None, reference_coverage_locs=None, reference_coverage_extents=None, parameter_dictionary=None,
mode=None, complex_type=ComplexCoverageType.PARAMETRIC_STRICT, temporal_domain=None, spatial_domain=None):
# Initializes base class with proper mode.
super(AggregateCoverage, self).__init__(mode)
if root_dir != None:
log.info("\'root_dir\' specification is OBE. Use coverage configuration file to specify root_dir")
try:
# Make sure persistence_guid is string
if not isinstance(persistence_guid, basestring):
raise TypeError('\'persistence_guid\' must be instance of basestring')
root_dir = CoverageConfig().top_level_storage_location
if is_persisted(root_dir, persistence_guid):
self._existing_coverage(root_dir, persistence_guid)
else:
self._new_coverage(root_dir, persistence_guid, name, reference_coverage_locs, parameter_dictionary, complex_type, reference_coverage_extents)
except:
self._closed = True
raise
self._do_build()
def _existing_coverage(self, root_dir, persistence_guid):
if not is_persisted(root_dir, persistence_guid):
raise SystemError('Cannot find specified coverage: {0}'.format(persistence_guid))
self._persistence_layer = PostgresPersistenceLayer(root_dir, persistence_guid, mode=self.mode)
if self._persistence_layer.version != self.version:
raise IOError('Coverage Model Version Mismatch: %s != %s' %(self.version, self._persistence_layer.version))
self.name = self._persistence_layer.name
self.mode = self.mode
self._reference_covs = collections.OrderedDict()
def _new_coverage(self, root_dir, persistence_guid, name, reference_coverage_locs, parameter_dictionary, complex_type, reference_coverage_extents={}):
reference_coverage_locs = reference_coverage_locs or [] # Can be empty
# Coverage doesn't exist, make a new one
if name is None:
raise SystemError('\'reference_coverages\' and \'name\' cannot be None')
if not isinstance(name, basestring):
raise TypeError('\'name\' must be of type basestring')
self.name = name
if parameter_dictionary is None:
parameter_dictionary = ParameterDictionary()
# Must be in 'a' for a new coverage
self.mode = 'a'
self._reference_covs = collections.OrderedDict()
if not hasattr(reference_coverage_locs, '__iter__'):
reference_coverage_locs = [reference_coverage_locs]
self._persistence_layer = PostgresPersistenceLayer(root_dir,
persistence_guid,
name=self.name,
mode=self.mode,
param_dict=parameter_dictionary,
rcov_locs=reference_coverage_locs,
rcov_extents=reference_coverage_extents,
complex_type=complex_type,
coverage_type='complex',
version=self.version)
for pc in parameter_dictionary.itervalues():
self.append_parameter(pc[1])
def close(self, force=False, timeout=None):
if not hasattr(self, '_closed'):
# _closed is the first attribute added to the coverage object (in AbstractCoverage)
# If it's not there, a TypeError has likely occurred while instantiating the coverage
# nothing else exists and we can just return
return
if not self._closed:
for cov_pth, cov in self._reference_covs.iteritems():
log.info('Closing reference coverage \'%s\'', cov.name if hasattr(cov,'name') else 'unnamed')
cov.close(force, timeout)
AbstractCoverage.close(self, force, timeout)
def append_parameter(self, parameter_context):
raise NotImplementedError('Parameter value retrieval not implemented.')
def append_reference_coverage(self, path, **kwargs):
ncov = AbstractCoverage.load(path)
# Loading the coverage worked - proceed...
# Get the current set of reference coverages
if path in self._persistence_layer.rcov_locs:
# Already there, note it and just return
log.info('Coverage already referenced: \'%s\'', path)
else:
self._persistence_layer.rcov_locs.append(path)
if ncov.persistence_guid not in self._reference_covs:
self._reference_covs[ncov.persistence_guid] = ncov
self._do_build()
def _do_build(self):
# Reset things to ensure we don't munge everything
self._reference_covs = collections.OrderedDict()
self._range_dictionary = ParameterDictionary()
self._range_value = RangeValues()
self._reference_covs = self._build_ordered_coverage_dict()
for parameter_name in self._persistence_layer.parameter_metadata:
md = self._persistence_layer.parameter_metadata[parameter_name]
mm = self._persistence_layer.master_manager
pc = md.parameter_context
# Assign the coverage's domain object(s)
self._assign_domain(pc)
# Get the callbacks for ParameterFunctionType parameters
if hasattr(pc, '_pval_callback'):
pc._pval_callback = self.get_parameter_values
pc._pctxt_callback = self.get_parameter_context
self._range_dictionary.add_context(pc)
s = PostgresPersistedStorage(md, metadata_manager=mm, parameter_context=pc, dtype=pc.param_type.storage_encoding, fill_value=pc.param_type.fill_value, mode=self._persistence_layer.mode)
self._persistence_layer.value_list[parameter_name] = s
self._range_value[parameter_name] = get_value_class(param_type=pc.param_type, domain_set=pc.dom, storage=s)
def _build_ordered_coverage_dict(self):
covs = self._verify_rcovs(self._persistence_layer.rcov_locs)
cov_dict = collections.OrderedDict()
cov_list = []
for i in covs:
cov = i[1]
if isinstance(cov, AbstractCoverage):
try:
temporal_bounds = cov.get_data_bounds(cov.temporal_parameter_name)
cov_list.append((temporal_bounds[0], temporal_bounds[1], cov))
except ValueError:
cov_list.append((None, None, cov))
cov_list.sort(key=lambda tup: tup[0])
for start, end, cov in cov_list:
if isinstance(cov, AbstractCoverage):
cov_dict[cov.persistence_guid] = cov
self._head_coverage_path = cov.head_coverage_path
return cov_dict
def interval_map(self):
'''
Builds a reference structure and returns the bounds and the associated reference coverages
note: only works for 1-d right now
'''
interval_map = []
for scov in self._reference_covs.itervalues():
interval = scov.get_data_bounds(scov.temporal_parameter_name)
interval = Interval(interval[0], interval[1], None, None)
interval_map.append((interval, scov))
self._interval_qsort(interval_map)
return interval_map
@classmethod
def _interval_swap(cls, arr, x0, x1):
if x0 != x1:
t = arr[x0]
arr[x0] = arr[x1]
arr[x1] = t
@classmethod
def _interval_pivot(cls, arr, left, right, pivot):
val = arr[pivot][0]
cls._interval_swap(arr, pivot, right)
store_index = left
for i in xrange(left, right):
if arr[i][0] < val:
cls._interval_swap(arr, i, store_index)
store_index += 1
cls._interval_swap(arr, store_index, right)
return store_index
@classmethod
def _interval_qsort(cls, arr, left=None, right=None):
'''
Quicksort for the interval map
'''
if left is None:
left = 0
if right is None:
right = len(arr) - 1
if left < right:
pivot = (right - left) / 2 + left
pivot = cls._interval_pivot(arr, left, right, pivot)
cls._interval_qsort(arr, left, pivot-1)
cls._interval_qsort(arr, pivot+1, right)
def _merge_value_dicts(self, value_dicts, override_temporal_key=None, stride_length=None):
total_size = 0
dtype_map = {}
dict_by_param_name = {}
for param_dict, coverage in value_dicts:
skip_coverage = False
temporal_key = coverage.temporal_parameter_name
if override_temporal_key is not None:
temporal_key = override_temporal_key
cov_dict_size = param_dict[temporal_key].size
for key, np_arr in param_dict.iteritems():
if np_arr.shape[0] != cov_dict_size:
log.error("Internal coverage parameter dictionaries don't align! Skipping coverage")
skip_coverage = True
break
if key not in dtype_map:
dtype_map[key] = np_arr.dtype
else:
if dtype_map[key] != np_arr.dtype:
dtype_map[key] = np.dtype('object')
if not skip_coverage:
total_size += cov_dict_size
for param_name, np_arr in param_dict.iteritems():
if param_name not in dict_by_param_name:
dict_by_param_name[param_name] = []
dict_by_param_name[param_name].append(np_arr)
return_dict = {}
for param_name, arr_list in dict_by_param_name.iteritems():
if param_name not in self.list_parameters():
arr = np.empty(total_size, dtype=dtype_map[param_name])
current_pos = 0
for a in arr_list:
size = a.size
arr[current_pos:current_pos+size] = a[:]
current_pos += size
return_dict[param_name] = arr
else:
return_dict[param_name] = self.get_parameter_context(param_name).param_type.create_merged_value_array(arr_list)
# for key, dt in dtype_map.iteritems():
# arr = np.empty(total_size, dtype=dt)
# arr[:] = None
# return_dict[key] = arr
#
# current_index = 0
# for param_dict, coverage in value_dicts:
# if isinstance(coverage, SimplexCoverage):
# temporal_key = coverage.temporal_parameter_name
# if override_temporal_key is not None:
# temporal_key = override_temporal_key
# size = param_dict[temporal_key].size
# for key in dtype_map.keys():
# if key in param_dict:
# return_dict[key][current_index:current_index+size] = param_dict[key]
# elif key in coverage.list_parameters():
# return_dict[key][current_index:current_index+size] = coverage.get_parameter_context(key).param_type.fill_value
# current_index += size
if stride_length is not None:
for k,v in return_dict.iteritems():
return_dict[k] = v[::stride_length]
return return_dict
def _add_coverage_array(cls, param_dict, size, cov_id):
arr = np.chararray(size, len(cov_id))
arr[:] = cov_id
tmp_dict = {'coverage_id': arr}
param_dict.update(tmp_dict)
def get_time_values(self, time_segement=None, stride_length=None, return_value=None):
cov_value_list = []
dummy_key = "stripped_later"
for coverage in self._reference_covs.values():
if isinstance(coverage, AbstractCoverage):
params = coverage.get_time_values(time_segement, stride_length, return_value)
cov_dict = {dummy_key: params}
cov_value_list.append((cov_dict, coverage))
combined_data = self._merge_value_dicts(cov_value_list, override_temporal_key=dummy_key, stride_length=stride_length)
if dummy_key in combined_data:
combined_data = NumpyUtils.sort_flat_arrays(combined_data, dummy_key)
return combined_data[dummy_key] #TODO: Handle case where 'time' may not be temporal parameter name of all sub-coverages
else:
return np.array([])
def get_parameter_values(self, param_names=None, time_segment=None, time=None,
sort_parameter=None, stride_length=None, return_value=None, fill_empty_params=False,
function_params=None, as_record_array=False, remove_duplicate_records=False):
'''
Obtain the value set for a given parameter over a specified domain
'''
get_times_too = self.temporal_parameter_name in param_names
cov_value_list = []
for coverage in self._reference_covs.values():
if isinstance(coverage, SimplexCoverage):
if param_names is not None:
this_param_names = set(param_names)
this_param_names = this_param_names.intersection(set(coverage.list_parameters()))
this_param_names = list(this_param_names)
params = coverage.get_parameter_values(this_param_names, time_segment, time, sort_parameter, stride_length,
return_value, fill_empty_params, function_params, as_record_array=False,
remove_duplicate_records=remove_duplicate_records)
if len(params.get_data()) == 1 and coverage.temporal_parameter_name in params.get_data() and not get_times_too:
continue
cov_dict = params.get_data()
size = cov_dict[coverage.temporal_parameter_name].size
self._add_coverage_array(cov_dict, size, coverage.persistence_guid)
if time is not None and time_segment is None:
new = cov_dict[coverage.temporal_parameter_name][0]
old = cov_value_list[0][0][coverage.temporal_parameter_name][0]
if abs(new-time) < abs(old-time):
cov_value_list = [(cov_dict, coverage)]
else:
cov_value_list.append((cov_dict, coverage))
combined_data = self._merge_value_dicts(cov_value_list)
return NumpyDictParameterData(combined_data, alignment_key=sort_parameter, as_rec_array=as_record_array)
@classmethod
def _value_dict_swap(cls, value_dict, x0, x1):
'''
Value dictionary array swap
'''
if x0 != x1:
for name,arr in value_dict.iteritems():
t = arr[x0]
arr[x0] = arr[x1]
arr[x1] = t
    @classmethod
    def _value_dict_pivot(cls, value_dict, axis, left, right, pivot):
        '''
        Pivot algorithm, part of quicksort

        Partitions value_dict[axis] (and, via _value_dict_swap, every other
        array in the dict in lockstep) around the value at index `pivot`
        within [left, right].  The '__idx__' array records original positions
        and is used as a tie-breaker so equal keys keep their relative order.
        Returns the final resting index of the pivot value.
        '''
        axis_arr = value_dict[axis]
        idx_arr = value_dict['__idx__']
        val = axis_arr[pivot]
        # Park the pivot at the right edge while partitioning.
        cls._value_dict_swap(value_dict, pivot, right)
        store_index = left
        for i in xrange(left, right):
            if axis_arr[i] < val:
                cls._value_dict_swap(value_dict, i, store_index)
                store_index += 1
            # This part is critical to maintaining the precedence :)
            # NOTE(review): this test re-reads axis_arr[i] AFTER the swap
            # above may have replaced the element at i, so both branches can
            # fire for the same i -- confirm this is the intended tie-break.
            if axis_arr[i] == val and idx_arr[i] < idx_arr[right]:
                cls._value_dict_swap(value_dict, i, store_index)
                store_index += 1
        # Move the pivot into its final slot.
        cls._value_dict_swap(value_dict, store_index, right)
        return store_index
@classmethod
def _value_dict_qsort(cls, value_dict, axis, left=None, right=None):
'''
Quicksort, value dictionary edition
modifications are in-place for a stable search
'''
top_call = left is None and right is None
if top_call:
value_dict['__idx__'] = np.arange(len(value_dict[axis]))
if left is None:
left = 0
if right is None:
right = len(value_dict[axis]) - 1
if left < right:
pivot = (right - left) / 2 + left
pivot = cls._value_dict_pivot(value_dict, axis, left, right, pivot)
cls._value_dict_qsort(value_dict, axis, left, pivot-1)
cls._value_dict_qsort(value_dict, axis, pivot+1, right)
if top_call:
del value_dict['__idx__']
@classmethod
def _value_dict_unique(cls, value_dict, axis):
'''
A naive unique copy algorithm
Notes:
- Last unique axis value has precedence
'''
tarray = value_dict[axis]
truth_array = np.ones(tarray.shape, dtype=np.bool)
for i in xrange(1, len(tarray)):
if tarray[i-1] == tarray[i]:
truth_array[i-1] = False
vd_copy = {}
for k,v in value_dict.iteritems():
vd_copy[k] = v[truth_array]
return vd_copy
def _verify_rcovs(self, rcovs):
for cpth in rcovs:
pth, uuid = get_dir_and_id_from_path(cpth)
if not MetadataManagerFactory.is_persisted(uuid):
log.warn('Cannot find coverage \'%s\'; ignoring', cpth)
continue
if uuid in self._reference_covs:
yield uuid, self._reference_covs[uuid]
continue
try:
cov = AbstractCoverage.load(cpth)
except Exception as ex:
log.warn('Exception loading coverage \'%s\'; ignoring. Exception: %s' % (cpth, ex.message))
continue
if cov.temporal_parameter_name is None:
log.warn('Coverage \'%s\' does not have a temporal_parameter; ignoring' % cpth)
continue
yield cov.persistence_guid, cov
def get_complex_type(self):
return self._persistence_layer.complex_type
def set_parameter_values(self, values):
self._append_to_coverage(values)
def set_time_values(self, values):
self.set_parameter_values({self.temporal_parameter_name: values})
def insert_value_set(self, value_dictionary):
self._append_to_coverage(value_dictionary)
def append_value_set(self, value_dictionary):
self._append_to_coverage(value_dictionary)
def _append_to_coverage(self, values):
raise NotImplementedError('Aggregate coverages are read-only')
def num_timesteps(self):
ts = 0
for coverage in self._reference_covs.values():
ts += coverage.num_timesteps()
return ts
def get_data_bounds(self, parameter_name=None):
if parameter_name is None:
parameter_name = self.list_parameters()
if isinstance(parameter_name, Iterable) and not isinstance(parameter_name, basestring) and len(parameter_name)>1:
rt = {}
for coverage in self._reference_covs.values():
for param in parameter_name:
if param in coverage.list_parameters():
bounds = coverage.get_data_bounds(param)
if param in rt:
if len(bounds) > 0:
rt[param] = (min(bounds[0], rt[param][0]), max(bounds[1], rt[param][1]))
else:
rt[param] = bounds
return rt
else:
rt = None
for coverage in self._reference_covs.values():
bounds = coverage.get_data_bounds(parameter_name)
if rt is None:
rt = bounds
else:
rt = (min(bounds[0], rt[0]), max(bounds[1], rt[1]))
return rt
| {
"repo_name": "ooici/coverage-model",
"path": "coverage_model/coverages/aggregate_coverage.py",
"copies": "1",
"size": "21044",
"license": "bsd-2-clause",
"hash": -3729567070958117400,
"line_mean": 43.7744680851,
"line_max": 197,
"alpha_frac": 0.571231705,
"autogenerated": false,
"ratio": 4.196211365902293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018985730639912574,
"num_lines": 470
} |
__author__ = 'casey'
import ast
class AddressFactory(object):
    """Factory that reconstructs Address objects from their serialized forms.

    Fixes: the ValueError messages were never %-formatted (the argument was
    passed as a second exception arg), and from_tuple silently returned None
    for tuples shorter than 2 elements.
    """

    @staticmethod
    def from_db_str(st):
        """Build an address from a ':::'-separated database string."""
        try:
            if len(st) > 0 and ':::' in st:
                s = st.split(":::")
                if s[0] == BrickAddress.__name__:
                    return BrickAddress.from_db_str(st)
                elif s[0] == BrickFileAddress.__name__:
                    return BrickFileAddress.from_db_str(st)
                elif s[0] == FileAddress.__name__:
                    return FileAddress.from_db_str(st)
                elif s[0] == Address.__name__:
                    return Address.from_db_str(st)
            elif len(st) > 0:
                return Address(st)
        except Exception:
            # Any parse failure means the string is not a recognized
            # encoding; fall through to the ValueError below.
            pass
        raise ValueError("Do not know how to build address from string: %s" % (st,))

    @staticmethod
    def from_str(st):
        """Build an address from the str()/repr() of its as_dict() form."""
        try:
            d = ast.literal_eval(st)
            if isinstance(d, dict) and 'type' in d:
                t = d['type']
                if t == BrickAddress.__name__:
                    return BrickAddress.from_dict(d)
                elif t == BrickFileAddress.__name__:
                    return BrickFileAddress.from_dict(d)
                elif t == FileAddress.__name__:
                    return FileAddress.from_dict(d)
                elif t == Address.__name__:
                    return Address.from_dict(d)
        except Exception:
            pass
        raise ValueError("Do not know how to build address from string: %s" % (st,))

    @staticmethod
    def from_tuple(tup):
        """Build an address from its as_tuple() form."""
        # A too-short tuple used to fall through and return None silently.
        if len(tup) < 2:
            raise ValueError("Cannot build an address from tuple: %s" % (str(tup),))
        address_type = tup[0]
        if address_type == Address.__name__:
            return Address.from_tuple(tup)
        elif address_type == FileAddress.__name__:
            return FileAddress.from_tuple(tup)
        elif address_type == BrickAddress.__name__:
            return BrickAddress.from_tuple(tup)
        elif address_type == BrickFileAddress.__name__:
            return BrickFileAddress.from_tuple(tup)
        else:
            raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))
class Address(object):
    """Base class for addressable storage locations, keyed by coverage uid.

    Serializes via as_dict()/as_tuple() and reconstructs with the matching
    from_dict()/from_tuple()/from_str() alternate constructors.

    Fixes: from_tuple demanded a 1-tuple although as_tuple produces a
    2-tuple (round trips always failed, and the uid was taken from the type
    tag); __hash__ hashed an unhashable dict; a dead duplicate
    get_top_level_key (which raised) was removed.
    """

    def __init__(self, coverage_uid):
        self.coverage_uid = coverage_uid

    def as_dict(self):
        return {'type': Address.__name__,
                'coverage_uid': self.coverage_uid}

    @staticmethod
    def from_dict(dic):
        if 'type' in dic and dic['type'] == Address.__name__:
            if 'coverage_uid' in dic:
                return Address(dic['coverage_uid'])
        raise ValueError("Do not know how to build address from %s" % (str(dic),))

    def as_tuple(self):
        return Address.__name__, self.coverage_uid

    @staticmethod
    def from_tuple(tup):
        # as_tuple() produces ('Address', coverage_uid): a 2-tuple.
        if len(tup) != 2:
            raise ValueError("".join(["Expected tuple of size 2. Found ", str(tup)]))
        if tup[0] == "Address":
            return Address(tup[1])
        else:
            raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))

    @staticmethod
    def from_str(st):
        return Address.from_dict(ast.literal_eval(st))

    def get_top_level_key(self):
        return self.coverage_uid

    def __lt__(self, other):
        return self.__key__() < other.__key__()

    def __eq__(self, other):
        return self.as_dict() == other.as_dict()

    def __key__(self):
        return self.as_dict()

    def __hash__(self):
        # __key__() is a dict (unhashable); hash its stable string form.
        return hash(str(self.__key__()))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return str(self.__key__())
import os
class IDAddress(Address):
    """Address identified purely by an opaque id string.

    Fix: __init__ previously READ the nonexistent attribute (`self.id`)
    instead of assigning it, so every construction raised AttributeError.
    """

    def __init__(self, id):
        Address.__init__(self, id)
        self.id = id

    def as_dict(self):
        return {'type': IDAddress.__name__,
                'id': self.id}

    @staticmethod
    def from_dict(dic):
        if 'type' in dic and dic['type'] == IDAddress.__name__:
            if 'id' in dic:
                return IDAddress(dic['id'])
        # Fixed typo in the error message ("now" -> "know").
        raise ValueError("Do not know how to build %s from %s" % (IDAddress.__name__, str(dic)))

    def as_tuple(self):
        return IDAddress.__name__, self.id

    @staticmethod
    def from_tuple(tup):
        if len(tup) != 2:
            raise ValueError("".join(["Expected tuple of size 2. Found ", str(tup)]))
        if tup[0] == IDAddress.__name__:
            return IDAddress(tup[1])
        else:
            raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))

    @staticmethod
    def from_str(st):
        return IDAddress.from_dict(ast.literal_eval(st))

    def get_top_level_key(self):
        return self.id
class FileAddress(Address):
    """Address of a byte range [begin, end) within a file.

    Fixes: from_str delegated to BrickAddress.from_dict (which can never
    accept a FileAddress dictionary); __hash__ hashed an unhashable dict.
    """

    def __init__(self, coverage_uid, file_path, begin=0, end=-1, validate=False):
        Address.__init__(self, coverage_uid)
        if validate:
            if not os.path.exists(file_path):
                raise ValueError("".join(["File does not exist at path: ", file_path]))
        self.file_path = file_path
        self.begin = begin
        self.end = end

    def as_dict(self):
        return {'type': FileAddress.__name__,
                'coverage_uid': self.coverage_uid,
                'file_path': self.file_path,
                'begin': self.begin,
                'end': self.end}

    @staticmethod
    def from_dict(dic):
        if 'type' in dic and dic['type'] == FileAddress.__name__:
            if 'coverage_uid' in dic and 'file_path' in dic and 'begin' in dic and 'end' in dic:
                return FileAddress(dic['coverage_uid'], dic['file_path'], dic['begin'], dic['end'])
        raise ValueError("Do not know how to build address from %s" % (str(dic),))

    def as_tuple(self):
        return "FileAddress", self.coverage_uid, self.file_path, self.begin, self.end

    @staticmethod
    def from_tuple(tup):
        if len(tup) != 5:
            raise ValueError("".join(["Expected tuple of size 5. Found ", str(tup)]))
        if tup[0] == "FileAddress":
            return FileAddress(tup[1], tup[2], tup[3], tup[4])
        else:
            raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))

    @staticmethod
    def from_str(st):
        # Fixed: previously delegated to BrickAddress.from_dict.
        return FileAddress.from_dict(ast.literal_eval(st))

    def get_top_level_key(self):
        return self.file_path

    def __lt__(self, other):
        return self.__key__() < other.__key__()

    def __eq__(self, other):
        return self.as_dict() == other.as_dict()

    def __key__(self):
        return self.as_dict()

    def __hash__(self):
        # The key is a dict; hash its stable string form instead.
        return hash(str(self.__key__()))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return str(self.__key__())
class BrickAddress(Address):
    """Address of a slice within a brick of a coverage.

    Fixes: from_dict tested the bare (always-truthy) string 'brick_slice'
    instead of membership in the dict; from_tuple's error message claimed
    size 5 for a 4-tuple; __hash__ hashed an unhashable dict.
    """

    def __init__(self, coverage_uid, brick_id, brick_slice):
        Address.__init__(self, coverage_uid)
        self.brick_id = brick_id
        self.brick_slice = brick_slice

    def as_tuple(self):
        return "BrickAddress", self.coverage_uid, self.brick_id, self.brick_slice

    def as_dict(self):
        return {'type': BrickAddress.__name__,
                'coverage_uid': self.coverage_uid,
                'brick_id': self.brick_id,
                'brick_slice': self.brick_slice}

    @staticmethod
    def from_dict(dic):
        if 'type' in dic and dic['type'] == BrickAddress.__name__:
            if 'coverage_uid' in dic and 'brick_id' in dic and 'brick_slice' in dic:
                return BrickAddress(dic['coverage_uid'], dic['brick_id'], dic['brick_slice'])
        raise ValueError("Do not know how to build address from %s" % (str(dic),))

    @staticmethod
    def from_tuple(tup):
        if len(tup) != 4:
            raise ValueError("".join(["Expected tuple of size 4. Found ", str(len(tup))]))
        if tup[0] == "BrickAddress":
            return BrickAddress(tup[1], tup[2], tup[3])
        else:
            raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))

    @staticmethod
    def from_str(st):
        return BrickAddress.from_dict(ast.literal_eval(st))

    def get_top_level_key(self):
        return self.coverage_uid, self.brick_id

    def __lt__(self, other):
        return self.__key__() < other.__key__()

    def __eq__(self, other):
        return self.as_dict() == other.as_dict()

    def __key__(self):
        return self.as_dict()

    def __hash__(self):
        # The key is a dict; hash its stable string form instead.
        return hash(str(self.__key__()))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return str(self.__key__())
class BrickFileAddress(Address):
    """Address of a whole brick file within a coverage.

    Fix: from_tuple's error message claimed size 5 for a 3-tuple.
    """

    def __init__(self, coverage_uid, brick_id):
        Address.__init__(self, coverage_uid)
        self.brick_id = brick_id

    def as_tuple(self):
        return "BrickFileAddress", self.coverage_uid, self.brick_id

    def as_dict(self):
        return {'type': BrickFileAddress.__name__,
                'coverage_uid': self.coverage_uid,
                'brick_id': self.brick_id}

    @staticmethod
    def from_dict(dic):
        if 'type' in dic and dic['type'] == BrickFileAddress.__name__:
            if 'coverage_uid' in dic and 'brick_id' in dic:
                return BrickFileAddress(dic['coverage_uid'], dic['brick_id'])
        raise ValueError("Do not know how to build address from %s" % (str(dic),))

    @staticmethod
    def from_tuple(tup):
        if len(tup) != 3:
            # Corrected message: this address serializes to a 3-tuple.
            raise ValueError("".join(["Expected tuple of size 3. Found ", str(tup)]))
        if tup[0] == "BrickFileAddress":
            return BrickFileAddress(tup[1], tup[2])
        else:
            raise ValueError("".join(["Do not know how to build address type: ", tup[0]]))

    def get_db_str(self):
        return ''.join([BrickFileAddress.__name__, ':::',
                        self.coverage_uid, ':::', self.brick_id])

    @staticmethod
    def from_db_str(db_str):
        try:
            tp, cov_id, brick_id = db_str.split(":::")
            if tp == BrickFileAddress.__name__:
                return BrickFileAddress(cov_id, brick_id)
        except Exception:
            pass
        raise ValueError("Do not know how to build address from %s" % (str(db_str),))

    @staticmethod
    def from_str(st):
        return BrickFileAddress.from_dict(ast.literal_eval(st))

    def get_top_level_key(self):
        return self.coverage_uid + "::" + self.brick_id

    def __lt__(self, other):
        return self.__key__() < other.__key__()

    def __eq__(self, other):
        return self.as_dict() == other.as_dict()

    def __ne__(self, other):
        return not self.__eq__(other)

    def __key__(self):
        # Already a string here, so hashing works directly.
        return str(self.as_dict())

    def __hash__(self):
        return hash(self.__key__())

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return str(self.__key__())
"repo_name": "ooici/coverage-model",
"path": "coverage_model/address.py",
"copies": "1",
"size": "11055",
"license": "bsd-2-clause",
"hash": 1367302892378781000,
"line_mean": 30.5885714286,
"line_max": 99,
"alpha_frac": 0.5354138399,
"autogenerated": false,
"ratio": 3.8830347734457322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4918448613345732,
"avg_score": null,
"num_lines": null
} |
__author__ = 'casey'
import json
from coverage_model.basic_types import Dictable
from coverage_model.util.jsonable import Jsonable, unicode_convert
class ReferenceCoverageExtents(Jsonable):
    """Named time/domain extents associated with a reference coverage.

    Fix: the time-extents validation error previously echoed domain_extents
    instead of the offending time_extents value.  Also uses items() instead
    of the Python-2-only iteritems().
    """

    def __init__(self, name, reference_coverage_id, time_extents=None, domain_extents=None):
        self.name = str(name)
        self.cov_id = str(reference_coverage_id)
        if time_extents is not None:
            if not isinstance(time_extents, (tuple, set, list)) or len(time_extents) != 2:
                # Report the value that was actually invalid.
                raise ValueError('Time extents should be a tuple (min,max). Found: %s' % repr(time_extents))
            self.time_extents = tuple(time_extents)
        else:
            self.time_extents = time_extents
        self.domain_extents = {}
        if domain_extents is not None:
            if not isinstance(domain_extents, dict):
                raise ValueError('Domain extents should be a dictionary of name/tuple(min,max) key/value pairs. Found:', domain_extents)
            for k, v in domain_extents.items():
                if not isinstance(v, (tuple, list, set)) or len(v) != 2:
                    raise ValueError('Domain extents should be a dictionary of name/tuple(min,max) key/value pairs. Found:', domain_extents)
                self.domain_extents[str(k)] = tuple(v)

    @classmethod
    def from_json(cls, json_str):
        d = json.loads(json_str, object_hook=unicode_convert)
        return cls.from_dict(d)

    @staticmethod
    def from_dict(json_object):
        if not set(['name', 'cov_id', 'time_extents', 'domain_extents']).issubset(set(json_object.keys())):
            raise KeyError("Dictionary cannot be used to create object: %s" % json_object)
        return ReferenceCoverageExtents(json_object['name'], json_object['cov_id'], json_object['time_extents'], json_object['domain_extents'])

    def __str__(self):
        return '%s: %s' % (self.__class__.__name__, repr(self.__dict__))
class ExtentsDict(Dictable):
    """Mapping of coverage id -> list of ReferenceCoverageExtents.

    Fix: uses items() instead of the Python-2-only iteritems().
    """

    def __init__(self, extent_dict=None):
        self.data = {}
        if extent_dict is not None:
            for cov_id, extents in extent_dict.items():
                self.add_extents(cov_id, extents)

    def add_extents(self, cov_id, extents):
        """Append extents (a single object or a list) for a coverage id."""
        if isinstance(extents, ReferenceCoverageExtents):
            extents = [extents]
        if cov_id in self.data:
            self.data[cov_id].extend(extents)
        else:
            self.replace_extents(cov_id, extents)

    def replace_extents(self, cov_id, extents):
        """Overwrite the extents list stored for a coverage id."""
        if isinstance(extents, ReferenceCoverageExtents):
            extents = [extents]
        self.data[cov_id] = extents

    def __getitem__(self, k):
        return self.data[k]
| {
"repo_name": "ooici/coverage-model",
"path": "coverage_model/coverages/coverage_extents.py",
"copies": "1",
"size": "2671",
"license": "bsd-2-clause",
"hash": 1720814195428947700,
"line_mean": 38.2794117647,
"line_max": 143,
"alpha_frac": 0.6169973793,
"autogenerated": false,
"ratio": 3.6192411924119243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9654424363648242,
"avg_score": 0.016362841612736573,
"num_lines": 68
} |
__author__ = 'casey'
def get_overlap(first, second):
    """Compute the overlap of two (min, max) extents.

    Either argument may be None, meaning "unbounded" (the other argument is
    returned as-is), and either endpoint inside a pair may be None, meaning
    open-ended on that side.

    Raises AttributeError for unsorted fully-bounded pairs, RuntimeError
    when the two ranges do not intersect, and TypeError for argument types
    other than 2-element tuple/list/set.
    """
    if first is None:
        return second
    if second is None:
        return first

    if not (isinstance(first, (tuple, list, set)) and isinstance(second, (tuple, list, set))
            and len(first) == 2 and len(second) == 2):
        raise TypeError("Invalid arguments types %s type %s and %s type %s" % (first, type(first), second, type(second)))

    # When each bound position has at most one concrete value, the combined
    # bound is simply whichever side supplied it.
    sentinel = '__unset__'
    lo = _get_overlap_nones(first[0], second[0], sentinel)
    hi = _get_overlap_nones(first[1], second[1], sentinel)
    if lo != sentinel and hi != sentinel:
        return (lo, hi)

    if None not in first and first[0] > first[1]:
        raise AttributeError("Argument tuples must be sorted.")
    if None not in second and second[0] > second[1]:
        raise AttributeError("Argument tuples must be sorted.")

    if None in first or None in second:
        # Open-ended ranges: max of lower bounds, min of upper bounds, with
        # None treated as "no constraint".
        if first[0] is None and second[0] is None:
            lo = None
        elif first[0] is None:
            lo = second[0]
        elif second[0] is None:
            lo = first[0]
        else:
            lo = max(first[0], second[0])
        if first[1] is None and second[1] is None:
            hi = None
        elif first[1] is None:
            hi = second[1]
        elif second[1] is None:
            hi = first[1]
        else:
            hi = min(first[1], second[1])
        return (lo, hi)

    if first[0] > first[1] or second[0] > second[1]:
        raise AttributeError("Argument tuples must be sorted.")

    # Both ranges fully bounded from here on.
    if first[0] <= second[0]:
        if first[1] < second[0]:
            raise RuntimeError('No overlap')
        if first[1] >= second[1]:
            return second
        return (second[0], first[1])
    if first[0] > second[1]:
        raise RuntimeError('No overlap')
    if first[1] <= second[1]:
        return first
    return (first[0], second[1])


def _get_overlap_nones(a, b, unset):
    """Return a/b when the other is None, None when both are, else `unset`."""
    if a is None and b is None:
        return None
    if a is None:
        return b
    if b is None:
        return a
    return unset
"repo_name": "ooici/coverage-model",
"path": "coverage_model/util/extent_utils.py",
"copies": "1",
"size": "2756",
"license": "bsd-2-clause",
"hash": -5248907677937872000,
"line_mean": 34.3461538462,
"line_max": 125,
"alpha_frac": 0.4716981132,
"autogenerated": false,
"ratio": 4.416666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5388364779866667,
"avg_score": null,
"num_lines": null
} |
__author__ = 'casey'
from nose.plugins.attrib import attr
import numpy as np
import os, shutil, tempfile
import unittest
from pyon.core.bootstrap import CFG
from pyon.datastore.datastore_common import DatastoreFactory
import psycopg2
import psycopg2.extras
from coverage_model import *
from coverage_model.address import *
from coverage_model.data_span import *
from coverage_model.db_connectors import DBFactory, DB
from coverage_model.search.coverage_search import *
from coverage_model.search.search_parameter import *
from coverage_model.search.search_constants import *
from coverage_model.storage.span_storage_factory import SpanStorageFactory
from coverage_model.storage.postgres_span_storage import PostgresSpanStorage
from coverage_test_base import CoverageIntTestBase, get_props, get_parameter_dict, EXEMPLAR_CATEGORIES
@attr('UNIT',group='cov')
class TestSpanUnit(CoverageModelUnitTestCase):
    """Unit tests for address, span-stats and span equality/serialization."""

    def test_address_equality_and_serialization(self):
        # Each entry pairs a base address with a list of near-miss addresses
        # differing in exactly one field; the base must round-trip through
        # str()/AddressFactory.from_str() and compare unequal to every miss.
        addr_list = [ [Address('some_id'), [Address('other')]],
                      [FileAddress('id2', 'file_name', 0, 100), [FileAddress('is', 'file_name', 0, 100),
                        FileAddress('id2', 'bad_file', 0, 100), FileAddress('id2', 'file_name', 1, 100),
                        FileAddress('id2', 'file_name', 0, 101)] ],
                      [BrickAddress('id3', 'brick1', (2, 200)), [BrickAddress('ir', 'brick1', (2, 200)),
                        BrickAddress('id3', 'bad_brick', (2, 200)), BrickAddress('id3', 'brick1', (-1, 200)),
                        BrickAddress('id3', 'brick1', (2, -1))] ],
                      [BrickFileAddress('id4', 'brick2'), [BrickFileAddress('iq', 'brick2'),
                        BrickFileAddress('id4', 'bad_brick')] ] ]
        for addr_type in addr_list:
            base_addr = addr_type[0]
            addr_str = str(base_addr)
            new_addr = AddressFactory.from_str(addr_str)
            self.assertEqual(base_addr, new_addr)
            for address in addr_type[1]:
                self.assertNotEqual(base_addr, address)

    def test_span_equality_and_serialization(self):
        addr = BrickFileAddress('id4', 'brick2')
        other_addr = BrickFileAddress('bad', 'brick')
        base_span = SpanStats(addr, {'time': (1,2), 'lat': (0.0, 0.1), 'lon': (0.0, 179.9)})
        # Round-trip through both the string and the dict serializations.
        s = str(base_span)
        new_span = SpanStats.from_str(s)
        self.assertEqual(base_span, new_span)
        d = base_span.as_dict()
        new_span = SpanStats.from_dict(d)
        self.assertEqual(base_span, new_span)
        # Spans differing by address, extra parameter, or changed extents
        # must compare unequal.
        bad_spans = [SpanStats(other_addr, {'time': (1,2), 'lat': (0.0, 0.1), 'lon': (0.0, 179.9)}),
                     SpanStats(addr, {'time': (1,2), 'lat': (0.0, 0.1), 'lon': (0.0, 179.9), 'dummy': (1,1)}),
                     SpanStats(addr, {'time': (1,1), 'lat': (0.0, 0.1), 'lon': (0.0, 179.9)}) ]
        for span in bad_spans:
            self.assertNotEqual(span, base_span)

    def test_span_collection_equality_and_serialization(self):
        spans = [SpanStats(BrickFileAddress('id2', 'Brick1'), {'time': (1,2), 'lat': (0.0, 0.1), 'lon': (0.0, 179.9)}),
                 SpanStats(BrickFileAddress('id2', 'Brick2'), {'time': (1,2), 'lat': (0.0, 0.1), 'lon': (0.0, 179.9), 'dummy': (1,1)}),
                 SpanStats(BrickFileAddress('id2', 'Brick3'), {'time': (1,1), 'lat': (0.0, 0.1), 'lon': (0.0, 179.9)}) ]
        #spans = [ParamSpan(BrickFileAddress('id2', 'Brink1'), {})]
        spans_collection = SpanCollectionByFile()
        for span in spans:
            spans_collection.add_span(span)
        # The populated collection round-trips through str and dict forms.
        s = str(spans_collection)
        new_col = SpanCollectionByFile.from_str(s)
        self.assertEqual(spans_collection, new_col)
        d = spans_collection.as_dict()
        new_col = SpanCollectionByFile.from_dict(d)
        self.assertEqual(spans_collection, new_col)
        # A collection built from different spans (note 'Brick' vs 'Brick1')
        # must not compare equal, whether empty or populated.
        spans = [SpanStats(BrickFileAddress('id2', 'Brick'), {'time': (1,2), 'lat': (0.0, 0.1), 'lon': (0.0, 179.9)}),
                 SpanStats(BrickFileAddress('id2', 'Brick2'), {'time': (1,2), 'lat': (0.0, 0.1), 'lon': (0.0, 179.9), 'dummy': (1,1)}),
                 SpanStats(BrickFileAddress('id2', 'Brick3'), {'time': (1,1), 'lat': (0.0, 0.1), 'lon': (0.0, 179.9)}) ]
        bad_col = SpanCollectionByFile()
        self.assertNotEqual(spans_collection, bad_col)
        for span in spans:
            bad_col.add_span(span)
        self.assertNotEqual(spans_collection, bad_col)

    def test_span_serialization(self):
        import random
        import string
        # Random ids so repeated runs cannot collide on span/coverage ids.
        span_uuid = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
        cov_id = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
        param_dict = {'one': np.array([1234567.115,1,1,1,1,1], dtype=np.float32), 'two': np.array([2,2,2,2,2,2], dtype=np.float64), 'ingest_time': np.array([1,2,2,3,3,1], dtype=np.float32)}
        span = Span(span_uuid, cov_id, param_dict, ingest_time=None, compressors=None, mutable=False)
        # The msgpack and JSON serializations must deserialize to equal spans.
        txt = span.serialize()
        json = span.as_json()
        msgpack_span = Span.deserialize(txt)
        json_span = Span.from_json(json)
        self.assertEqual(msgpack_span, span)
        self.assertEqual(json_span, msgpack_span)
@attr('INT',group='cov')
class TestSpanInt(CoverageModelUnitTestCase):
working_dir = os.path.join(tempfile.gettempdir(), 'cov_mdl_tests')
coverages = set()
@classmethod
def setUpClass(cls):
if os.path.exists(cls.working_dir):
shutil.rmtree(cls.working_dir)
os.mkdir(cls.working_dir)
    @classmethod
    def tearDownClass(cls):
        """Delete scratch files and purge DB rows for coverages made here."""
        # Removes temporary files
        # Comment this out if you need to inspect the HDF5 files.
        shutil.rmtree(cls.working_dir)
        span_store = DatastoreFactory.get_datastore(datastore_name='coverage_spans', config=CFG)
        coverage_store = DatastoreFactory.get_datastore(datastore_name='coverage', config=CFG)
        if span_store is None:
            raise RuntimeError("Unable to load datastore for coverage_spans")
        if coverage_store is None:
            raise RuntimeError("Unable to load datastore for coverages")
        # Purge span rows and the coverage document for every coverage guid
        # recorded by construct_cov during this test class.
        for guid in cls.coverages:
            with span_store.pool.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
                cur.execute("DELETE FROM %s WHERE coverage_id='%s'" % (span_store._get_datastore_name(), guid))
                cur.execute("DELETE FROM %s WHERE id='%s'" % (coverage_store._get_datastore_name(), guid))
    def setUp(self):
        # No per-test setup needed: each test builds its own coverage via
        # construct_cov().
        pass
    def tearDown(self):
        # Per-class cleanup happens in tearDownClass; nothing to do per test.
        pass
    @classmethod
    def construct_cov(cls, only_time=False, save_coverage=False, in_memory=False, inline_data_writes=True, brick_size=None, make_empty=False, nt=None, auto_flush_values=True):
        """
        Construct coverage

        Builds a SimplexCoverage under cls.working_dir using the shared test
        parameter dictionary (only 'time' when only_time=True).  When `nt`
        timesteps are requested, fills time/depth/m_lat/m_lon with random
        data, records the coverage guid for tearDownClass cleanup, and
        returns (coverage, name).  With nt falsy or make_empty=True the
        coverage is returned unfilled -- NOTE(review): in that path the guid
        is NOT recorded in cls.coverages, so it is never cleaned up; confirm
        whether that is intentional.
        """
        # Construct temporal and spatial Coordinate Reference System objects
        tcrs = CRS([AxisTypeEnum.TIME])
        scrs = CRS([AxisTypeEnum.LON, AxisTypeEnum.LAT])
        # Construct temporal and spatial Domain objects
        tdom = GridDomain(GridShape('temporal', [0]), tcrs, MutabilityEnum.EXTENSIBLE) # 1d (timeline)
        sdom = GridDomain(GridShape('spatial', [0]), scrs, MutabilityEnum.IMMUTABLE) # 0d spatial topology (station/trajectory)
        pname_filter = ['time',
                        'boolean',
                        'const_float',
                        'const_int',
                        'const_str',
                        'const_rng_flt',
                        'const_rng_int',
                        'numexpr_func',
                        'category',
                        'quantity',
                        'array',
                        'record',
                        'fixed_str',
                        'sparse',
                        'lat',
                        'lon',
                        'depth']
        if only_time:
            pname_filter = ['time']
        pdict = get_parameter_dict(parameter_list=pname_filter)
        if brick_size is not None:
            bricking_scheme = {'brick_size':brick_size, 'chunk_size':True}
        else:
            bricking_scheme = None
        # Instantiate the SimplexCoverage providing the ParameterDictionary, spatial Domain and temporal Domain
        scov = SimplexCoverage(cls.working_dir, create_guid(), 'sample coverage_model', parameter_dictionary=pdict, temporal_domain=tdom, spatial_domain=sdom, inline_data_writes=inline_data_writes, in_memory_storage=in_memory, bricking_scheme=bricking_scheme, auto_flush_values=auto_flush_values)
        # Insert some timesteps (automatically expands other arrays)
        if (nt is None) or (nt == 0) or (make_empty is True):
            return scov, 'TestTestSpanUnit'
        else:
            # Add data for each parameter
            if only_time:
                scov.set_parameter_values(make_parameter_data_dict({scov.temporal_parameter_name: np.arange(1000, 10000, nt+1)}))
            else:
                parameter_values = {}
                # scov.set_parameter_values('sparse', [[[2, 4, 6], [8, 10, 12]]])
                # scov.insert_timesteps(nt/2)
                #
                # scov.set_parameter_values('sparse', [[[4, 8], [16, 20]]])
                # scov.insert_timesteps(nt/2)
                scov.append_parameter(ParameterContext('m_lon'))
                scov.append_parameter(ParameterContext('m_lat'))
                scov.append_parameter(ParameterContext('depth'))
                parameter_values['time']= np.arange(1000, 1000+nt)
                parameter_values['depth']= 1000 * np.random.random_sample(nt)
                parameter_values['m_lon'] = 160 * np.random.random_sample(nt)
                parameter_values['m_lat'] = 70 * np.random.random_sample(nt)
                scov.set_parameter_values(make_parameter_data_dict(parameter_values))
            # Remember the guid so tearDownClass can purge the DB rows.
            cls.coverages.add(scov.persistence_guid)
            return scov, 'TestSpanInt'
def test_spans_in_coverage(self):
#Coverage construction will write data to bricks, create spans, and write spans to the db.
#Retrieve the parameter values from a brick, get the spans from the master manager.
#Make sure the min/max from the brick values match the min/max from master manager spans.
scov, cov_name = self.construct_cov(nt=10)
self.assertIsNotNone(scov)
cov_data = scov.get_parameter_values(['time', 'm_lat', 'm_lon']).get_data()
pmin, pmax = scov.get_data_bounds('time')
self.assertEqual(np.float32(pmin), cov_data['time'].min())
self.assertEqual(np.float32(pmax), cov_data['time'].max())
pmin, pmax = scov.get_data_bounds('m_lat')
self.assertEqual(np.float32(pmin), np.float32(cov_data['m_lat'].min()))
self.assertEqual(np.float32(pmax), np.float32(cov_data['m_lat'].max()))
pmin, pmax = scov.get_data_bounds('m_lon')
self.assertEqual(np.float32(pmin), np.float32(cov_data['m_lon'].min()))
self.assertEqual(np.float32(pmax), np.float32(cov_data['m_lon'].max()))
    def test_span_insert(self):
        """Writing a coverage must leave retrievable span rows in the span store."""
        scov, cov_name = self.construct_cov(nt=10)
        self.coverages.add(scov.persistence_guid)
        self.assertIsNotNone(scov)
        span_store = DatastoreFactory.get_datastore(datastore_name='coverage_spans', config=CFG)
        if span_store is None:
            raise RuntimeError("Unable to load datastore for coverage_spans")
        span_addr = []
        # At least one span-stats row must exist for the newly built coverage.
        with span_store.pool.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
            cur.execute("SELECT span_address from %s where coverage_id='%s'" % ('ion_coverage_span_stats', scov.persistence_guid))
            self.assertGreater(cur.rowcount, 0)
            for row in cur:
                span_addr.append(row['span_address'])
        # Remove the span rows this test created.
        with span_store.pool.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
            for addr in span_addr:
                cur.execute("DELETE FROM %s WHERE span_address='%s'" % (span_store._get_datastore_name(), addr))
def test_get_coverage(self):
scov, cov_name = self.construct_cov(nt=10)
self.assertIsNotNone(scov)
orig_mm = scov._persistence_layer.master_manager
cov_id = orig_mm.guid
retrieved_cov = SimplexCoverage(TestSpanInt.working_dir, cov_id, 'sample coverage_model')
new_mm = retrieved_cov._persistence_layer.master_manager
self.assertEqual(new_mm, orig_mm)
self.assertNotEqual(id(new_mm), id(orig_mm))
def test_search_for_span(self):
scov, cov_name = self.construct_cov(nt=10)
self.assertIsNotNone(scov)
time_min, time_max = scov.get_data_bounds('time')
lat_min, lat_max = scov.get_data_bounds('m_lat')
lon_min, lon_max = scov.get_data_bounds('m_lon')
criteria = SearchCriteria()
param = ParamValueRange(IndexedParameters.Time, (time_min+1, time_max+1))
criteria.append(param)
param = Param2DValueRange(IndexedParameters.GeoBox, ((lat_min-1, lat_max+1),(lon_min-1, lon_max+1)))
criteria.append(param)
search = CoverageSearch(criteria)
results = search.select()
self.assertTrue(scov.persistence_guid in results.get_found_coverage_ids())
def test_search_for_span_time_fails(self):
    """A time window shifted past the coverage's time bounds must not match it."""
    scov, cov_name = self.construct_cov(nt=10)
    self.assertIsNotNone(scov)
    t_lo, t_hi = scov.get_data_bounds('time')
    lat_lo, lat_hi = scov.get_data_bounds('m_lat')
    lon_lo, lon_hi = scov.get_data_bounds('m_lon')
    criteria = SearchCriteria([
        # Time range offset well beyond the data; geo box still overlaps.
        ParamValueRange(IndexedParameters.Time, (t_lo + 20, t_hi + 30)),
        Param2DValueRange(IndexedParameters.GeoBox,
                          ((lat_lo - 1, lat_hi + 1), (lon_lo - 1, lon_hi + 1))),
    ])
    results = CoverageSearch(criteria).select()
    self.assertFalse(scov.persistence_guid in results.get_found_coverage_ids())
def test_search_for_span_lat_fails(self):
    """A latitude band above the coverage's latitude bounds must not match it."""
    scov, cov_name = self.construct_cov(nt=10)
    self.assertIsNotNone(scov)
    t_lo, t_hi = scov.get_data_bounds('time')
    lat_lo, lat_hi = scov.get_data_bounds('m_lat')
    lon_lo, lon_hi = scov.get_data_bounds('m_lon')
    criteria = SearchCriteria([
        ParamValueRange(IndexedParameters.Time, (t_lo + 1, t_hi + 1)),
        # Latitude band entirely above lat_hi; longitude still overlaps.
        Param2DValueRange(IndexedParameters.GeoBox,
                          ((lat_hi + 1, lat_hi + 2), (lon_lo - 1, lon_hi + 1))),
    ])
    results = CoverageSearch(criteria).select()
    self.assertFalse(scov.persistence_guid in results.get_found_coverage_ids())
def test_search_for_span_lon_fails(self):
    """A longitude band beyond the coverage's longitude bounds must not match it."""
    scov, cov_name = self.construct_cov(nt=10)
    self.assertIsNotNone(scov)
    t_lo, t_hi = scov.get_data_bounds('time')
    lat_lo, lat_hi = scov.get_data_bounds('m_lat')
    lon_lo, lon_hi = scov.get_data_bounds('m_lon')
    criteria = SearchCriteria([
        ParamValueRange(IndexedParameters.Time, (t_lo + 1, t_hi + 1)),
        # Longitude band entirely past lon_hi; latitude still overlaps.
        Param2DValueRange(IndexedParameters.GeoBox,
                          ((lat_lo + 1, lat_hi + 2), (lon_hi + 0.5, lon_hi + 1))),
    ])
    results = CoverageSearch(criteria).select()
    self.assertFalse(scov.persistence_guid in results.get_found_coverage_ids())
def test_search_for_span_that_barely_overlaps_searched_box(self):
    """A geo box touching the coverage only at its latitude max still matches."""
    scov, cov_name = self.construct_cov(nt=10)
    self.assertIsNotNone(scov)
    t_lo, t_hi = scov.get_data_bounds('time')
    lat_lo, lat_hi = scov.get_data_bounds('m_lat')
    lon_lo, lon_hi = scov.get_data_bounds('m_lon')
    criteria = SearchCriteria([
        ParamValueRange(IndexedParameters.Time, (t_lo + 1, t_hi + 1)),
        # Box starts exactly at lat_hi — minimal overlap with the span.
        Param2DValueRange(IndexedParameters.GeoBox,
                          ((lat_hi, lat_hi + 20), (lon_lo - 0.1, lon_hi + 20))),
    ])
    results = CoverageSearch(criteria).select()
    self.assertTrue(scov.persistence_guid in results.get_found_coverage_ids())
def test_search_for_span_using_lat_and_lon(self):
    """Exercise independent Latitude/Longitude criteria: ranges, points, and an invalid case."""
    scov, cov_name = self.construct_cov(nt=10)
    self.assertIsNotNone(scov)
    guid = scov.persistence_guid
    lat_lo, lat_hi = scov.get_data_bounds('m_lat')
    lon_lo, lon_hi = scov.get_data_bounds('m_lon')

    def found_ids(criteria):
        # Run a search with the given criteria and return the matched ids.
        return CoverageSearch(criteria).select().get_found_coverage_ids()

    # Ranges enclosing the coverage: hit.
    enclosing = SearchCriteria([
        ParamValueRange(IndexedParameters.Latitude, (lat_lo - 1, lat_hi + 20)),
        ParamValueRange(IndexedParameters.Longitude, (lon_lo - 1, lon_hi + 20)),
    ])
    self.assertTrue(guid in found_ids(enclosing))

    # Ranges strictly below both minimums: miss.
    below = SearchCriteria([
        ParamValueRange(IndexedParameters.Latitude, (lat_lo - 1, lat_lo - 0.5)),
        ParamValueRange(IndexedParameters.Longitude, (lon_lo - 1, lon_lo - 0.5)),
    ])
    self.assertFalse(guid in found_ids(below))

    # Point values at the midpoint of each bound: hit.
    midpoint = SearchCriteria([
        ParamValue(IndexedParameters.Latitude, (lat_lo + lat_hi) / 2),
        ParamValue(IndexedParameters.Longitude, (lon_lo + lon_hi) / 2),
    ])
    self.assertTrue(guid in found_ids(midpoint))

    # A very wide longitude range that still overlaps: hit.
    wide = SearchCriteria([
        ParamValueRange(IndexedParameters.Latitude, (lat_lo - 1, lat_hi + 1)),
        ParamValueRange(IndexedParameters.Longitude, (lon_lo - 150, lon_hi + 10)),
    ])
    self.assertTrue(guid in found_ids(wide))

    # Latitude alone (passed as a bare param, not a list) is not a valid search.
    lone = SearchCriteria(ParamValueRange(IndexedParameters.Latitude, (lat_lo - 1, lat_hi + 1)))
    search = CoverageSearch(lone)
    self.assertRaises(ValueError, search.select)
def test_search_for_span_that_contains_searched_box(self):
    """A geo box just slightly larger than the coverage's bounds should match."""
    scov, cov_name = self.construct_cov(nt=10)
    self.assertIsNotNone(scov)
    t_lo, t_hi = scov.get_data_bounds('time')
    lat_lo, lat_hi = scov.get_data_bounds('m_lat')
    lon_lo, lon_hi = scov.get_data_bounds('m_lon')
    criteria = SearchCriteria([
        ParamValueRange(IndexedParameters.Time, (t_lo - 1, t_hi + 1)),
        # Tight box barely containing the coverage's lat/lon extents.
        Param2DValueRange(IndexedParameters.GeoBox,
                          ((lat_lo - 0.01, lat_hi + 0.01), (lon_lo - 0.5, lon_hi + 0.5))),
    ])
    results = CoverageSearch(criteria).select()
    self.assertTrue(scov.persistence_guid in results.get_found_coverage_ids())
def test_search_for_span_contained_inside_large_box(self):
    """A large fixed geo box that contains the coverage should match it."""
    scov, cov_name = self.construct_cov(nt=10)
    self.assertIsNotNone(scov)
    t_lo, t_hi = scov.get_data_bounds('time')
    criteria = SearchCriteria([
        ParamValueRange(IndexedParameters.Time, (t_lo + 1, t_hi + 1)),
        # Fixed wide box: lat -15.5..85.5, lon 0.5..170.5.
        Param2DValueRange(IndexedParameters.GeoBox, ((-15.5, 85.5), (0.5, 170.5))),
    ])
    results = CoverageSearch(criteria).select()
    self.assertTrue(scov.persistence_guid in results.get_found_coverage_ids())
def test_for_searched_time_range_smaller_than_span_time_range(self):
    """A time window strictly inside the coverage's time bounds should match."""
    scov, cov_name = self.construct_cov(nt=10)
    self.assertIsNotNone(scov)
    t_lo, t_hi = scov.get_data_bounds('time')
    # Window narrowed by one unit on each side of the data bounds.
    criteria = SearchCriteria([
        ParamValueRange(IndexedParameters.Time, (t_lo + 1, t_hi - 1)),
    ])
    results = CoverageSearch(criteria).select()
    self.assertTrue(scov.persistence_guid in results.get_found_coverage_ids())
def test_for_searched_time_range_larger_than_span_time_range(self):
    """A time window that fully encloses the coverage's time bounds should match."""
    scov, cov_name = self.construct_cov(nt=10)
    self.assertIsNotNone(scov)
    t_lo, t_hi = scov.get_data_bounds('time')
    # Window widened by one unit on each side of the data bounds.
    criteria = SearchCriteria([
        ParamValueRange(IndexedParameters.Time, (t_lo - 1, t_hi + 1)),
    ])
    results = CoverageSearch(criteria).select()
    self.assertTrue(scov.persistence_guid in results.get_found_coverage_ids())
def test_for_searched_vertical_range(self):
    """Vertical (depth) range searches: overlap, miss above max, sliver inside, full enclosure."""
    scov, cov_name = self.construct_cov(nt=10)
    self.assertIsNotNone(scov)
    guid = scov.persistence_guid
    depth_lo, depth_hi = scov.get_data_bounds('depth')
    # (vertical range, expected to match?) — same four cases, table-driven.
    cases = [
        ((depth_lo - 1, depth_hi - 1), True),                           # overlaps lower portion
        ((depth_hi + 0.00000001, depth_hi + 10.1), False),              # just above the max
        ((depth_hi - 0.00000001, depth_hi - 0.000000001), True),        # tiny sliver inside
        ((depth_lo - 1, depth_hi + 1), True),                           # encloses whole range
    ]
    for vertical_range, expected in cases:
        criteria = SearchCriteria([
            ParamValueRange(IndexedParameters.Vertical, vertical_range),
        ])
        results = CoverageSearch(criteria).select()
        self.assertEqual(expected, guid in results.get_found_coverage_ids())
def test_minimum_search_criteria(self):
    """Searches without a sufficient criteria set must raise ValueError from select()."""
    # An unrecognized parameter alone is not enough.
    criteria = SearchCriteria(search_params=[ParamValue('dummy', 10)])
    self.assertRaises(ValueError, CoverageSearch(criteria).select)
    # Adding a time point still leaves the criteria insufficient.
    criteria.append(ParamValue(IndexedParameters.Time, 5))
    self.assertRaises(ValueError, CoverageSearch(criteria).select)
| {
"repo_name": "ooici/coverage-model",
"path": "coverage_model/test/test_span_index.py",
"copies": "1",
"size": "22832",
"license": "bsd-2-clause",
"hash": -2708333229908296700,
"line_mean": 45.5010183299,
"line_max": 296,
"alpha_frac": 0.6236422565,
"autogenerated": false,
"ratio": 3.5120750653745576,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46357173218745573,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.