text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'alexs'
import cPickle
import random
import theano.tensor as T
import theano
import numpy as np
def getReferenceLabels():
    """Map each digit 0-9 to its soft one-hot target: 0.88 at the digit's
    index and 0.1 everywhere else."""
    referenceLabels = dict()
    for digit in range(10):
        target = [0.1] * 10
        target[digit] = 0.88
        referenceLabels[digit] = target
    return referenceLabels
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x) built from Theano ops."""
    exp_neg = T.exp(-1.0 * x)
    return 1.0 / (1 + exp_neg)
def compare(result_label, given_label, reference_labels):
    """Return True iff both label vectors are nearest (L1 distance) to the
    same reference class."""
    def nearest_key(vector):
        # first key with a strictly smaller score wins, mirroring dict order
        best_key, best_score = 0, 1000
        for key, reference in reference_labels.items():
            score = np.sum(np.abs(np.array(vector) - np.array(reference)))
            if score < best_score:
                best_key, best_score = key, score
        return best_key

    return nearest_key(result_label) == nearest_key(given_label)
def makeW(rows, columns, start=-2, end=2):
    """Sample a (rows, columns) weight matrix uniformly from [start, end)."""
    return np.random.uniform(start, end, (rows, columns))
def updates_weights_function(weights, memories, cost_function, learning_rate=0.02, momentum_learning_rate=0.05):
    """Build Theano update pairs implementing SGD with momentum.

    :param weights: shared weight matrices
    :param memories: shared momentum buffers, one per weight matrix
    :param cost_function: symbolic cost to differentiate
    :param learning_rate: step size on the gradient
    :param momentum_learning_rate: weight of the previous step
    :return: list of (shared_variable, new_value) pairs for theano.function
    """
    gradients = T.grad(cost_function, weights)  # one gradient per weight
    updates = []
    for weight, gradient, memory in zip(weights, gradients, memories):
        delta = learning_rate * gradient + momentum_learning_rate * memory
        updates.append((weight, weight - delta))
        updates.append((memory, delta))
    return updates
class NN():
    """Fully-connected feed-forward sigmoid network trained with Theano
    using minibatch SGD plus momentum (see updates_weights_function)."""

    def __init__(self):
        self.layers = []           # Layer instances, input layer first
        self.weights = []          # Theano shared weight matrices between layers
        self.weights_memory = []   # momentum buffers, parallel to self.weights
        self.cost = None           # symbolic sum-of-squares cost
        self.train = None          # compiled function: one SGD step, returns cost
        self.updates = None        # (shared_var, new_value) update pairs
        self.activate = None       # compiled function: cost without updating
        self.activatwe = None      # compiled forward pass (typo kept: public name)
        self.output = None         # symbolic network output

    def build(self, givenWeights=None):
        """Create the shared weight matrices and compile the Theano functions.

        :param givenWeights: optional list of pre-initialized weight matrices,
            one per consecutive layer pair; random init when omitted
        """
        # first: init or build the in-between weight matrices
        for i in range(0, len(self.layers) - 1):
            n = self.layers[i].size
            m = self.layers[i + 1].size
            if givenWeights:
                w_values = givenWeights[i]
            else:
                w_values = makeW(n, m)
            w_memory_values = np.zeros((n, m))
            w = theano.shared(value=w_values, name="w_" + str(i) + "_" + str(i + 1))
            w_memory = theano.shared(value=w_memory_values,
                                     name="w_memory_" + str(i) + "_" + str(i + 1))
            self.weights.append(w)
            self.weights_memory.append(w_memory)
        # now build the symbolic model: a chain of sigmoid(dot(...)) layers
        inputVector = T.matrix("inputVector")
        labels = T.matrix("labels")
        net = None
        workingV = inputVector
        l2 = 0.0  # L2 penalty term, currently not added to the cost
        l1 = 0.0  # L1 penalty term, currently not added to the cost
        for w in self.weights:
            l2 += T.sum(w * w)
            l1 += T.sum(T.abs_(w))
            net = sigmoid(T.dot(workingV, w))
            workingV = net
        self.cost = T.sum(T.pow(labels - net, 2))  # + 0.005 * l2 # + 0.005 * l1
        self.output = net
        self.updates = updates_weights_function(self.weights, self.weights_memory, self.cost)
        self.train = theano.function([inputVector, labels], outputs=self.cost, updates=self.updates)
        self.activate = theano.function([inputVector, labels], outputs=self.cost)
        self.activatwe = theano.function([inputVector], outputs=self.output)

    def addLayer(self, layer):
        """Append a layer; must be called before build()."""
        self.layers.append(layer)

    def trainData(self, train_set_input, train_set_labels,
                  valid_set_input, valid_set_labels,
                  test_set_input, test_set_labels,
                  nrOfEpochs=10000, batch_size=1000):
        """Minibatch-train the network; print error and validation precision
        each epoch, then accuracy on the test set.

        :param train_set_input: training samples
        :param train_set_labels: soft one-hot training targets
        :param valid_set_input: validation samples
        :param valid_set_labels: validation targets
        :param test_set_input: held-out test samples
        :param test_set_labels: test targets
        :param nrOfEpochs: number of passes over the training data
        :param batch_size: minibatch size
        """
        reference_labels = getReferenceLabels()
        for ep in range(0, nrOfEpochs):
            # BUG FIX: the original clipped the final batch to len(...) - 1,
            # silently dropping the last sample; plain slicing handles the tail.
            for j in range(0, len(train_set_input), batch_size):
                self.train(train_set_input[j:j + batch_size],
                           train_set_labels[j:j + batch_size])
            # accumulate the cost over the same minibatches, without updating
            overallError = 0.0
            for j in range(0, len(train_set_input), batch_size):
                overallError += self.activate(train_set_input[j:j + batch_size],
                                              train_set_labels[j:j + batch_size])
            precision = self._precision(valid_set_input, valid_set_labels, reference_labels)
            print(
                "[{epoch}] error: {error} precision: {precision}".format(epoch=ep, error=overallError,
                                                                         precision=precision))
        # running tests
        if test_set_input and test_set_labels:
            print("=================== TESTS ==================")
            precision = self._precision(test_set_input, test_set_labels, reference_labels)
            print("Accuracy on {nrOfTests} tests is {precision}".format(nrOfTests=str(len(test_set_input)),
                                                                        precision=str(precision)))
            print("============================================")

    def _precision(self, inputs, labels, reference_labels):
        """Fraction of samples whose predicted class matches the given label."""
        if not inputs:
            return 0.0  # guard against empty evaluation sets
        posItems = 0.0
        failedItems = 0.0
        for sample_in, given_label in zip(inputs, labels):
            result_label = self.activatwe([sample_in])
            if compare(result_label, given_label, reference_labels):
                posItems += 1.0
            else:
                failedItems += 1.0
        return posItems / (posItems + failedItems)
class Layer():
    """Base network layer; only carries its unit count."""

    def __init__(self, size):
        self.size = size  # number of units in this layer
class SigmoidLayer(Layer):
    """Layer whose units use the logistic sigmoid activation.

    The original duplicated Layer.__init__ verbatim; inheriting it is
    behaviorally identical and removes the redundancy.
    """
    pass
class StandardOutputWithSigmoid(Layer):
    """Output layer with sigmoid activation.

    The original duplicated Layer.__init__ verbatim; inheriting it is
    behaviorally identical and removes the redundancy.
    """
    pass
def retrieveTrainValidationTest():
    """Load the pickled MNIST (train, valid, test) split from 'mnist.pkl'.

    :return: (train_set, valid_set, test_set) tuples as stored in the pickle
    """
    # open in binary mode: pickle data is bytes; the context manager also
    # guarantees the file is closed even if unpickling raises
    with open("mnist.pkl", "rb") as f:
        train_set, valid_set, test_set = cPickle.load(f)
    return train_set, valid_set, test_set
def processData(nnset, sampleSize=None):
    """Group samples per digit, optionally subsample each group, shuffle, and
    return parallel lists of float32 inputs and soft one-hot targets.

    :param nnset: (inputs, labels) pair; labels are ints in 0..9
    :param sampleSize: max samples kept per digit; None keeps everything
    :return: (list of input arrays, list of 10-dim target arrays)
    """
    inputs = nnset[0]
    labels = nnset[1]
    by_label = {}
    for idx in range(len(labels)):
        by_label.setdefault(labels[idx], []).append(inputs[idx])
    # a slice end of None keeps every example, so both the sampled and the
    # full case collapse into one loop (the original duplicated the branches)
    samples = []
    for key in by_label.keys():
        for vector in by_label[key][0:sampleSize]:
            samples.append((key, vector))
    random.shuffle(samples)
    results_in = []
    results_label_out = []
    for label, vector in samples:
        # soft one-hot target: 0.88 at the label index, 0.1 elsewhere
        label_out = [0.1 for x in range(0, 10)]
        label_out[label] = 0.88
        results_in.append(np.array(vector, dtype="float32"))
        results_label_out.append(np.array(label_out, dtype="float32"))
    return results_in, results_label_out
def main():
    """Build a 784-100-10 sigmoid network and train it on MNIST."""
    network = NN()
    for layer in (SigmoidLayer(784), SigmoidLayer(100), StandardOutputWithSigmoid(10)):
        network.addLayer(layer)
    network.build()
    train_set, valid_set, test_set = retrieveTrainValidationTest()
    # TRAINING_SAMPLE_SIZE = 100; VALIDATION_SAMPLE_SIZE = 10; TEST_SAMPLE_SIZE = 10
    sample_sizes = (None, None, None)  # None = use the full splits
    train_in, train_lbl = processData(train_set, sample_sizes[0])
    valid_in, valid_lbl = processData(valid_set, sample_sizes[1])
    test_in, test_lbl = processData(test_set, sample_sizes[2])
    network.trainData(train_in, train_lbl,
                      valid_in, valid_lbl,
                      test_in, test_lbl,
                      nrOfEpochs=10, batch_size=1000)
if __name__ == '__main__':
main() | {
"repo_name": "big-data-research/neuralnetworks_workshop_bucharest_2015",
"path": "nn_demo/01back_propagation.py",
"copies": "2",
"size": "8645",
"license": "apache-2.0",
"hash": 7315893204890876000,
"line_mean": 32.1264367816,
"line_max": 112,
"alpha_frac": 0.5539618276,
"autogenerated": false,
"ratio": 3.684995737425405,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013693438941059731,
"num_lines": 261
} |
__author__ = 'alex styler'
import math
import pandas as pd
import numpy as np
import fiona as fio
from matplotlib.figure import Figure
from matplotlib.collections import PathCollection
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Path
from matplotlib.transforms import Bbox
# haversine function code courtesy of https://gist.github.com/rochacbruno/2883505
# haversine function code courtesy of https://gist.github.com/rochacbruno/2883505
def haversine(origin, destination):
    """Great-circle distance in meters between two (lat, lon) points.

    :param origin: (lat, lon) pair in degrees
    :param destination: (lat, lon) pair in degrees
    :return: distance in meters
    """
    lat1, lon1 = origin
    lat2, lon2 = destination
    earth_radius_m = 6371000
    dphi = math.radians(lat2 - lat1)
    dlambda = math.radians(lon2 - lon1)
    # haversine term: sin^2(dphi/2) + cos(phi1) cos(phi2) sin^2(dlambda/2)
    a = math.sin(dphi / 2) * math.sin(dphi / 2) + math.cos(math.radians(lat1)) * math.cos(
        math.radians(lat2)) * math.sin(dlambda / 2) * math.sin(dlambda / 2)
    central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return earth_radius_m * central_angle
# TODO fix for poles and 180 degrees longitude
class MLMap(object):
    """Map view over a Basemap projection that loads shapefiles, selects
    shapes by feature value, and draws the selections on Matplotlib axes."""

    def __init__(self, lower_left_corner, upper_right_corner, projection='merc'):
        """ Create a map view over the specified projection
        :param lower_left_corner: (lon, lat) coordinates in degrees
        :param upper_right_corner: (lon, lat) coordinates in degrees
        :param projection: which projection to use, legal list from mpl_toolkits.basemap
        :return:
        """
        self.basemap = MLMap.__create_basemap(lower_left_corner[0], lower_left_corner[1], upper_right_corner[0],
                                              upper_right_corner[1], projection=projection)
        self.shapes = pd.DataFrame()  # loaded shape properties plus a 'path' column
        self.shapes_to_draw = []      # pending {'shapes': paths, 'args': kwargs} entries
        # projected corners; only used by the commented-out Bbox variant below
        llc = self.basemap(lower_left_corner[0], lower_left_corner[1])
        urc = self.basemap(upper_right_corner[0], upper_right_corner[1])
        #self.bbox = Bbox([llc, urc])
        # bbox kept in lon/lat degrees, as expected by fiona's bbox filter
        self.bbox = (lower_left_corner[0], lower_left_corner[1], upper_right_corner[0], upper_right_corner[1])

    @staticmethod
    def __create_basemap(ll_lon, ll_lat, ur_lon, ur_lat, wpadding=0.03, hpadding=0.04, projection='merc'):
        """Build the underlying Basemap, padded slightly beyond the corners."""
        # Compute width and height in degrees
        w, h = ur_lon - ll_lon, ur_lat - ll_lat
        # This will work poorly at poles and +179 and -179
        mlon = (ll_lon + ur_lon) / 2.
        mlat = (ll_lat + ur_lat) / 2.
        m = Basemap(
            projection=projection,
            lon_0=mlon,
            lat_0=mlat,
            ellps='WGS84',
            llcrnrlon=ll_lon - wpadding * w,
            llcrnrlat=ll_lat - hpadding * h,
            urcrnrlon=ur_lon + wpadding * w,
            urcrnrlat=ur_lat + hpadding * h,
            lat_ts=0,
            resolution='c',
            suppress_ticks=True)
        return m

    def convert_coordinates(self, coordinates):
        """ Converts coordinates to plot x,y coordinates
        :param coordinates: List of coordinates pairs to convert [(lon1, lat1)...(lon9, lat9)]
        :return: list of plot coordinates, [(x1,y1)...(x9,y9)]
        """
        # NOTE(review): relies on Python 2 zip() returning a list; under
        # Python 3 np.array(zip(...)) wraps a zip object — confirm before porting.
        return np.array(zip(*self.basemap(*zip(*coordinates))))

    # use osmapi to import a selected node into the map
    def import_osm_node(self, node_id, server='defaultserveraddress'):
        """ Not implemented, signature may change
        :param node_id:
        :param server:
        :return:
        """
        raise NotImplementedError('Not implemented, only Mapzen shapefiles at the moment')

    # use osm turbo query library to import shapes into the database
    def import_turbo_query(self, query, server='defaultserveraddress'):
        """ Not implemented, signature may change
        :param query:
        :param server:
        :return:
        """
        raise NotImplementedError('Not implemented, only Mapzen shapefiles at the moment')

    # may need to define a unifying shape/data class that can be used by Mapzen, osmapi,
    # and turbo queries. something that is easily selectable
    # can load polygons or line shapefiles. does not yet support points. only loads in paths
    # for drawing, but not text or labels of any kind. will need to expand upon inspecting the
    # properties of polys, lines, and points. also does not support Multipolygons yet, but that
    # should be an easy fix.
    def load_shape_file(self, file_name, clip_to_view=True):
        """ Loads in a shapefile to the map, from OpenStreetMaps or other services
        :param file_name: Filename of .shp file to be imported
        :param clip_to_view: if true, only loads shapes in the specified view window
        :return: None
        """
        shape_paths = []
        properties = []
        # convert shape_paths to x,y of map view and remove all shapes outside the view window
        # consider moving bbox check to lat/lon and checking before conversion to save computation
        with fio.open(file_name) as shape_file:
            if clip_to_view:
                shape_file = shape_file.filter(bbox=self.bbox)
            for shape in shape_file:
                clist = []
                shape_type = shape['geometry']['type']
                coords = shape['geometry']['coordinates']
                if shape_type == 'Polygon':
                    # exterior ring only; holes are ignored
                    clist.append(coords[0])
                elif shape_type == 'LineString':
                    clist.append(coords)
                elif shape_type == 'MultiPolygon':
                    clist.extend(poly[0] for poly in coords)
                for coords in clist:
                    path = Path(self.convert_coordinates(coords), readonly=True)
                    # NOTE(review): Path(...) never returns None, so this guard
                    # is always true — presumably meant to validate the path
                    if path is not None:
                        properties.append(shape['properties'])
                        shape_paths.append(path)
        new_shapes = pd.DataFrame(properties)
        new_shapes['path'] = shape_paths
        new_shapes = new_shapes[new_shapes.path.notnull()]
        self.shapes = self.shapes.append(new_shapes)

    def select_shape(self, feature, value, **kwargs):
        """ Selects shapes for plotting where shape[feature] == value
        :param feature: Feature string to match value on such as 'highway'
        :param value: Value to select, such as 'motorway'
        :param kwargs: arguments for the drawing
        :return:
        """
        self.shapes_to_draw.append(
            {'shapes': self.shapes[(self.shapes[feature] == value)]['path'].values,
             'args': kwargs})

    def select_shapes(self, select_function, **kwargs):
        """ Selects shapes based on an arbitrary function such as: lambda shape: shape['highway'] == 'motorway'
        :param select_function: boolean function to include a shape or not
        :param kwargs: arguments for the drawing
        :return:
        """
        self.shapes_to_draw.append(
            {'shapes': self.shapes[self.shapes.apply(select_function, axis=1)]['path'].values,
             'args': kwargs}
        )

    def draw_map(self, ax=None, map_fill='white'):
        """
        :param ax: Matplotlib axes on which to draw this map
        :param map_fill: base color of continents on the map
        :return: handle to axes
        """
        if ax is None:
            fig = Figure()
            ax = fig.add_subplot(111)
        # is there some option to fill in oceans? maybe as background color to the fig/ax
        self.basemap.fillcontinents(map_fill, ax=ax, zorder=1)
        for shapec in self.shapes_to_draw:
            ax.add_collection(PathCollection(shapec['shapes'], **shapec['args']))
        return ax

    def clear_selected_shapes(self):
        """ Clears selected shapes for next draw
        :return: None
        """
        self.shapes_to_draw = []

    def clear_loaded_shapefiles(self):
        """ Clears the loaded shape database
        :return: None
        """
        self.shapes = pd.DataFrame()
| {
"repo_name": "astyler/osmapping",
"path": "osmapping.py",
"copies": "1",
"size": "7871",
"license": "mit",
"hash": 800463625213887500,
"line_mean": 37.2087378641,
"line_max": 112,
"alpha_frac": 0.6065303011,
"autogenerated": false,
"ratio": 3.8659135559921416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49724438570921414,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alexvanboxel'
from datetime import date
from datetime import timedelta
def month(current):
    """Normalize a date to the 15th of its month."""
    return date(current.year, current.month, 15)
def month_first_day(current):
    """First calendar day of current's month."""
    return date(current.year, current.month, 1)
def month_last_day(current):
    """Last calendar day of current's month (day before the next month's 1st)."""
    first_of_next = next_month(current)
    return date(first_of_next.year, first_of_next.month, 1) - timedelta(days=1)
def rec_day_range(collect, current, stop):
    """Recursively collect each day from current through stop (inclusive)."""
    collect.append(current)
    if current >= stop:
        return collect
    return rec_day_range(collect, next_day(current), stop)
def rec_month_range(collect, current, stop):
    """Recursively collect the mid-month date of each month from current
    through stop (inclusive)."""
    collect.append(month(current))
    if current >= stop:
        return collect
    return rec_month_range(collect, next_month(current), stop)
def rec_year_range(collect, current, stop):
    """Recursively collect one mid-month date per year from current through
    stop (inclusive)."""
    collect.append(month(current))
    if current >= stop:
        return collect
    return rec_year_range(collect, next_year(current), stop)
def day_range(range_from, range_till):
    """Inclusive list of days between two 'YYYY-MM-DD' values."""
    p_from = str(range_from).split('-')
    p_till = str(range_till).split('-')
    first = date(int(p_from[0]), int(p_from[1]), int(p_from[2]))
    last = date(int(p_till[0]), int(p_till[1]), int(p_till[2]))
    return rec_day_range([], first, last)
def month_range(range_from, range_till):
    """Inclusive list of mid-month dates between two 'YYYY-MM' values."""
    p_from = str(range_from).split('-')
    p_till = str(range_till).split('-')
    first = date(int(p_from[0]), int(p_from[1]), 15)
    last = date(int(p_till[0]), int(p_till[1]), 15)
    return rec_month_range([], first, last)
def year_range(range_from, range_till):
    """Inclusive list of one date per year (Jan 15) between two 'YYYY...' values."""
    p_from = str(range_from).split('-')
    p_till = str(range_till).split('-')
    return rec_year_range([], date(int(p_from[0]), 1, 15), date(int(p_till[0]), 1, 15))
def this_month():
    """Mid-month date of the current month."""
    return month(date.today())
def last_month():
    """Mid-month date of the previous month."""
    return prev_month(this_month())
def next_month(current):
    """Mid-month date of the month after current (30-day jump from the 15th,
    re-normalized to the 15th)."""
    return month(month(current) + timedelta(days=30))
def next_year(current):
    """Mid-month date 365 days after the 15th of current's month,
    re-normalized to the 15th."""
    return month(month(current) + timedelta(days=365))
def prev_month(current):
    """Mid-month date of the month before current (30-day jump back from the
    15th, re-normalized to the 15th)."""
    return month(month(current) - timedelta(days=30))
def substract_month(current, m):
    """Step current back by m months (month precision, recursive)."""
    if m == 0:
        return current
    return substract_month(prev_month(current), m - 1)
def prev_year(current):
    """Mid-month date 365 days before the 15th of current's month,
    re-normalized to the 15th."""
    return month(month(current) - timedelta(days=365))
def last_year():
    """Mid-month date one year before the current month."""
    return prev_year(this_month())
def year_first_month(current):
    """January 15th of current's year."""
    return date(month(current).year, 1, 15)
def yester_day():
    """Yesterday's date."""
    return prev_day(date.today())
def to_day():
    """Today's date."""
    return date.today()
def prev_day(current):
    """The day before current."""
    return current - timedelta(days=1)
def next_day(current):
    """The day after current."""
    return current + timedelta(days=1)
def end_of_month(current):
    """Return the current day when it's the last day of the month, otherwise
    return a day from the previous month. Has only month precision."""
    is_last_day = next_day(current).month != current.month
    return current if is_last_day else prev_month(current)
def generate_range_from_argv(argv, last):
    """Translate command-line mode arguments into a list of month dates.

    Modes: 'from YYYY-MM' (until `last`), 'range YYYY-MM YYYY-MM',
    'month YYYY-MM', 'last', 'this'.

    :param argv: argument list; argv[0] is the mode
    :param last: date bounding the open-ended 'from' mode
    :return: list of mid-month dates, or None for an unknown mode
    """
    # BUG FIX: month_range() accepts exactly two 'YYYY-MM'-style values; the
    # original passed four positional fields to it and raised a TypeError.
    if argv[0] == 'from':
        part = argv[1].partition('-')
        return month_range('%s-%s' % (part[0], part[2]),
                           '%d-%d' % (last.year, last.month))
    elif argv[0] == 'range':
        part1 = argv[1].partition('-')
        part2 = argv[2].partition('-')
        return month_range('%s-%s' % (part1[0], part1[2]),
                           '%s-%s' % (part2[0], part2[2]))
    elif argv[0] == 'month':
        part = argv[1].partition('-')
        return month_range('%s-%s' % (part[0], part[2]),
                           '%s-%s' % (part[0], part[2]))
    elif argv[0] == 'last':
        # renamed local (was 'month') so the month() helper is not shadowed
        m = last_month()
        return month_range('%d-%d' % (m.year, m.month), '%d-%d' % (m.year, m.month))
    elif argv[0] == 'this':
        m = this_month()
        return month_range('%d-%d' % (m.year, m.month), '%d-%d' % (m.year, m.month))
    else:
        print("Known modes are: from, range")
| {
"repo_name": "alexvanboxel/demo-devoxx15-luigi",
"path": "dateutils.py",
"copies": "1",
"size": "4043",
"license": "apache-2.0",
"hash": 2151641734213315600,
"line_mean": 24.9166666667,
"line_max": 83,
"alpha_frac": 0.6297303982,
"autogenerated": false,
"ratio": 3.3413223140495867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9425531097108821,
"avg_score": 0.009104323028152969,
"num_lines": 156
} |
"""Simulate Data
Simulate stochastic dynamic systems to model gene expression dynamics and
cause-effect data.
TODO
----
Beta Version. The code will be reorganized soon.
"""
import itertools
import shutil
import sys
from pathlib import Path
from types import MappingProxyType
from typing import Optional, Union, List, Tuple, Mapping
import numpy as np
import scipy as sp
from anndata import AnnData
from .. import _utils, readwrite, logging as logg
from .._settings import settings
from .._compat import Literal
def sim(
    model: Literal['krumsiek11', 'toggleswitch'],
    params_file: bool = True,
    tmax: Optional[int] = None,
    branching: Optional[bool] = None,
    nrRealizations: Optional[int] = None,
    noiseObs: Optional[float] = None,
    noiseDyn: Optional[float] = None,
    step: Optional[int] = None,
    seed: Optional[int] = None,
    writedir: Optional[Union[str, Path]] = None,
) -> AnnData:
    """\
    Simulate dynamic gene expression data [Wittmann09]_ [Wolf18]_.

    Sample from a stochastic differential equation model built from
    literature-curated boolean gene regulatory networks, as suggested by
    [Wittmann09]_. The Scanpy implementation is due to [Wolf18]_.

    Parameters
    ----------
    model
        Model file in 'sim_models' directory.
    params_file
        Read default params from file.
    tmax
        Number of time steps per realization of time series.
    branching
        Only write realizations that contain new branches.
    nrRealizations
        Number of realizations.
    noiseObs
        Observatory/Measurement noise.
    noiseDyn
        Dynamic noise.
    step
        Interval for saving state of system.
    seed
        Seed for generation of random numbers.
    writedir
        Path to directory for writing output files.

    Returns
    -------
    Annotated data matrix.

    Examples
    --------
    See this `use case <https://github.com/theislab/scanpy_usage/tree/master/170430_krumsiek11>`__
    """
    # locals() captured here holds exactly the keyword arguments above;
    # nothing else may be defined before this line
    params = locals()
    if params_file:
        # strip any path/extension to get the bare model key
        model_key = Path(model).with_suffix('').name
        from .. import sim_models

        pfile_sim = Path(sim_models.__file__).parent / f'{model_key}_params.txt'
        default_params = readwrite.read_params(pfile_sim)
        # merge file defaults with the call arguments via _utils.update_params
        params = _utils.update_params(default_params, params)
    adata = sample_dynamic_data(**params)
    # store root index 0 in .uns under the conventional 'iroot' key
    adata.uns['iroot'] = 0
    return adata
def add_args(p):
    """Extend parser `p` with tool-specific arguments.

    This overwrites what is done in utils.uns_args.
    """
    # extra argument definitions keyed by flag name
    extra_args = {
        '--opfile': {
            'default': '',
            'metavar': 'f',
            'type': str,
            'help': 'Specify a parameter file ' '(default: "sim/${exkey}_params.txt")',
        }
    }
    return _utils.add_args(p, extra_args)
def sample_dynamic_data(**params):
    """
    Helper function.

    Runs the GRNsim simulation for the requested model, writes the
    realizations to `writedir`, and reads the last written file back
    into an AnnData object.
    """
    model_key = Path(params['model']).with_suffix('').name
    writedir = params.get('writedir')
    if writedir is None:
        # default output directory derived from the model name
        writedir = settings.writedir / (model_key + '_sim')
    else:
        writedir = Path(writedir)
    writedir.mkdir(parents=True, exist_ok=True)
    readwrite.write_params(writedir / 'params.txt', params)
    # init variables
    tmax = params['tmax']
    branching = params['branching']
    noiseObs = params['noiseObs']
    noiseDyn = params['noiseDyn']
    nrRealizations = params['nrRealizations']
    step = params['step']  # step size for saving the figure
    nrSamples = 1  # how many files?
    maxRestarts = 1000
    maxNrSamples = 1
    # simple vector auto regressive process or
    # hill kinetics process simulation
    if 'krumsiek11' not in model_key:
        # create instance, set seed
        grnsim = GRNsim(model=model_key, params=params)
        nrOffEdges_list = np.zeros(nrSamples)
        for sample in range(nrSamples):
            # random topology / for a given edge density
            if 'hill' not in model_key:
                Coupl = np.array(grnsim.Coupl)
                for sampleCoupl in range(10):
                    nrOffEdges = 0
                    for gp in range(grnsim.dim):
                        for g in range(grnsim.dim):
                            # only consider off-diagonal edges
                            if g != gp:
                                Coupl[gp, g] = 0.7 if np.random.rand() < 0.4 else 0
                                nrOffEdges += 1 if Coupl[gp, g] > 0 else 0
                            else:
                                Coupl[gp, g] = 0.7
                    # check that the coupling matrix does not have eigenvalues
                    # greater than 1, which would lead to an exploding var process
                    if max(sp.linalg.eig(Coupl)[0]) < 1:
                        break
                nrOffEdges_list[sample] = nrOffEdges
                grnsim.set_coupl(Coupl)
            # init type
            real = 0
            X0 = np.random.rand(grnsim.dim)
            Xsamples = []
            for restart in range(nrRealizations + maxRestarts):
                # slightly break symmetry in initial conditions
                if 'toggleswitch' in model_key:
                    X0 = np.array(
                        [0.8 for i in range(grnsim.dim)]
                    ) + 0.01 * np.random.randn(grnsim.dim)
                X = grnsim.sim_model(tmax=tmax, X0=X0, noiseDyn=noiseDyn)
                # check branching
                check = True
                if branching:
                    check, Xsamples = _check_branching(X, Xsamples, restart)
                if check:
                    real += 1
                    grnsim.write_data(
                        X[::step],
                        dir=writedir,
                        noiseObs=noiseObs,
                        append=(False if restart == 0 else True),
                        branching=branching,
                        nrRealizations=nrRealizations,
                    )
                # append some zeros
                if 'zeros' in writedir.name and real == 2:
                    grnsim.write_data(
                        noiseDyn * np.random.randn(500, 3),
                        dir=writedir,
                        noiseObs=noiseObs,
                        append=(False if restart == 0 else True),
                        branching=branching,
                        nrRealizations=nrRealizations,
                    )
                if real >= nrRealizations:
                    break
        logg.debug(
            f'mean nr of offdiagonal edges {nrOffEdges_list.mean()} '
            f'compared to total nr {grnsim.dim*(grnsim.dim-1)/2.}'
        )
    # more complex models
    else:
        initType = 'random'
        dim = 11
        step = 5
        grnsim = GRNsim(dim=dim, initType=initType, model=model_key, params=params)
        Xsamples = []
        for sample in range(maxNrSamples):
            # choose initial conditions such that branchings result
            if initType == 'branch':
                X0mean = grnsim.branch_init_model1(tmax)
                if X0mean is None:
                    grnsim.set_coupl()
                    continue
            real = 0
            for restart in range(nrRealizations + maxRestarts):
                if initType == 'branch':
                    # vary initial conditions around mean
                    X0 = X0mean + (0.05 * np.random.rand(dim) - 0.025 * np.ones(dim))
                else:
                    # generate random initial conditions within [0.3,0.7]
                    X0 = 0.4 * np.random.rand(dim) + 0.3
                if model_key in [5, 6]:
                    X0 = np.array([0.3, 0.3, 0, 0, 0, 0])
                if model_key in [7, 8, 9, 10]:
                    X0 = 0.6 * np.random.rand(dim) + 0.2
                    X0[2:] = np.zeros(4)
                if 'krumsiek11' in model_key:
                    # start from the progenitor state of the Krumsiek model
                    X0 = np.zeros(dim)
                    X0[grnsim.varNames['Gata2']] = 0.8
                    X0[grnsim.varNames['Pu.1']] = 0.8
                    X0[grnsim.varNames['Cebpa']] = 0.8
                    X0 += 0.001 * np.random.randn(dim)
                if False:
                    # NOTE(review): dead code — single-gene switch experiment
                    switch_gene = restart - (nrRealizations - dim)
                    if switch_gene >= dim:
                        break
                    X0[switch_gene] = 0 if X0[switch_gene] > 0.1 else 0.8
                X = grnsim.sim_model(tmax, X0=X0, noiseDyn=noiseDyn, restart=restart)
                # check branching
                check = True
                if branching:
                    check, Xsamples = _check_branching(X, Xsamples, restart)
                if check:
                    real += 1
                    grnsim.write_data(
                        X[::step],
                        dir=writedir,
                        noiseObs=noiseObs,
                        append=(False if restart == 0 else True),
                        branching=branching,
                        nrRealizations=nrRealizations,
                    )
                if real >= nrRealizations:
                    break
    # load the last simulation file
    filename = None
    for filename in writedir.glob('sim*.txt'):
        pass
    logg.info(f'reading simulation results (unknown)')
    adata = readwrite._read(
        filename, first_column_names=True, suppress_cache_warning=True
    )
    adata.uns['tmax_write'] = tmax / step
    return adata
def write_data(
    X,
    dir=Path('sim/test'),
    append=False,
    header='',
    varNames: Mapping[str, int] = MappingProxyType({}),
    Adj=np.array([]),
    Coupl=np.array([]),
    boolRules: Mapping[str, str] = MappingProxyType({}),
    model='',
    modelType='',
    invTimeStep=1,
):
    """Write simulated data.

    Accounts for saving at the same time an ID
    and a model file.
    """
    dir.mkdir(parents=True, exist_ok=True)
    # update file with sample ids
    filename = dir / 'id.txt'
    if filename.is_file():
        with filename.open('r') as f:
            # bump the id for a fresh run; keep it when appending
            id = int(f.read()) + (0 if append else 1)
    else:
        id = 0
    with filename.open('w') as f:
        # zero-pad the id to six digits for stable filenames
        id = '{:0>6}'.format(id)
        f.write(str(id))
    # dimension
    dim = X.shape[1]
    # write files with adjacancy and coupling matrices
    if not append:
        if False:
            # NOTE(review): dead code — this branch never executes; also
            # `dir + '/adj_'` would raise TypeError on a Path; fix before
            # re-enabling.
            if Adj.size > 0:
                # due to 'update formulation' of model, there
                # is always a diagonal dependence
                Adj = np.copy(Adj)
                if 'hill' in model:
                    for i in range(Adj.shape[0]):
                        Adj[i, i] = 1
                np.savetxt(dir + '/adj_' + id + '.txt', Adj, header=header, fmt='%d')
            if Coupl.size > 0:
                np.savetxt(
                    dir + '/coupl_' + id + '.txt', Coupl, header=header, fmt='%10.6f'
                )
        # write model file
        if varNames and Coupl.size > 0:
            with (dir / f'model_{id}.txt').open('w') as f:
                f.write('# For each "variable = ", there must be a right hand side: \n')
                f.write(
                    '# either an empty string or a python-style logical expression \n'
                )
                f.write('# involving variable names, "or", "and", "(", ")". \n')
                f.write('# The order of equations matters! \n')
                f.write('# \n')
                f.write('# modelType = ' + modelType + '\n')
                f.write('# invTimeStep = ' + str(invTimeStep) + '\n')
                f.write('# \n')
                f.write('# boolean update rules: \n')
                for k, v in boolRules.items():
                    f.write(f'{k} = {v}\n')
                # write coupling via names
                f.write('# coupling list: \n')
                names = list(varNames.keys())
                for gp in range(dim):
                    for g in range(dim):
                        # only write couplings that are effectively non-zero
                        if np.abs(Coupl[gp, g]) > 1e-10:
                            f.write(
                                f'{names[gp]:10} '
                                f'{names[g]:10} '
                                f'{Coupl[gp, g]:10.3} \n'
                            )
    # write simulated data
    # the binary mode option in the following line is a fix for python 3
    # variable names
    if varNames:
        header += f'{"it":>2} '
        for v in varNames.keys():
            header += f'{v:>7} '
    with (dir / f'sim_{id}.txt').open('ab' if append else 'wb') as f:
        np.savetxt(
            f,
            np.c_[np.arange(0, X.shape[0]), X],
            header=('' if append else header),
            fmt=['%4.f'] + ['%7.4f' for i in range(dim)],
        )
class GRNsim:
"""
Simlulation of stochastic dynamic systems.
Main application: simulation of gene expression dynamics.
Also standard models are implemented.
"""
availModels = dict(
krumsiek11=(
'myeloid progenitor network, Krumsiek et al., PLOS One 6, e22649, '
'\n equations from Table 1 on page 3, '
'doi:10.1371/journal.pone.0022649 \n'
),
var='vector autoregressive process \n',
hill='process with hill kinetics \n',
)
writeOutputOnce = True
def __init__(
self,
dim=3,
model='ex0',
modelType='var',
initType='random',
show=False,
verbosity=0,
Coupl=None,
params=MappingProxyType({}),
):
"""
Params
------
model
either string for predefined model,
or directory with a model file and a couple matrix files
"""
self.dim = (
dim if Coupl is None else Coupl.shape[0]
) # number of nodes / dimension of system
self.maxnpar = 1 # maximal number of parents
self.p_indep = 0.4 # fraction of independent genes
self.model = model
self.modelType = modelType
self.initType = initType # string characterizing a specific initial
self.show = show
self.verbosity = verbosity
# checks
if initType not in ['branch', 'random']:
raise RuntimeError('initType must be either: branch, random')
if model not in self.availModels.keys():
message = 'model not among predefined models \n' # noqa: F841 # TODO FIX
# read from file
from .. import sim_models
model = Path(sim_models.__file__).parent / f'{model}.txt'
if not model.is_file():
raise RuntimeError(f'Model file {model} does not exist')
self.model = model
# set the coupling matrix, and with that the adjacency matrix
self.set_coupl(Coupl=Coupl)
# seed
np.random.seed(params['seed'])
# header
self.header = 'model = ' + self.model.name + ' \n'
# params
self.params = params
def sim_model(self, tmax, X0, noiseDyn=0, restart=0):
"""Simulate the model."""
self.noiseDyn = noiseDyn
#
X = np.zeros((tmax, self.dim))
X[0] = X0 + noiseDyn * np.random.randn(self.dim)
# run simulation
for t in range(1, tmax):
if self.modelType == 'hill':
Xdiff = self.Xdiff_hill(X[t - 1])
elif self.modelType == 'var':
Xdiff = self.Xdiff_var(X[t - 1])
else:
raise ValueError(f"Unknown modelType {self.modelType!r}")
X[t] = X[t - 1] + Xdiff
# add dynamic noise
X[t] += noiseDyn * np.random.randn(self.dim)
return X
def Xdiff_hill(self, Xt):
"""Build Xdiff from coefficients of boolean network,
that is, using self.boolCoeff. The employed functions
are Hill type activation and deactivation functions.
See Wittmann et al., BMC Syst. Biol. 3, 98 (2009),
doi:10.1186/1752-0509-3-98 for more details.
"""
verbosity = self.verbosity > 0 and self.writeOutputOnce
self.writeOutputOnce = False
Xdiff = np.zeros(self.dim)
for ichild, child in enumerate(self.pas.keys()):
# check whether list of parents is non-empty,
# otherwise continue
if self.pas[child]:
Xdiff_syn = 0 # synthesize term
if verbosity > 0:
Xdiff_syn_str = ''
else:
continue
# loop over all tuples for which the boolean update
# rule returns true, these are stored in self.boolCoeff
for ituple, tuple in enumerate(self.boolCoeff[child]):
Xdiff_syn_tuple = 1
Xdiff_syn_tuple_str = ''
for iv, v in enumerate(tuple):
iparent = self.varNames[self.pas[child][iv]]
x = Xt[iparent]
threshold = 0.1 / np.abs(self.Coupl[ichild, iparent])
Xdiff_syn_tuple *= (
self.hill_a(x, threshold) if v else self.hill_i(x, threshold)
)
if verbosity > 0:
Xdiff_syn_tuple_str += (
f'{"a" if v else "i"}'
f'({self.pas[child][iv]}, {threshold:.2})'
)
Xdiff_syn += Xdiff_syn_tuple
if verbosity > 0:
Xdiff_syn_str += ('+' if ituple != 0 else '') + Xdiff_syn_tuple_str
# multiply with degradation term
Xdiff[ichild] = self.invTimeStep * (Xdiff_syn - Xt[ichild])
if verbosity > 0:
Xdiff_str = (
f'{child}_{child}-{child} = '
f'{self.invTimeStep}*({Xdiff_syn_str}-{child})'
)
settings.m(0, Xdiff_str)
return Xdiff
def Xdiff_var(self, Xt, verbosity=0):
""""""
# subtract the current state
Xdiff = -Xt
# add the information from the past
Xdiff += np.dot(self.Coupl, Xt)
return Xdiff
def hill_a(self, x, threshold=0.1, power=2):
""" Activating hill function. """
x_pow = np.power(x, power)
threshold_pow = np.power(threshold, power)
return x_pow / (x_pow + threshold_pow)
def hill_i(self, x, threshold=0.1, power=2):
"""Inhibiting hill function.
Is equivalent to 1-hill_a(self,x,power,threshold).
"""
x_pow = np.power(x, power)
threshold_pow = np.power(threshold, power)
return threshold_pow / (x_pow + threshold_pow)
def nhill_a(self, x, threshold=0.1, power=2, ichild=2):
""" Normalized activating hill function. """
x_pow = np.power(x, power)
threshold_pow = np.power(threshold, power)
return x_pow / (x_pow + threshold_pow) * (1 + threshold_pow)
def nhill_i(self, x, threshold=0.1, power=2):
"""Normalized inhibiting hill function.
Is equivalent to 1-nhill_a(self,x,power,threshold).
"""
x_pow = np.power(x, power)
threshold_pow = np.power(threshold, power)
return threshold_pow / (x_pow + threshold_pow) * (1 - x_pow)
def read_model(self):
    """Read the model and the couplings from the model file.

    Populates ``modelType``, ``invTimeStep``, ``dim``, ``boolRules``,
    ``varNames``, ``Coupl``, ``Adj_signed`` and ``Adj`` from ``self.model``
    (a path-like object; opened twice, once for rules and once for the
    coupling list), then builds the boolean coefficients.
    """
    if self.verbosity > 0:
        settings.m(0, 'reading model', self.model)
    # --- first pass: header key/values and boolean update rules ---
    boolRules = []
    for line in self.model.open():
        if line.startswith('#') and 'modelType =' in line:
            keyval = line
            # an optional '|'-separated trailer is stripped off
            if '|' in line:
                keyval, type = line.split('|')[:2]
            self.modelType = keyval.split('=')[1].strip()
        if line.startswith('#') and 'invTimeStep =' in line:
            keyval = line
            if '|' in line:
                keyval, type = line.split('|')[:2]
            self.invTimeStep = float(keyval.split('=')[1].strip())
        # non-comment lines are "name = rule" pairs
        if not line.startswith('#'):
            boolRules.append([s.strip() for s in line.split('=')])
        # the rules section ends where the coupling list begins
        if line.startswith('# coupling list:'):
            break
    self.dim = len(boolRules)
    self.boolRules = dict(boolRules)
    self.varNames = {s: i for i, s in enumerate(self.boolRules.keys())}
    names = self.varNames
    # --- second pass: read couplings via names ---
    self.Coupl = np.zeros((self.dim, self.dim))
    boolContinue = True
    for (
        line
    ) in self.model.open():  # skip everything until the coupling marker
        if line.startswith('# coupling list:'):
            boolContinue = False
        if boolContinue:
            continue
        if not line.startswith('#'):
            # each line: parent-name child-name value
            gps, gs, val = line.strip().split()
            self.Coupl[int(names[gps]), int(names[gs])] = float(val)
    # adjacency matrices derived from the couplings
    self.Adj_signed = np.sign(self.Coupl)
    self.Adj = np.abs(np.array(self.Adj_signed))
    # build bool coefficients (necessary for odefy type
    # version of the discrete model)
    self.build_boolCoeff()
def set_coupl(self, Coupl=None):
"""Construct the coupling matrix (and adjacancy matrix) from predefined models
or via sampling.
"""
self.varNames = {str(i): i for i in range(self.dim)}
if self.model not in self.availModels.keys() and Coupl is None:
self.read_model()
elif 'var' in self.model.name:
# vector auto regressive process
self.Coupl = Coupl
self.boolRules = {s: '' for s in self.varNames.keys()}
names = list(self.varNames.keys())
for gp in range(self.dim):
pas = []
for g in range(self.dim):
if np.abs(self.Coupl[gp, g] > 1e-10):
pas.append(names[g])
self.boolRules[names[gp]] = ''.join(
pas[:1] + [' or ' + pa for pa in pas[1:]]
)
self.Adj_signed = np.sign(Coupl)
elif self.model in ['6', '7', '8', '9', '10']:
self.Adj_signed = np.zeros((self.dim, self.dim))
n_sinknodes = 2
# sinknodes = np.random.choice(np.arange(0,self.dim),
# size=n_sinknodes,replace=False)
sinknodes = np.array([0, 1])
# assume sinknodes have feeback
self.Adj_signed[sinknodes, sinknodes] = np.ones(n_sinknodes)
# # allow negative feedback
# if self.model == 10:
# plus_minus = (np.random.randint(0,2,n_sinknodes) - 0.5)*2
# self.Adj_signed[sinknodes,sinknodes] = plus_minus
leafnodes = np.array(sinknodes)
availnodes = np.array([i for i in range(self.dim) if i not in sinknodes])
# settings.m(0,leafnodes,availnodes)
while len(availnodes) != 0:
# parent
parent_idx = np.random.choice(
np.arange(0, len(leafnodes)), size=1, replace=False
)
parent = leafnodes[parent_idx]
# children
children_ids = np.random.choice(
np.arange(0, len(availnodes)), size=2, replace=False
)
children = availnodes[children_ids]
settings.m(0, parent, children)
self.Adj_signed[children, parent] = np.ones(2)
if self.model == 8:
self.Adj_signed[children[0], children[1]] = -1
if self.model in [9, 10]:
self.Adj_signed[children[0], children[1]] = -1
self.Adj_signed[children[1], children[0]] = -1
# update leafnodes
leafnodes = np.delete(leafnodes, parent_idx)
leafnodes = np.append(leafnodes, children)
# update availnodes
availnodes = np.delete(availnodes, children_ids)
# settings.m(0,availnodes)
# settings.m(0,leafnodes)
# settings.m(0,self.Adj)
# settings.m(0,'-')
else:
self.Adj = np.zeros((self.dim, self.dim))
for i in range(self.dim):
indep = np.random.binomial(1, self.p_indep)
if indep == 0:
# this number includes parents (other variables)
# and the variable itself, therefore its
# self.maxnpar+2 in the following line
nr = np.random.randint(1, self.maxnpar + 2)
j_par = np.random.choice(
np.arange(0, self.dim), size=nr, replace=False
)
self.Adj[i, j_par] = 1
else:
self.Adj[i, i] = 1
#
self.Adj = np.abs(np.array(self.Adj_signed))
# settings.m(0,self.Adj)
def set_coupl_old(self):
    """Using the adjacency matrix, sample a coupling matrix.

    For each nonzero adjacency entry a coupling strength and a random sign
    are drawn; model-specific sign constraints are applied afterwards.
    """
    if self.model == 'krumsiek11' or self.model == 'var':
        # we already built the coupling matrix in set_coupl20()
        return
    self.Coupl = np.zeros((self.dim, self.dim))
    for i in range(self.Adj.shape[0]):
        for j, a in enumerate(self.Adj[i]):
            # if there is a 1 in Adj, specify co and antiregulation
            # and strength of regulation
            if a != 0:
                co_anti = np.random.randint(2)
                # set a lower bound for the coupling parameters
                # they ought not to be smaller than 0.1
                # and not be larger than 0.4
                # NOTE(review): `0.0 * np.random.rand()` is constant 0.1 and
                # contradicts the 0.1..0.4 range above — presumably the factor
                # should be 0.3; TODO confirm before changing
                self.Coupl[i, j] = 0.0 * np.random.rand() + 0.1
                # set sign for coupling
                if co_anti == 1:
                    self.Coupl[i, j] *= -1
    # enforce certain requirements on models
    if self.model == 1:
        self.coupl_model1()
    elif self.model == 5:
        self.coupl_model5()
    elif self.model in [6, 7]:
        self.coupl_model6()
    elif self.model in [8, 9, 10]:
        self.coupl_model8()
    # output
    if self.verbosity > 1:
        settings.m(0, self.Coupl)
def coupl_model1(self):
    """Enforce the sign pattern of model 1 on the couplings:
    positive self-couplings, negative cross-coupling 0 <- 1.
    (Model 2 has the same couplings but arbitrary signs.)
    """
    c = self.Coupl
    c[0, 0] = np.abs(c[0, 0])
    c[0, 1] = -np.abs(c[0, 1])
    c[1, 1] = np.abs(c[1, 1])
def coupl_model5(self):
    """Toggle switch: repression everywhere, except that nodes 2 and 3
    are activated by node 0, and nodes 4 and 5 by node 1.
    """
    self.Coupl = -0.2 * self.Adj
    # flip the four activating edges back to a positive sign
    for child, parent in ((2, 0), (3, 0), (4, 1), (5, 1)):
        self.Coupl[child, parent] *= -1
def coupl_model6(self):
    """Variant of the toggle switch: couplings are half the signed adjacency."""
    self.Coupl = self.Adj_signed * 0.5
def coupl_model8(self):
    """Variant of the toggle switch.

    Repressive (negative) couplings are damped to -0.2: at the full -0.5
    completely unstable solutions are obtained.
    """
    self.Coupl = 0.5 * self.Adj_signed
    # clamp every negative entry in place via a boolean mask
    repressive = self.Coupl < -1e-6
    self.Coupl[repressive] = -0.2
def coupl_model_krumsiek11(self):
    """krumsiek11 model: use the signed adjacency matrix directly as the
    coupling matrix (note: this binds the same array object, no copy)."""
    self.Coupl = self.Adj_signed
def sim_model_back_help(self, Xt, Xt1):
    """Residual of the implicit backward step.

    Zero exactly when stepping forward from ``Xt`` yields ``X_{t+1} = Xt1``.
    """
    forward = Xt + self.Xdiff(Xt)
    return -Xt1 + forward
def sim_model_backwards(self, tmax, X0):
    """Simulate the model backwards in time.

    Starting from the final state ``X0`` at index ``tmax - 1``, each earlier
    state is obtained by root-finding on the implicit backward step.
    """
    trajectory = np.zeros((tmax, self.dim))
    trajectory[tmax - 1] = X0
    for t in range(tmax - 2, -1, -1):
        # seed the solver with the already-known later state
        # (scipy wraps a non-tuple `args` into a 1-tuple)
        sol = sp.optimize.root(
            self.sim_model_back_help,
            trajectory[t + 1],
            args=(trajectory[t + 1]),
            method='hybr',
        )
        trajectory[t] = sol.x
    return trajectory
def branch_init_model1(self, tmax=100):
    """Compute an initial point for model 1 from which trajectories branch.

    Integrates backwards from two small perturbations of the fixed point and
    returns the mean of the two resulting initial states, or ``None`` when the
    fixed point is outside / too close to the bounds of [0, 1]^2.
    """
    # check whether we can define trajectories
    Xfix = np.array([self.Coupl[0, 1] / self.Coupl[0, 0], 1])
    if Xfix[0] > 0.97 or Xfix[0] < 0.03:
        settings.m(
            0,
            '... either no fixed point in [0,1]^2! \n'
            + ' or fixed point is too close to bounds',
        )
        return None
    # FIX: `tmax / 3` is a float in Python 3 and breaks
    # np.zeros((tmax, dim)) inside sim_model_backwards — use floor division
    XbackUp = self.sim_model_backwards(
        tmax=tmax // 3, X0=Xfix + np.array([0.02, -0.02])
    )
    XbackDo = self.sim_model_backwards(
        tmax=tmax // 3, X0=Xfix + np.array([-0.02, -0.02])
    )
    # integrate forward again from the two reconstructed starting points
    Xup = self.sim_model(tmax=tmax, X0=XbackUp[0])
    Xdo = self.sim_model(tmax=tmax, X0=XbackDo[0])
    # compute mean
    X0mean = 0.5 * (Xup[0] + Xdo[0])
    if np.min(X0mean) < 0.025 or np.max(X0mean) > 0.975:
        settings.m(0, '... initial point is too close to bounds')
        return None
    if self.show and self.verbosity > 1:
        pl.figure()  # noqa: F821 TODO Fix me
        pl.plot(XbackUp[:, 0], '.b', XbackUp[:, 1], '.g')  # noqa: F821 TODO Fix me
        pl.plot(XbackDo[:, 0], '.b', XbackDo[:, 1], '.g')  # noqa: F821 TODO Fix me
        pl.plot(Xup[:, 0], 'b', Xup[:, 1], 'g')  # noqa: F821 TODO Fix me
        pl.plot(Xdo[:, 0], 'b', Xdo[:, 1], 'g')  # noqa: F821 TODO Fix me
    return X0mean
def parents_from_boolRule(self, rule):
"""Determine parents based on boolean updaterule.
Returns list of parents.
"""
rule_pa = (
rule.replace('(', '')
.replace(')', '')
.replace('or', '')
.replace('and', '')
.replace('not', '')
)
rule_pa = rule_pa.split()
# if there are no parents, continue
if not rule_pa:
return []
# check whether these are meaningful parents
pa_old = []
pa_delete = []
for pa in rule_pa:
if pa not in self.varNames.keys():
settings.m(0, 'list of available variables:')
settings.m(0, list(self.varNames.keys()))
message = (
'processing of rule "'
+ rule
+ ' yields an invalid parent: '
+ pa
+ ' | check whether the syntax is correct: \n'
+ 'only python expressions "(",")","or","and","not" '
+ 'are allowed, variable names and expressions have to be separated '
+ 'by white spaces'
)
raise ValueError(message)
if pa in pa_old:
pa_delete.append(pa)
for pa in pa_delete:
rule_pa.remove(pa)
return rule_pa
def build_boolCoeff(self):
    """Compute coefficients for tuple space.

    For each variable, determines its parents and collects every boolean
    assignment of those parents for which the update rule evaluates True.
    Raises ``ValueError`` when couplings and rule parents disagree.
    """
    # coefficients for hill functions from boolean update rules
    self.boolCoeff = {s: [] for s in self.varNames.keys()}
    # parents
    self.pas = {s: [] for s in self.varNames.keys()}
    for key in self.boolRules.keys():
        rule = self.boolRules[key]
        self.pas[key] = self.parents_from_boolRule(rule)
        pasIndices = [self.varNames[pa] for pa in self.pas[key]]
        # check whether there are coupling matrix entries for each parent
        for g in range(self.dim):
            if g in pasIndices:
                if np.abs(self.Coupl[self.varNames[key], g]) < 1e-10:
                    raise ValueError(f'specify coupling value for {key} <- {g}')
            else:
                if np.abs(self.Coupl[self.varNames[key], g]) > 1e-10:
                    raise ValueError(
                        'there should be no coupling value for ' f'{key} <- {g}'
                    )
        if self.verbosity > 1:
            settings.m(0, '...' + key)
            settings.m(0, rule)
            # FIX: referenced the undefined name `rule_pa` (flagged F821);
            # the parent list is stored in self.pas[key]
            settings.m(0, self.pas[key])
        # now evaluate coefficients: keep every truth assignment of the
        # parents under which the rule fires
        # (loop variable renamed so the builtin `tuple` is not shadowed)
        for state_tuple in itertools.product(
            [False, True], repeat=len(self.pas[key])
        ):
            if self.process_rule(rule, self.pas[key], state_tuple):
                self.boolCoeff[key].append(state_tuple)
        if self.verbosity > 1:
            settings.m(0, self.boolCoeff[key])
def process_rule(self, rule, pa, tuple):
"""Process a string that denotes a boolean rule."""
for i, v in enumerate(tuple):
rule = rule.replace(pa[i], str(v))
return eval(rule)
def write_data(
    self,
    X,
    dir=Path('sim/test'),
    noiseObs=0.0,
    append=False,
    branching=False,
    nrRealizations=1,
    seed=0,
):
    """Add observational noise to ``X`` and write it to ``dir``.

    Extends ``self.header`` with the simulation parameters, perturbs ``X``
    in place with Gaussian observation noise, then delegates to the
    module-level ``write_data`` helper (which this method shadows by name).
    """
    header = self.header
    tmax = int(X.shape[0])
    header += 'tmax = ' + str(tmax) + '\n'
    header += 'branching = ' + str(branching) + '\n'
    header += 'nrRealizations = ' + str(nrRealizations) + '\n'
    header += 'noiseObs = ' + str(noiseObs) + '\n'
    header += 'noiseDyn = ' + str(self.noiseDyn) + '\n'
    header += 'seed = ' + str(seed) + '\n'
    # add observational noise (mutates the caller's array)
    X += noiseObs * np.random.randn(tmax, self.dim)
    # call helper function (the module-level write_data, not this method)
    write_data(
        X,
        dir,
        append,
        header,
        varNames=self.varNames,
        Adj=self.Adj,
        Coupl=self.Coupl,
        model=self.model,
        modelType=self.modelType,
        boolRules=self.boolRules,
        invTimeStep=self.invTimeStep,
    )
def _check_branching(
    X: np.ndarray, Xsamples: np.ndarray, restart: int, threshold: float = 0.25
) -> Tuple[bool, List[np.ndarray]]:
    """\
    Check whether time series branches.

    Parameters
    ----------
    X
        current time series data.
    Xsamples
        list of previous branching samples.
    restart
        counts number of restart trials.
    threshold
        sets threshold for attractor identification.

    Returns
    -------
    check
        true if branching realization
    Xsamples
        updated list
    """
    Xsamples = list(Xsamples)
    check = True
    if restart == 0:
        # first realization is a new branch by definition
        Xsamples.append(X)
    else:
        final_state = X[-1, :]
        for Xcompare in Xsamples:
            Xtmax_diff = np.absolute(final_state - Xcompare[-1, :])
            # a genuinely new attractor must differ in at least two
            # variables: the second-largest deviation has to exceed the
            # threshold, otherwise this realization is not a new branch
            if np.partition(Xtmax_diff, -2)[-2] < threshold:
                check = False
        if check:
            Xsamples.append(X)
    logg.debug(f'realization {restart}: {"" if check else "no"} new branch')
    return check, Xsamples
def check_nocycles(Adj: np.ndarray, verbosity: int = 2) -> bool:
    """\
    Check that the graph described by an adjacency matrix has no cycles.

    Parameters
    ----------
    Adj
        adjacency matrix of dimension (dim, dim)

    Returns
    -------
    True if there is no cycle, False otherwise.
    """
    n_nodes = Adj.shape[0]
    for start in range(n_nodes):
        # indicator vector for the start node, propagated along edges
        reach = np.zeros(n_nodes)
        reach[start] = 1
        for step in range(n_nodes):
            reach = Adj.dot(reach)
            # returning to the start node means a cycle of length step + 1
            if reach[start] > 1e-10:
                if verbosity > 2:
                    settings.m(0, Adj)
                    settings.m(
                        0,
                        'contains a cycle of length',
                        step + 1,
                        'starting from node',
                        start,
                        '-> reject',
                    )
                return False
    return True
def sample_coupling_matrix(
    dim: int = 3, connectivity: float = 0.5
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, int]:
    """\
    Sample coupling matrix.

    Checks that returned graphs contain no self-cycles.

    Parameters
    ----------
    dim
        dimension of coupling matrix.
    connectivity
        fraction of connectivity, fully connected means 1.,
        not-connected means 0, in the case of fully connected, one has
        dim*(dim-1)/2 edges in the graph.

    Returns
    -------
    coupl
        coupling matrix
    adj
        adjacency matrix
    adj_signed
        signed adjacency matrix
    n_edges
        Number of edges

    Raises
    ------
    ValueError
        if no acyclic graph with at least one edge is found within 10 trials.
    """
    max_trial = 10
    check = False
    for trial in range(max_trial):
        # random topology for a given connectivity / edge density
        Coupl = np.zeros((dim, dim))
        n_edges = 0
        for gp in range(dim):
            for g in range(dim):
                if gp == g:
                    continue
                # need to have the factor 0.5, otherwise
                # connectivity=1 would lead to dim*(dim-1) edges
                if np.random.rand() < 0.5 * connectivity:
                    Coupl[gp, g] = 0.7
                    n_edges += 1
        # obtain adjacency matrix (a dead pre-assignment of
        # np.zeros(..., dtype='int_') was removed here)
        Adj_signed = np.sign(Coupl)
        Adj = np.abs(Adj_signed)
        # check for cycles and whether there is at least one edge
        if check_nocycles(Adj) and n_edges > 0:
            check = True
            break
    if not check:
        # FIX: message previously rendered "after10 trials" (missing space)
        raise ValueError(
            f'did not find graph without cycles after {max_trial} trials'
        )
    return Coupl, Adj, Adj_signed, n_edges
class StaticCauseEffect:
    """
    Simulates static data to investigate structure learning.
    """

    # human-readable description of each available bivariate model,
    # used to build the command-line help epilog
    availModels = dict(
        line='y = αx \n',
        noise='y = noise \n',
        absline='y = |x| \n',
        parabola='y = αx² \n',
        sawtooth='y = x - |x| \n',
        tanh='y = tanh(x) \n',
        combi='combinatorial regulation \n',
    )

    def __init__(self):
        # define a set of available functions (model name -> callable)
        self.funcs = dict(
            line=lambda x: x,
            noise=lambda x: 0,
            absline=np.abs,
            parabola=lambda x: x ** 2,
            sawtooth=lambda x: 0.5 * x - np.floor(0.5 * x),
            tanh=lambda x: np.tanh(2 * x),
        )

    def sim_givenAdj(self, Adj: np.ndarray, model='line'):
        """\
        Simulate data given only an adjacency matrix and a model.

        The model is a bivariate functional dependence. The adjacency matrix
        needs to be acyclic.

        Parameters
        ----------
        Adj
            adjacency matrix of shape (dim,dim).
        model
            key into ``self.funcs`` selecting the coupling function.

        Returns
        -------
        Data array of shape (n_samples,dim).
        """
        # nice examples
        examples = [  # noqa: F841 TODO We are really unsure whether this is needed.
            dict(
                func='sawtooth',
                gdist='uniform',
                sigma_glob=1.8,
                sigma_noise=0.1,
            )
        ]
        # nr of samples
        n_samples = 100
        # noise
        sigma_glob = 1.8
        sigma_noise = 0.4
        # coupling function / model
        func = self.funcs[model]
        # glob distribution
        sourcedist = 'uniform'
        # loop over source nodes
        dim = Adj.shape[0]
        X = np.zeros((n_samples, dim))
        # source nodes have no parents themselves: sample them directly
        nrpar = 0
        children = list(range(dim))
        parents = []
        for gp in range(dim):
            if Adj[gp, :].sum() == nrpar:
                if sourcedist == 'gaussian':
                    X[:, gp] = np.random.normal(0, sigma_glob, n_samples)
                if sourcedist == 'uniform':
                    X[:, gp] = np.random.uniform(-sigma_glob, sigma_glob, n_samples)
                parents.append(gp)
                children.remove(gp)
        # all of the following guarantees for 3 dim, that we generate the data
        # in the correct sequence
        # then compute all nodes that have 1 parent, then those with 2 parents
        children_sorted = []
        nrchildren_par = np.zeros(dim)
        nrchildren_par[0] = len(parents)
        for nrpar in range(1, dim):
            # loop over child nodes
            for gp in children:
                if Adj[gp, :].sum() == nrpar:
                    children_sorted.append(gp)
                    nrchildren_par[nrpar] += 1
        # if there is more than a child with a single parent
        # order these children (there are two in three dim)
        # by distance to the source/parent
        if nrchildren_par[1] > 1:
            if Adj[children_sorted[0], parents[0]] == 0:
                help = children_sorted[0]
                children_sorted[0] = children_sorted[1]
                children_sorted[1] = help
        # generate each child from its parents plus observation noise
        for gp in children_sorted:
            for g in range(dim):
                if Adj[gp, g] > 0:
                    X[:, gp] += 1.0 / Adj[gp, :].sum() * func(X[:, g])
            X[:, gp] += np.random.normal(0, sigma_noise, n_samples)
        return X

    def sim_combi(self):
        """Simulate data to model combi regulation.

        Node 2 is generated from nodes 0 and 1 ("AND type / horizontal"
        coupling); also produces two diagnostic plots as a side effect.
        """
        n_samples = 500
        sigma_glob = 1.8
        X = np.zeros((n_samples, 3))
        X[:, 0] = np.random.uniform(-sigma_glob, sigma_glob, n_samples)
        X[:, 1] = np.random.uniform(-sigma_glob, sigma_glob, n_samples)
        func = self.funcs['tanh']
        # XOR type
        # X[:,2] = (func(X[:,0])*sp.stats.norm.pdf(X[:,1],0,0.2)
        #           + func(X[:,1])*sp.stats.norm.pdf(X[:,0],0,0.2))
        # AND type / diagonal
        # X[:,2] = (func(X[:,0]+X[:,1])*sp.stats.norm.pdf(X[:,1]-X[:,0],0,0.2))
        # AND type / horizontal
        X[:, 2] = func(X[:, 0]) * sp.stats.norm.cdf(X[:, 1], 1, 0.2)
        pl.scatter(  # noqa: F821 TODO Fix me
            X[:, 0], X[:, 1], c=X[:, 2], edgecolor='face'
        )
        pl.show()  # noqa: F821 TODO Fix me
        pl.plot(X[:, 1], X[:, 2], '.')  # noqa: F821 TODO Fix me
        pl.show()  # noqa: F821 TODO Fix me
        return X
def sample_static_data(model, dir, verbosity=0):
    """Sample static causal data for *model* and write it to *dir*.

    For non-'combi' models, draws ``n_Coupls`` random acyclic coupling
    matrices and simulates data for each; for 'combi', simulates the
    combinatorial-regulation model once.
    """
    # fraction of connectivity as compared to fully connected
    # in one direction, which amounts to dim*(dim-1)/2 edges
    connectivity = 0.8
    dim = 3
    n_Coupls = 50
    model = model.replace('static-', '')
    np.random.seed(0)
    if model != 'combi':
        n_edges = np.zeros(n_Coupls)
        for icoupl in range(n_Coupls):
            Coupl, Adj, Adj_signed, n_e = sample_coupling_matrix(dim, connectivity)
            if verbosity > 1:
                settings.m(0, icoupl)
                settings.m(0, Adj)
            n_edges[icoupl] = n_e
            # sample data
            X = StaticCauseEffect().sim_givenAdj(Adj, model)
            write_data(X, dir, Adj=Adj)
        settings.m(0, 'mean edge number:', n_edges.mean())
    else:
        X = StaticCauseEffect().sim_combi()
        Adj = np.zeros((3, 3))
        # FIX: sim_combi generates node 2 from nodes 0 and 1, so these
        # adjacency entries must be 1 (assigning 0 after np.zeros was a no-op)
        Adj[2, 0] = Adj[2, 1] = 1
        write_data(X, dir, Adj=Adj)
if __name__ == '__main__':
    import argparse

    # epilog = (' 1: 2dim, causal direction X_1 -> X_0, constraint signs\n'
    #     + ' 2: 2dim, causal direction X_1 -> X_0, arbitrary signs\n'
    #     + ' 3: 2dim, causal direction X_1 <-> X_0, arbitrary signs\n'
    #     + ' 4: 2dim, mix of model 2 and 3\n'
    #     + ' 5: 6dim double toggle switch\n'
    #     + ' 6: two independent evolutions without repression, sync.\n'
    #     + ' 7: two independent evolutions without repression, random init\n'
    #     + ' 8: two independent evolutions directed repression, random init\n'
    #     + ' 9: two independent evolutions mutual repression, random init\n'
    #     + ' 10: two indep. evol., diff. self-loops possible, mut. repr., rand init\n')
    # build the --help epilog from the registries of available models
    epilog = ''
    for k, v in StaticCauseEffect.availModels.items():
        epilog += ' static-' + k + ': ' + v
    for k, v in GRNsim.availModels.items():
        epilog += ' ' + k + ': ' + v
    # command line options
    p = argparse.ArgumentParser(
        description=(
            'Simulate stochastic discrete-time dynamical systems,\n'
            'in particular gene regulatory networks.'
        ),
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=(
            ' MODEL: specify one of the following models, or one of \n'
            ' the filenames (without ".txt") in the directory "models" \n' + epilog
        ),
    )
    aa = p.add_argument
    dir_arg = aa(
        '--dir',
        required=True,
        type=str,
        default='',
        help=(
            'specify directory to store data, '
            + ' must start with "sim/MODEL_...", see possible values for MODEL below '
        ),
    )
    aa('--show', action='store_true', help='show plots')
    aa(
        '--verbosity',
        type=int,
        default=0,
        help='specify integer > 0 to get more output [default 0]',
    )
    args = p.parse_args()
    # run checks on output directory: the model name is encoded in the
    # directory name as "sim/MODEL_..."
    dir = Path(args.dir)
    if not dir.resolve().parent.name == 'sim':
        raise argparse.ArgumentError(
            dir_arg,
            "The parent directory of the --dir argument needs to be named 'sim'",
        )
    else:
        model = dir.name.split('_')[0]
        settings.m(0, f'...model is: {model!r}')
    # an existing non-test directory is only removed after confirmation
    if dir.is_dir() and 'test' not in str(dir):
        message = (
            f'directory {dir} already exists, '
            'remove it and continue? [y/n, press enter]'
        )
        if str(input(message)) != 'y':
            settings.m(0, ' ...quit program execution')
            sys.exit()
        else:
            settings.m(0, ' ...removing directory and continuing...')
            shutil.rmtree(dir)
    settings.m(0, model)
    settings.m(0, dir)
    # sample data: static models are handled separately from dynamic ones
    if 'static' in model:
        sample_static_data(model=model, dir=dir, verbosity=args.verbosity)
    else:
        sample_dynamic_data(model=model, dir=dir)
| {
"repo_name": "theislab/scanpy",
"path": "scanpy/tools/_sim.py",
"copies": "1",
"size": "46870",
"license": "bsd-3-clause",
"hash": 673725933018243300,
"line_mean": 35.2467130704,
"line_max": 100,
"alpha_frac": 0.5042353895,
"autogenerated": false,
"ratio": 3.7814264966919477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9784009137050903,
"avg_score": 0.000330549828208932,
"num_lines": 1293
} |
__author__ = 'Alfi'
import threading
import time
class pBuffer():
    """Process buffer that tracks the highest priority seen so far."""

    def __init__(self, nom):
        self.n = nom
        # FIX: bfr and p were never initialized, so the first addProc()
        # call raised AttributeError
        self.bfr = []  # buffered processes
        self.p = 0     # highest priority seen so far

    def addProc(self, proc):
        """Buffer *proc*; if it outranks the current maximum, run it."""
        self.bfr.append(proc)
        if proc.p > self.p:
            self.p = proc.p
            proc.run()

    # module-level code calls .append(...); keep it working as an alias
    append = addProc
class MyThread(threading.Thread):
    """Worker thread that only works while it holds the highest priority."""

    def __init__(self, name, priority, duree, delay):
        threading.Thread.__init__(self)
        self.n = name       # thread label used in log output
        self.p = priority   # scheduling priority
        self.d = duree      # total working duration in seconds
        self.dl = delay     # pause between progress messages

    def getDuree(self):
        return self.d

    def run(self):
        # NOTE(review): relies on module-level globals `activeProceses` and
        # `condition`; in the else-branch, condition.wait() is called without
        # first acquiring the condition's lock, which raises RuntimeError —
        # TODO confirm intended synchronization
        if self.p > activeProceses.p:
            activeProceses.p = self.p
            condition.acquire()
            condition.notify_all()
            print("Starting "+self.n)
            # busy-wait for the configured duration, reporting progress
            t = time.time() + self.d
            while t > time.time():
                print(self.n+" is working!")
                time.sleep(self.dl)
            condition.release()
        else:
            condition.wait()
# NOTE(review): pBuffer.__init__ requires a `nom` argument, so the call
# below raises TypeError; pBuffer also defines no append() method
# (addProc() was presumably intended) — TODO confirm before running.
activeProceses = pBuffer()
threadLock = threading.RLock()
condition = threading.Condition(threadLock)
thread1 = MyThread("T1",1,7,1)
thread2 = MyThread("T2",2,5,1)
activeProceses.append(thread1)
activeProceses.append(thread2)
thread1.start()
thread2.start()
#thread1.resume(2)
| {
"repo_name": "sebid/se-irq-dev",
"path": "alf/test.py",
"copies": "1",
"size": "1244",
"license": "mit",
"hash": 2114954989293728000,
"line_mean": 19.393442623,
"line_max": 49,
"alpha_frac": 0.5618971061,
"autogenerated": false,
"ratio": 3.4175824175824174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44794795236824175,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alfi'
import threading
import time
class ThreadQueue():
    """Queue of waiting threads, kept sorted by the threads' comparison order."""

    def __init__(self):
        self.list = []
        self.event = threading.Event()
        # FIX: removed the bare `self.t` expression, which raised
        # AttributeError on every construction

    def addThread(self, t):
        """Insert *t* and re-sort the queue."""
        self.list.append(t)
        self.list.sort()

    def getWaitingThreadPriority(self):
        """Priority of the first (highest-ranked) waiting thread."""
        return self.list[0].getPriorite()

    def freeThread(self):
        """Pop and return the first waiting thread."""
        return self.list.pop(0)
class ThreadStack():
    """LIFO stack of threads; the top of the stack is the running thread."""

    def __init__(self):
        self.list = []

    def pop(self):
        # FIX: list.pop(len(self.list)) always raised IndexError; pop the top
        return self.list.pop()

    def peek(self):
        # FIX: self.list[len(self.list)] was always out of range
        return self.list[-1]

    def getRuningThreadPriority(self):
        # FIX: the original called the list object itself; return the top
        # thread's priority, or 0 when empty so any waiting thread wins
        if not self.list:
            return 0
        return self.list[-1].getPriorite()

    def addToStack(self, t):
        # FIX: indexing bugs; also guard the empty-stack case
        if self.list:
            # NOTE(review): original suspends the running thread via
            # .sleep(...); threading.Thread has no sleep() method — TODO
            # confirm the intended suspension mechanism
            self.list[-1].sleep(t.getTime())
        self.list.append(t)
        self.list[-1].lockTh()
class MyThread(threading.Thread):
    """Worker thread with a priority and fixed working duration.

    Python 2 code: uses `print` statements and the `__cmp__` protocol.
    """

    def __init__(self, name, priorite, timp, thread_lock):
        # NOTE(review): threading.Thread.__init__ is never called here,
        # so start() would fail — TODO confirm and fix
        self.timp = timp            # working duration in seconds
        self.prioritate = priorite  # scheduling priority
        self.name = name
        self.thread_lock = thread_lock

    def getPriorite(self):
        return self.prioritate

    def lockTh(self):
        self.thread_lock.acquire()

    def releaseTh(self):
        self.thread_lock.release()

    def __cmp__(self, other):
        # Python-2-only ordering hook; implicitly returns None when *other*
        # has no 'prioritate' attribute
        if hasattr(other, 'prioritate'):
            return self.prioritate.__cmp__(other.prioritate)

    def __repr__(self):
        return '{}: {}'.format(self.__class__.__name__,self.prioritate)

    def getTime(self):
        return self.timp

    def run(self):
        # busy-loop for self.timp seconds, reporting every half second
        t = time.time() + self.timp
        print "Starting thread "+self.name
        while t > time.time():
            print "Thread "+ self.name+" working!"
            time.sleep(0.5)
# NOTE(review): MyThread.__init__ takes four arguments
# (name, priorite, timp, thread_lock) but is called with three below,
# so these constructors raise TypeError — TODO confirm intended signature.
th_queue = ThreadQueue()
th_stack = ThreadStack()
th_lock = threading.Lock()
th1 = MyThread("1",3,5)
th2 = MyThread("2",1,7)
th3 = MyThread("3",2,4)
th4 = MyThread("4",4,6)
th_queue.addThread(th1)
th_queue.addThread(th2)
th_queue.addThread(th3)
th_queue.addThread(th4)
# scheduler loop: promote the top waiting thread whenever it outranks
# the currently running one (busy loop, never terminates)
while True :
    # code for adding threads to the queue
    # run the reader in a separate thread
    if(th_queue.getWaitingThreadPriority() > th_stack.getRuningThreadPriority()):
        th_stack.addToStack(th_queue.freeThread())
        t = th_stack.peek()
        t.run()
| {
"repo_name": "sebid/se-irq-dev",
"path": "alf/test5.py",
"copies": "1",
"size": "2269",
"license": "mit",
"hash": -7550489502275334000,
"line_mean": 21.2450980392,
"line_max": 81,
"alpha_frac": 0.6024680476,
"autogenerated": false,
"ratio": 3.2694524495677233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43719204971677234,
"avg_score": null,
"num_lines": null
} |
# NOTE(review): this file contains unresolved git merge conflict markers
# (<<<<<<< / ======= / >>>>>>>) and is therefore not valid Python.
# Resolve the conflict before making any further changes; the content is
# preserved unchanged below.
__author__ = 'Alfi'
import threading
import time
<<<<<<< HEAD
class ThreadQueue():
=======
class ThreadController():
>>>>>>> f89e10fff1e145a8723f0a8a708903dd8cf6d27e
    def __init__(self):
        self.list = []
        self.event = threading.Event()
        self.t
    def addThread(self,t):
        self.list.append(t)
        self.list.sort()
<<<<<<< HEAD
    def getWaitingThreadPriority(self):
        return self.list[0].getPriorite()
    def freeThread(self):
        return self.list.pop(0)
class ThreadStack():
    def __init__(self):
        self.list = []
    def pop(self):
        return self.list.pop(len(self.list))
    def peek(self):
        return self.list[len(self.list)]
    def getRuningThreadPriority(self):
        return self.list(len(self.list))
    def addToStack(self,t):
        self.list[len(self.list)].sleep(t.getTime())
        self.list.append(t)
        self.list(len(self.list)).lockTh()
=======
>>>>>>> f89e10fff1e145a8723f0a8a708903dd8cf6d27e
class MyThread(threading.Thread):
    def __init__(self,name,priorite,timp,thread_lock):
        self.timp = timp
        self.prioritate = priorite
        self.name = name
        self.thread_lock = thread_lock
    def getPriorite(self):
        return self.prioritate
    def lockTh(self):
        self.thread_lock.acquire()
    def releaseTh(self):
        self.thread_lock.release()
    def __cmp__(self, other):
        if hasattr(other, 'prioritate'):
            return self.prioritate.__cmp__(other.prioritate)
    def __repr__(self):
        return '{}: {}'.format(self.__class__.__name__,self.prioritate)
    def getTime(self):
        return self.timp
    def run(self):
        t = time.time() + self.timp
        print "Starting thread "+self.name
        while t > time.time():
            print "Thread "+ self.name+" working!"
            time.sleep(0.5)
<<<<<<< HEAD
th_queue = ThreadQueue()
th_stack = ThreadStack()
th_lock = threading.Lock()
th1 = MyThread("1",3,5)
th2 = MyThread("2",1,7)
th3 = MyThread("3",2,4)
th4 = MyThread("4",4,6)
th_queue.addThread(th1)
th_queue.addThread(th2)
th_queue.addThread(th3)
th_queue.addThread(th4)
while True :
    # cod pentru adaugare de thraduri in queue
    #facem readerul intr-un thread separat
    if(th_queue.getWaitingThreadPriority() > th_stack.getRuningThreadPriority()):
        th_stack.addToStack(th_queue.freeThread())
        t = th_stack.peek()
        t.run()
=======
th_controler = ThreadController()
ev = threading.Event()
th1 = MyThread("1",ev,3,5)
th2 = MyThread("2",ev,1,7)
th3 = MyThread("3",ev,2,4)
th_controler.addThread(th1)
th_controler.addThread(th2)
th_controler.addThread(th3)
>>>>>>> f89e10fff1e145a8723f0a8a708903dd8cf6d27e
| {
"repo_name": "sebid/se-irq-dev",
"path": "alf/test4.py",
"copies": "1",
"size": "2728",
"license": "mit",
"hash": -2028933883674224000,
"line_mean": 21.5454545455,
"line_max": 81,
"alpha_frac": 0.6158357771,
"autogenerated": false,
"ratio": 3.004405286343612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4120241063443612,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alfredo Saglimbeni'
import re
import uuid
from django.forms.widgets import MultiWidget , to_current_timezone, DateTimeInput
from django.utils.translation import ugettext as _
from datetime import datetime
from django.utils import translation
# JavaScript snippet registering the translated locale strings with
# bootstrap-datetimepicker; the %s slots are filled in format_output()
I18N = """
$.fn.datetimepicker.dates['en'] = {
days: %s,
daysShort: %s,
daysMin: %s,
months: %s,
monthsShort: %s,
meridiem: %s,
suffix: %s,
today: %s
};
"""
# template for the datetimepicker constructor options; the %s slots are
# filled from DateTimeWidget.option in exactly this order
datetimepicker_options = """
format : '%s',
startDate : '%s',
endDate : '%s',
weekStart : %s,
daysOfWeekDisabled : %s,
autoclose : %s,
startView : %s,
minView : %s,
maxView : %s,
todayBtn : %s,
todayHighlight : %s,
minuteStep : %s,
pickerPosition : '%s',
showMeridian : %s,
language : '%s',
"""
# mapping from bootstrap-datetimepicker format tokens to the equivalent
# strptime/strftime directives, used to translate the widget format string
dateConversion = {
    'P' : '%p',
    'ss' : '%S',
    'ii' : '%M',
    'hh' : '%H',
    'HH' : '%I',
    'dd' : '%d',
    'mm' : '%m',
    #'M' : '%b',
    #'MM' : '%B',
    'yy' : '%y',
    'yyyy' : '%Y',
}
class DateTimeWidget(MultiWidget):
def __init__(self, attrs=None, options=None):
    """Build the widget's option tuple and the underlying DateTimeInput.

    ``options`` maps bootstrap-datetimepicker option names to values; the
    'format' entry is also translated into a strptime format string.
    """
    # FIX: the original used a mutable dict default (`options={}`), which
    # is shared across all instances; use a None sentinel instead
    if options is None:
        options = {}
    if attrs is None:
        attrs = {'readonly': ''}
    # the tuple order must match the %s slots in datetimepicker_options
    self.option = ()
    self.option += (options.get('format', 'dd/mm/yyyy hh:ii'),)
    self.option += (options.get('startDate', ''),)
    self.option += (options.get('endDate', ''),)
    self.option += (options.get('weekStart', '0'),)
    self.option += (options.get('daysOfWeekDisabled', '[]'),)
    self.option += (options.get('autoclose', 'false'),)
    self.option += (options.get('startView', '2'),)
    self.option += (options.get('minView', '0'),)
    self.option += (options.get('maxView', '4'),)
    self.option += (options.get('todayBtn', 'false'),)
    self.option += (options.get('todayHighlight', 'false'),)
    self.option += (options.get('minuteStep', '5'),)
    self.option += (options.get('pickerPosition', 'bottom-right'),)
    self.option += (options.get('showMeridian', 'false'),)
    # translate the picker's format tokens into strptime directives
    pattern = re.compile(r'\b(' + '|'.join(dateConversion.keys()) + r')\b')
    self.dataTimeFormat = self.option[0]
    self.format = pattern.sub(lambda x: dateConversion[x.group()], self.option[0])
    widgets = (DateTimeInput(attrs=attrs, format=self.format),)
    super(DateTimeWidget, self).__init__(widgets, attrs)
def value_from_datadict(self, data, files, name):
    """Parse the posted sub-widget value back into a datetime string.

    Returns the current-timezone datetime as ``str``, or the empty string
    when the posted value does not match ``self.format``.
    """
    date_time = [
        widget.value_from_datadict(data, files, name + '_%s' % i)
        for i, widget in enumerate(self.widgets)]
    try:
        D = to_current_timezone(datetime.strptime(date_time[0], self.format))
    except ValueError:
        # unparseable input is treated as "no value"
        return ''
    else:
        return str(D)
def decompress(self, value):
    """Split *value* into the 1-tuple the single sub-widget expects.

    Falsy values decompress to ``(None,)``; datetimes are converted to the
    current timezone first.
    """
    if not value:
        return (None,)
    return (to_current_timezone(value),)
def format_output(self, rendered_widgets):
    """
    Given a list of rendered widgets (as strings), it inserts an HTML
    linebreak between them.

    Returns a Unicode string representing the HTML for the whole lot:
    the input wrapped in a bootstrap add-on container plus an inline
    script that initializes the datetimepicker with translated locale
    strings and the widget's options.
    """
    # locale strings marked for translation via ugettext
    WEEKDAYS = [ _("Sunday"), _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"), _("Friday"), _("Saturday"), _("Sunday")]
    WEEKDAYS_ABBR = [_("Sun"), _("Mon"), _("Tue"), _("Wed"), _("Thu"), _("Fri"), _("Sat"), _("Sun")]
    WEEKDAYS_MIN = [_("Su"), _("Mo"), _("Tu"), _("We"), _("Th"), _("Fr"), _("Sa"), _("Su")]
    MONTHS = [_("January"), _("February"), _("March"), _("April"), _("May"), _("June"), _("July"), _("August"), _("September"), _("October"), _("November"), _("December")]
    MONTHS_ABBR = [_("Jan"), _("Feb"), _("Mar"), _("Apr"), _("May"), _("Jun"), _("Jul"), _("Aug"), _("Sep"), _("Oct"), _("Nov"), _("Dec")]
    MERDIEM = [_("am"), _("pm")]
    SUFFIX = [_("st"), _("nd"), _("rd"), _("th")]
    TODAY = "'%s'"%_("Today")
    js_i18n = I18N % (WEEKDAYS,WEEKDAYS_ABBR, WEEKDAYS_MIN, MONTHS, MONTHS_ABBR, MERDIEM, SUFFIX, TODAY)
    # append the active language to the option tuple for the picker
    options = self.option+(translation.get_language(),)
    js_options = datetimepicker_options % options
    # unique element id so multiple widgets can coexist on one page
    id = uuid.uuid4().hex
    # the .replace calls strip Python-2 unicode 'u' prefixes from the
    # repr'd lists embedded in the generated JavaScript
    return '<div id="%s" class="input-append date form_datetime">'\
           '%s'\
           '<span class="add-on"><i class="icon-th"></i></span>'\
           '</div>'\
           '<script type="text/javascript">'\
           '%s$("#%s").datetimepicker({%s});'\
           '</script> ' % ( id, rendered_widgets[0], js_i18n.replace(', u\'',', \'').replace('[u', '['), id , js_options)
class Media:
css = {
'all' : ('css/datetimepicker.css',)
}
js = (
"js/bootstrap-datetimepicker.js",
) | {
"repo_name": "jimr/django-datetime-widget",
"path": "datetimewidget/widgets.py",
"copies": "1",
"size": "4699",
"license": "bsd-3-clause",
"hash": 8229695850117963000,
"line_mean": 33.0579710145,
"line_max": 175,
"alpha_frac": 0.5201106618,
"autogenerated": false,
"ratio": 3.2906162464985993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43107269082985994,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alfredo Saglimbeni'
from distutils.core import setup
from setuptools import setup, find_packages
# Package configuration for the clean-image-crop-uploader (CICU) distribution.
setup(
    name="clean-image-crop-uploader",
    version="0.2.2",
    description="Clean Image Crop Uploader (CICU) provides AJAX file upload and image CROP functionalities for ImageFields with a simple widget replacement in the form. It use Modal from twitter-bootstrap.",
    long_description=open('README.rst').read(),
    author="asagli",
    author_email="alfredo.saglimbeni@gmail.com",
    url="",
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'PIL==1.1.7',
        'django>=1.4.3',
        'south>=0.7.6',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| {
"repo_name": "hobarrera/clean-image-crop-uploader",
"path": "setup.py",
"copies": "2",
"size": "1026",
"license": "bsd-3-clause",
"hash": 3553599284615874000,
"line_mean": 37,
"line_max": 209,
"alpha_frac": 0.6403508772,
"autogenerated": false,
"ratio": 3.857142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5497493734342858,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alfredo Saglimbeni'
from distutils.core import setup
from setuptools import setup, find_packages
# Package configuration for the django-datetime-widget 0.9.3 distribution.
setup(
    name="django-datetime-widget",
    version="0.9.3",
    description="Django-datetime-widget is a simple and clean widget for DateField, Timefiled and DateTimeField in Django framework. It is based on Bootstrap datetime picker, supports both Bootstrap 3 and Bootstrap 2",
    long_description=open('README.rst').read(),
    author="Alfredo Saglimbeni",
    author_email="alfredo.saglimbeni@gmail.com",
    url="",
    license="BSD",
    packages=find_packages(),
    include_package_data=True,
    install_requires=['django', 'pytz'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        "License :: OSI Approved :: BSD License",
        'Topic :: Software Development :: Libraries :: Python Modules ',
    ],
    zip_safe=False,
)
| {
"repo_name": "NoodleEducation/django-datetime-widget",
"path": "setup.py",
"copies": "2",
"size": "1093",
"license": "bsd-3-clause",
"hash": 8061587981970782000,
"line_mean": 38.0357142857,
"line_max": 221,
"alpha_frac": 0.6523330284,
"autogenerated": false,
"ratio": 4.109022556390977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5761355584790977,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alfredo Saglimbeni'
from distutils.core import setup
from setuptools import setup, find_packages
# Package configuration for the django-datetime-widget 0.9.5 distribution.
setup(
    name="django-datetime-widget",
    version="0.9.5",
    description="Django-datetime-widget is a simple and clean widget for DateField, Timefiled and DateTimeField in Django framework. It is based on Bootstrap datetime picker, supports both Bootstrap 3 and Bootstrap 2",
    long_description=open('README.rst').read(),
    author="Alfredo Saglimbeni",
    author_email="alfredo.saglimbeni@gmail.com",
    url="",
    license="BSD",
    packages=find_packages(),
    include_package_data=True,
    install_requires=['django', 'pytz'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        "License :: OSI Approved :: BSD License",
        'Topic :: Software Development :: Libraries :: Python Modules ',
    ],
    zip_safe=False,
)
| {
"repo_name": "michaeljones/django-datetime-widget",
"path": "setup.py",
"copies": "1",
"size": "1093",
"license": "bsd-3-clause",
"hash": -5797884592076391000,
"line_mean": 38.0357142857,
"line_max": 221,
"alpha_frac": 0.6523330284,
"autogenerated": false,
"ratio": 4.109022556390977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5261355584790977,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alicia.williams'
# CIS-125 FA 2015
# Week 4: piggetty.py
# File: piggetty.py
# Code to create the way to completely translate all words into the correct Pig
# Latin Terms
# Define a function called piggy(string) that returns a string
# Letters treated as vowels when deciding how to pigify a word.
vowels = "aeiouAEIOU"


def piggy(word):
    """Translate a single word into Pig Latin.

    A word starting with a vowel gets "yay" appended; otherwise the leading
    consonant cluster moves to the end and "ay" is appended.  Fix: a word
    containing no vowels at all (e.g. "nth") previously fell off the end of
    the loop and returned None; it now just gets "ay" appended.
    """
    for n, letter in enumerate(word):
        if letter in vowels:
            if n == 0:
                # Starts with a vowel: keep the word intact, append "yay".
                return word + "yay"
            # word[:n] is the leading consonant cluster; move it to the end.
            return word[n:] + word[:n] + "ay"
    # No vowel found (all consonants, or empty string): append "ay".
    return word + "ay"
# Translate the Gettysburg Address (getty.txt) into Pig Latin, writing the
# result to piggy.txt.  Context managers replace the original bare
# open()/close() pairs so the files are closed even if piggy() raises.
with open("getty.txt", "r") as infile:
    stringgetty = infile.read()
# Strip out characters that would confuse the word translator (, - .).
for _bad in (",", "-", "."):
    stringgetty = stringgetty.replace(_bad, "")
# Split the cleaned text into a list of words.
listgetty = stringgetty.split()
# Pigify each word, accumulating the result separated by spaces.
gettypig = ""
for word in listgetty:
    if len(word) > 0:
        # str() guards against piggy() returning None for vowel-less words.
        gettypig = gettypig + str(piggy(word)) + " "
# Write the new string to piggy.txt; the context manager closes it.
with open("piggy.txt", "w") as outfile:
    outfile.write(gettypig)
outfile.close() | {
"repo_name": "ajanaew24/Week-Four-Assignment",
"path": "piggetty.py",
"copies": "1",
"size": "1629",
"license": "mit",
"hash": 7007290378076359000,
"line_mean": 21.9577464789,
"line_max": 79,
"alpha_frac": 0.6617556783,
"autogenerated": false,
"ratio": 2.828125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39898806783,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ali Hamdan'
__version__ = '0.1.0'
__license__ = 'MIT'
import csv
import json
class jsontocsvify():
    """Flatten a (possibly nested) JSON document into a single-row CSV.

    Nested dictionary keys are joined with ``delimeter`` (default ``.``),
    e.g. ``{"city": {"name": "X"}}`` becomes the column ``city.name``.
    """

    def __init__(self, json_data, delimeter='.'):
        # Accept either an already-parsed dict or a JSON string.
        self.delimeter = delimeter
        self.json_data = json_data if isinstance(json_data, dict) else json.loads(json_data)
        # Accumulates the flattened {column_name: string_value} mapping.
        self.parsed_data = {}

    def normalize_nodes(self, nodes, nlist=None):
        """Flatten arbitrarily nested lists/tuples of key parts into one list.

        nodes: a data collection of nested elements
        nlist: result accumulator (created fresh per top-level call)
        """
        if nlist is None:
            nlist = []
        for node in nodes:
            if any((isinstance(node, list), isinstance(node, tuple))):
                # Recurse into nested collections of key parts.
                self.normalize_nodes(node, nlist)
            else:
                nlist.append(node)
        return nlist

    def flatten(self, items, *parent_node):
        """Recursively flatten *items* under the key path *parent_node*.

        Leaf values are written into ``self.parsed_data`` with their full
        dotted path as the key, e.g.
        {'city': {'coord': {'lat': 54.8}}} -> {'city.coord.lat': '54.8'}.

        Fix: intermediate key parts are now joined with ``self.delimeter``
        (the original hard-coded "." regardless of the configured delimiter).
        """
        if isinstance(items, dict):
            for k, v in items.items():
                self.flatten(v, parent_node, k)
                if not any((isinstance(v, dict), isinstance(v, list))):
                    # Leaf value: build the full path and store it as a string.
                    path = self.delimeter.join(self.normalize_nodes(parent_node))
                    self.parsed_data.update({path + self.delimeter + k: str(v)})
        elif isinstance(items, list):
            # Lists do not contribute a key part of their own.
            for item in items:
                self.flatten(item, parent_node)

    def to_csv(self, out_file):
        """Write the flattened data to *out_file* as header row + value row.

        Columns are emitted in sorted key order.  Fix: the original
        re-opened and rewrote the file once per key inside a loop and
        ignored its own sort; it now opens the file once and writes the
        sorted columns (newline='' per the csv module docs).
        """
        for key, value in self.json_data.items():
            if any((isinstance(value, list), isinstance(value, dict))):
                self.flatten(value, key)
            else:
                self.parsed_data.update({key: str(value)})
        # Sort once so column order is deterministic.
        items = sorted(self.parsed_data.items(), key=lambda k_v: k_v[0])
        with open(out_file, "w", newline="") as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow([k for k, _ in items])
            writer.writerow([v for _, v in items])
"repo_name": "yxorP/jsontocsvify",
"path": "jsontocsvify.py",
"copies": "1",
"size": "2288",
"license": "mit",
"hash": -4005138739428953600,
"line_mean": 25.3103448276,
"line_max": 86,
"alpha_frac": 0.6341783217,
"autogenerated": false,
"ratio": 2.9408740359897174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8914460699685638,
"avg_score": 0.0321183316008158,
"num_lines": 87
} |
__author__ = 'alimanfoo@googlemail.com'
__version__ = '0.9-SNAPSHOT'
from itertools import cycle
import numpy as np
import matplotlib.pyplot as plt
# Module-wide matplotlib defaults: draw axis tick marks outside the plot
# frame for every figure produced by this module.
plt.rcParams['ytick.direction'] = 'out'
plt.rcParams['xtick.direction'] = 'out'
def allele_balance_plot(G, AD, coverage=None, colors='bgrcmyk', legend=True, ax=None, **kwargs):
    """
    Scatter ref vs alt allele depths for a single sample, one colour per
    genotype call. N.B., assumes biallelic variants.
    Parameters
    ---------
    G: array
        A 1-dimensional array of genotypes coded as integers (e.g., 0 = hom ref, 1 = het, 2 = hom alt)
    AD: array
        A 2-dimensional array of integers with shape (#variants, 2) where
        the second axis represents depths of the first and second alleles
    coverage: int
        Maximum coverage expected (used to limit axes)
    colors: sequence
        Colors to use for hom ref, het and hom alt genotypes respectively
    legend: boolean
        If True add a legend
    ax: axes
        Axes on which to draw
    All further keyword arguments are passed to ax.plot().
    """
    # create a figure/axes pair when the caller did not supply one
    if ax is None:
        ax = plt.figure(figsize=(5, 5)).add_subplot(111)
    # derive the axis limit from the data unless given explicitly
    if coverage is None:
        coverage = np.percentile(AD, 98)
    # default marker style; caller kwargs take precedence
    plot_style = dict(alpha=.05, marker='o', linestyle=' ', markeredgewidth=0)
    plot_style.update(kwargs)
    # one scatter series per genotype state
    states = range(np.max(G) + 1)
    for state, color in zip(states, cycle(colors)):
        sel = np.nonzero(G == state)[0]
        depths = np.take(AD, sel, axis=0)
        ax.plot(depths[:, 0], depths[:, 1], color=color, label=state, **plot_style)
    ax.set_xlim(-2, coverage)
    ax.set_ylim(-2, coverage)
    # reference diagonal marks perfectly balanced allele depths
    ax.plot([0, coverage], [0, coverage], color='gray', linestyle=':')
    # cosmetic clean-up
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_visible(False)
    ax.set_xlabel('ref allele depth')
    ax.set_ylabel('alt allele depth')
    ax.grid(axis='both')
    # proxy artists so the legend shows solid colour patches
    if legend:
        proxies = [plt.Rectangle([0, 0], 1, 1, fc=color)
                   for _, color in zip(states, cycle(colors))]
        ax.legend(proxies, states)
    return ax
def allele_balance_hist(G, AD, colors='bgrcmyk', bins=30, legend=True, ax=None, **kwargs):
    """
    Plot a histogram of the fraction of reads supporting the alternate allele.
    N.B., assumes biallelic variants.
    Parameters
    ---------
    G: array
        A 1-dimensional array of genotypes coded as integers (e.g., 0 = hom ref, 1 = het, 2 = hom alt)
    AD: array
        A 2-dimensional array of integers with shape (#variants, 2) where
        the second axis represents depths of the first and second alleles
    colors: str or sequence
        Colors to use for hom ref, het and hom alt genotypes respectively
    bins: int
        Number of bins to use
    legend: boolean
        If True add a legend
    ax: axes
        Axes on which to draw
    All further keyword arguments are passed to ax.hist().
    """
    # set up axes
    if ax is None:
        fig = plt.figure(figsize=(5, 5))
        ax = fig.add_subplot(111)
    # set plotting defaults; caller kwargs take precedence
    pltargs = {
        'alpha': .5,
        'histtype': 'bar',
        'linewidth': 0,
    }
    pltargs.update(kwargs)
    # per-genotype histogram counts, keyed by genotype code
    N = dict()
    states = range(np.max(G)+1)
    for g, color in zip(states, cycle(colors)):
        # include only calls with the given genotype and non-zero total depth
        # (zero depth would divide by zero below)
        indices = np.nonzero((G == g) & (np.sum(AD, axis=1) > 0))[0]
        ADf = np.take(AD, indices, axis=0)
        X = ADf[:, 1] * 1. / np.sum(ADf, axis=1)
        n, _, _ = ax.hist(X, bins=np.linspace(0, 1, bins), color=color, **pltargs)
        N[g] = n
    # plot 50% line (expected alt fraction for a true het)
    ax.axvline(.5, color='gray', linestyle=':')
    # make pretty
    for s in 'top', 'right', 'left':
        ax.spines[s].set_visible(False)
    ax.xaxis.tick_bottom()
    ax.set_yticks([])
    ax.set_xlabel('alt allele fraction')
    ax.set_ylabel('frequency')
    # scale the y-axis relative to the het histogram (genotype coded as 1).
    # Fix: the original raised KeyError when the data contained no het calls
    # (np.max(G) < 1); only rescale when het counts actually exist.
    if 1 in N and len(N[1]) and max(N[1]) > 0:
        ax.set_ylim(0, max(N[1]) * 2)
    # make legend from proxy patches matching the histogram colours
    if legend:
        proxies = list()
        for g, color in zip(states, cycle(colors)):
            p = plt.Rectangle([0, 0], 1, 1, fc=color, alpha=pltargs['alpha'])
            proxies.append(p)
        ax.legend(proxies, states)
    return ax
def allele_balance_hexbin(G, AD, g=1, coverage=None, ax=None, **kwargs):
    """
    Plot allele depths for genotypes as a hexbin.
    Parameters
    ---------
    G: array
        A 1-dimensional array of genotypes coded as integers (e.g., 0 = hom ref, 1 = het, 2 = hom alt)
    AD: array
        A 2-dimensional array of integers with shape (#variants, 2) where
        the second axis represents depths of the first and second alleles
    g: int
        Genotype to plot allele depths for (defaults to 1 = het)
    coverage: int
        Maximum coverage expected (used to limit axes)
    ax: axes
        Axes on which to draw
    All further keyword arguments are passed to ax.hexbin().
    """
    # set up axes
    if ax is None:
        fig = plt.figure(figsize=(5, 5))
        ax = fig.add_subplot(111)
    # define coverage limit
    if coverage is None:
        coverage = np.percentile(AD, 98)
    # set plotting defaults.  Fix: hexbin requires an integer gridsize, but
    # np.percentile returns a float, so the original `coverage/2` passed a
    # float and failed on Python 3 / modern matplotlib.
    pltargs = {
        'extent': (0, coverage, 0, coverage),
        'gridsize': int(coverage) // 2,
    }
    pltargs.update(kwargs)
    # include only calls with the selected genotype
    indices = np.nonzero(G == g)[0]
    ADf = np.take(AD, indices, axis=0)
    X = ADf[:, 0]
    Y = ADf[:, 1]
    ax.hexbin(X, Y, **pltargs)
    # plot diagonal marking balanced allele depth
    ax.plot([0, coverage], [0, coverage], color='gray', linestyle=':')
    # set axis limits
    ax.set_xlim(0, coverage)
    ax.set_ylim(0, coverage)
    # make pretty
    ax.set_xlabel('ref allele depth')
    ax.set_ylabel('alt allele depth')
    return ax
def variant_density_plot(POS, window_size=10000, lim=None, ax=None, **kwargs):
    """
    Plot density (per bp) of variants.
    Parameters
    ---------
    POS: array
        1-dimensional array of genome positions of variants
    window_size: int
        Window size (bp) to calculate density within
    lim: pair of ints
        Genome region to plot (applied via ax.set_xlim)
    ax: axes
        Axes on which to draw (a new 7x2in figure is created if None)
    Returns
    -------
    The matplotlib axes drawn on.
    All further keyword arguments are passed to ax.plot().
    """
    # set up axes (create a fresh figure when the caller did not provide one)
    if ax is None:
        fig = plt.figure(figsize=(7, 2))
        ax = fig.add_subplot(111)
    # set plotting defaults; caller kwargs take precedence
    pltargs = {
        'alpha': .5,
        'marker': '.',
        'color': 'm',
        'linestyle': ' ',
    }
    pltargs.update(kwargs)
    # count variants in fixed-width windows along the genome
    bins = np.arange(0, np.max(POS), window_size)
    pos_hist, _ = np.histogram(POS, bins=bins)
    # X = window centres, Y = variants per bp within each window
    X = (bins[:-1] + window_size/2)
    Y = pos_hist*1./window_size
    # plot
    ax.plot(X, Y, **pltargs)
    # make pretty: keep only the bottom spine and horizontal gridlines
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.grid(axis='y')
    ax.xaxis.tick_bottom()
    ax.set_xlabel('position')
    ax.set_ylabel('density')
    if lim is not None:
        ax.set_xlim(*lim)
    return ax
def genotype_density_plot(POS, G, g=1, window_size=10000, lim=None, ax=None, **kwargs):
    """
    Plot density (per bp) of calls of given genotype.
    Parameters
    ---------
    POS: array
        1-dmensional array of genome positions of variants
    G: array
        A 1-dimensional array of genotypes coded as integers (e.g., 0 = hom ref, 1 = het, 2 = hom alt)
    g: int
        Genotype to plot density of (defaults to 1 = het)
    window_size: int
        Window size to calculate density within
    lim: pair of ints
        Genome region to plot
    ax: axes
        Axes on which to draw
    All further keyword arguments are passed to ax.plot().
    """
    # restrict positions to variants called with the requested genotype,
    # then delegate all the actual drawing to variant_density_plot
    matching = np.nonzero(G == g)[0]
    selected_positions = np.take(POS, matching, axis=0)
    return variant_density_plot(selected_positions, window_size=window_size,
                                lim=lim, ax=ax, **kwargs)
def variant_density_fill(POS, window_size=10000, lim=None, ax=None, **kwargs):
    """
    Plot density (per bp) of variants as a filled area.
    Parameters
    ---------
    POS: array
        1-dimensional array of genome positions of variants
    window_size: int
        Window size (bp) to calculate density within
    lim: pair of ints
        Genome region to plot (applied via ax.set_xlim)
    ax: axes
        Axes on which to draw (a new 7x2in figure is created if None)
    Returns
    -------
    The matplotlib axes drawn on.
    All further keyword arguments are passed to ax.fill_between().
    """
    # set up axes (create a fresh figure when the caller did not provide one)
    if ax is None:
        fig = plt.figure(figsize=(7, 2))
        ax = fig.add_subplot(111)
    # set plotting defaults; caller kwargs take precedence
    pltargs = {
        'alpha': .5,
        'color': 'm',
        'linestyle': '-',
    }
    pltargs.update(kwargs)
    # count variants in fixed-width windows along the genome
    bins = np.arange(0, np.max(POS), window_size)
    pos_hist, _ = np.histogram(POS, bins=bins)
    # X = window centres, Y = variants per bp within each window
    X = (bins[:-1] + window_size/2)
    Y = pos_hist*1./window_size
    # plot as a filled area between y=0 and the density curve
    ax.fill_between(X, 0, Y, **pltargs)
    # make pretty: keep only the bottom spine and horizontal gridlines
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.grid(axis='y')
    ax.xaxis.tick_bottom()
    ax.set_xlabel('position')
    ax.set_ylabel('density')
    if lim is not None:
        ax.set_xlim(*lim)
    return ax
def genotype_density_fill(POS, G, g=1, window_size=10000, lim=None, ax=None, **kwargs):
    """
    Plot density (per bp) of calls of given genotype as a filled area.
    Parameters
    ---------
    POS: array
        1-dmensional array of genome positions of variants
    G: array
        A 1-dimensional array of genotypes coded as integers (e.g., 0 = hom ref, 1 = het, 2 = hom alt)
    g: int
        Genotype to plot density of (defaults to 1 = het)
    window_size: int
        Window size to calculate density within
    lim: pair of ints
        Genome region to plot
    ax: axes
        Axes on which to draw
    All further keyword arguments are passed to ax.fill_between().
    """
    # restrict positions to variants called with the requested genotype,
    # then delegate all the actual drawing to variant_density_fill
    matching = np.nonzero(G == g)[0]
    selected_positions = np.take(POS, matching, axis=0)
    return variant_density_fill(selected_positions, window_size=window_size,
                                lim=lim, ax=ax, **kwargs)
from scipy.spatial.distance import pdist, squareform
def pairwise_distance_heatmap(X, labels=None, metric='hamming', cmap='jet', ax=None):
    """
    Plot a heatmap of pairwise distances (e.g., between samples).
    Parameters
    ---------
    X: array
        2-dimensional array of shape (#variants, #samples) to use for distance calculations
    labels: sequence of strings
        Axis labels (e.g., sample IDs); each is decorated with its column index
    metric: string
        Name of metric to use for distance calculations (see scipy.spatial.distance.pdist)
    cmap: colour map
        Colour map to use
    ax: axes
        Axes on which to draw (a new 5x5in figure is created if None)
    Returns
    -------
    The matplotlib axes drawn on.
    """
    # set up axes
    if ax is None:
        fig = plt.figure(figsize=(5, 5))
        ax = fig.add_subplot(111)
    # transpose so distances are computed between samples (columns of X)
    D = pdist(X.T, metric)
    # squareform expands the condensed distance vector into a full matrix
    ax.imshow(squareform(D), interpolation='none', cmap=cmap)
    ax.set_xticks(range(X.shape[1]))
    ax.set_yticks(range(X.shape[1]))
    if labels is not None:
        # decorate each label with its column index, e.g. "sampleA [0] "
        labels = ['%s [%s] ' % (s, i) for (i, s) in enumerate(labels)]
        ax.set_xticklabels(labels, rotation=90)
        ax.set_yticklabels(labels, rotation=0)
    return ax
def genotype_abundance_by_sample_bar(G, labels=None, colors='wbgrcmyk', legend=True, ax=None, **kwargs):
    """
    Plot a stacked bar chart of genotype abundance by sample.
    Parameters
    ---------
    G: array
        2-dimensional array of genotypes coded as integers (e.g., 0 = hom ref, 1 = het, 2 = hom alt),
        of shape (#variants, #samples)
    labels: sequence of strings
        Axis labels (e.g., sample IDs)
    colors: sequence
        Colors to use for each genotype
    legend: boolean
        If True add a legend
    ax: axes
        Axes on which to draw (a new 7x4in figure is created if None)
    Returns
    -------
    The matplotlib axes drawn on.
    All further keyword arguments are passed to ax.bar()
    """
    # set up axes
    if ax is None:
        fig = plt.figure(figsize=(7, 4))
        ax = fig.add_subplot(111)
    # set plotting defaults; caller kwargs take precedence
    pltargs = {
        'alpha': .8,
    }
    pltargs.update(kwargs)
    # one bar per sample, one stacked segment per genotype state
    X = np.arange(G.shape[1])
    width = 1.
    states = np.unique(G)
    cumy = None  # running top of the stack, per sample
    for g, color in zip(states, cycle(colors)):
        # percentage of variants called with genotype g in each sample
        Y = np.sum(G == g, axis=0) * 100. / G.shape[0]
        if cumy is None:
            # first genotype sits on the baseline
            ax.bar(X, Y, width, label=g, color=color, **pltargs)
            cumy = Y
        else:
            # subsequent genotypes are stacked on top of the previous ones
            ax.bar(X, Y, width, label=g, bottom=cumy, color=color, **pltargs)
            cumy += Y
    if legend:
        # place the legend outside the axes to the upper right
        ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    # centre a tick under each bar
    ax.set_xticks(X + width/2)
    if labels is not None:
        # decorate each label with its column index, e.g. "sampleA [0] "
        labels = ['%s [%s] ' % (s, i) for (i, s) in enumerate(labels)]
        ax.set_xticklabels(labels, rotation=90)
    ax.set_ylim(0, 100)
    ax.set_xlim(0, G.shape[1])
    ax.set_ylabel('percent')
    return ax
def calldata_by_sample_boxplot(X, labels=None, lim=None, ax=None, **kwargs):
    """
    Make a boxplot of calldata by sample (e.g., GQ, DP).
    Parameters
    ---------
    X: array
        2-dimensional array of shape (#variants, #samples)
    labels: sequence of strings
        Axis labels (e.g., sample IDs)
    lim: pair of numers
        Lower and upper limits to plot
    ax: axes
        Axes on which to draw
    Remaining keyword arguments are passed to ax.boxplot.
    """
    # create a default axes when none was supplied
    if ax is None:
        ax = plt.figure(figsize=(7, 4)).add_subplot(111)
    # suppress outlier fliers by default; caller kwargs take precedence
    box_style = dict(sym='')
    box_style.update(kwargs)
    ax.boxplot(X, **box_style)
    if lim is not None:
        ax.set_ylim(*lim)
    if labels is not None:
        # decorate each label with its column index, e.g. "sampleA [0] "
        decorated = ['%s [%s] ' % (name, idx) for (idx, name) in enumerate(labels)]
        ax.set_xticklabels(decorated, rotation=90)
    return ax
from matplotlib.colors import ListedColormap
def discrete_calldata_colormesh(X, labels=None, colors='wbgrcmyk', states=None, ax=None, **kwargs):
    """
    Make a meshgrid from discrete calldata (e.g., genotypes).
    Parameters
    ----------
    X: array
        2-dimensional array of integers of shape (#variants, #samples)
    labels: sequence of strings
        Axis labels (e.g., sample IDs)
    colors: sequence
        Colors to use for different values of the array
    states: sequence
        Manually specify discrete calldata states (if not given will be determined from the data)
    ax: axes
        Axes on which to draw
    Remaining keyword arguments are passed to ax.pcolormesh.
    """
    # set up axes
    if ax is None:
        fig = plt.figure(figsize=(7, 5))
        ax = fig.add_subplot(111)
    # determine discrete states
    if states is None:
        states = np.unique(X)
    colors = colors[:max(states)-min(states)+1] # only need as many colors as states
    # plotting defaults.  Fix: the lowercase `plt.normalize` alias was removed
    # from matplotlib long ago; the class is `plt.Normalize`.
    pltargs = {
        'cmap': ListedColormap(colors),
        'norm': plt.Normalize(min(states), max(states)+1),
    }
    pltargs.update(kwargs)
    # transpose so samples run along the y axis
    ax.pcolormesh(X.T, **pltargs)
    ax.set_xlim(0, X.shape[0])
    ax.set_ylim(0, X.shape[1])
    # centre a tick on each sample row
    ax.set_yticks(np.arange(X.shape[1]) + .5)
    if labels is not None:
        ax.set_yticklabels(labels, rotation=0)
    return ax
def continuous_calldata_colormesh(X, labels=None, ax=None, **kwargs):
    """
    Make a meshgrid from continuous calldata (e.g., DP).
    Parameters
    ----------
    X: array
        2-dimensional array of integers or floats of shape (#variants, #samples)
    labels: sequence of strings
        Axis labels (e.g., sample IDs)
    ax: axes
        Axes on which to draw
    Remaining keyword arguments are passed to ax.pcolormesh.
    """
    # create a default axes when none was supplied
    if ax is None:
        ax = plt.figure(figsize=(7, 5)).add_subplot(111)
    # default colour map; caller kwargs take precedence
    mesh_style = dict(cmap='jet')
    mesh_style.update(kwargs)
    # transpose so samples run along the y axis
    ax.pcolormesh(X.T, **mesh_style)
    n_variants, n_samples = X.shape[0], X.shape[1]
    ax.set_xlim(0, n_variants)
    ax.set_ylim(0, n_samples)
    # centre a tick on each sample row
    ax.set_yticks(np.arange(n_samples) + .5)
    if labels is not None:
        ax.set_yticklabels(labels, rotation=0)
    return ax
def genome_locator(POS, step=100, lim=None, ax=None, **kwargs):
    """
    Map variant index to genome position.
    Parameters
    ---------
    POS: array
        1-dmensional array of genome positions of variants
    step: int
        How often to draw a line
    lim: pair of ints
        Lower and upper bounds on genome position
    ax: axes
        Axes on which to draw (a new 7x1in figure is created if None)
    Returns
    -------
    The matplotlib axes drawn on.
    Remaining keyword arguments are passed to Line2D.
    """
    # set up axes
    if ax is None:
        fig = plt.figure(figsize=(7, 1))
        ax = fig.add_subplot(111)
    # default to the full range of positions when no limits given
    if lim is None:
        lim = 0, np.max(POS)
    start, stop = lim
    ax.set_xlim(start, stop)
    # draw one connector for every `step`-th variant, joining its genome
    # position to its index rescaled onto the same x range
    # NOTE(review): Line2D y-data is [0, 1], so the genome-position end sits
    # at the bottom and the scaled-index end at the top -- confirm intended
    # orientation against rendered output
    for i, pos in enumerate(POS[::step]):
        xfrom = pos
        xto = start + ((i * step * 1. / POS.size) * (stop-start))
        l = plt.Line2D([xfrom, xto], [0, 1], **kwargs)
        ax.add_line(l)
    ax.set_xlabel('position')
    ax.set_yticks([])
    ax.xaxis.tick_bottom()
    # hide the vertical spines; this is a 1-D locator strip
    for l in 'left', 'right':
        ax.spines[l].set_visible(False)
    return ax
| {
"repo_name": "alimanfoo/vcfplt",
"path": "vcfplt.py",
"copies": "1",
"size": "17760",
"license": "mit",
"hash": -4500869078521965000,
"line_mean": 24.8515283843,
"line_max": 104,
"alpha_frac": 0.5914977477,
"autogenerated": false,
"ratio": 3.4451988360814743,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9526736946145367,
"avg_score": 0.0019919275272217035,
"num_lines": 687
} |
__author__ = 'Alireza Omidi <alireza530@gmail.com>'
__license__ = 'MIT'
class Go:
    """Full state of a Go match plus helpers for move legality.

    The board is stored in ``field`` indexed as ``field[x][y]`` with
    ``x`` in [0, field_width) and ``y`` in [0, field_height); 0 marks an
    empty point, other values are player bot ids.
    """

    def __init__(self):
        # Engine timing settings.
        self.timebank = 0
        self.time_per_move = 0
        # Player identities as reported by the engine.
        self.player_names = []
        self.my_bot = ''
        self.my_botid = 0
        self.opponent_bot = ''
        self.opponent_botid = 0
        # Board geometry and state.
        self.field_width = 0
        self.field_height = 0
        self.round = 0
        self.move = 0
        self.field = []
        # Current scores.
        self.my_points = 0
        self.opponent_points = 0

    def place_move(self, x, y):
        """Emit a move order for the engine on stdout."""
        if self.field[x][y] == 1:
            pass # TODO not valid
        print('place_move %d %d' % (x, y))

    def _pass(self):
        """Emit a pass order for the engine on stdout."""
        print('pass')

    def is_empty(self, x, y):
        """Return True when no stone occupies (x, y)."""
        return self.field[x][y] == 0

    def liberties(self, srcx=None, srcy=None):
        """Return liberties (adjacent empty points) of our stones.

        Called with no arguments, unions the liberties of every one of our
        stones on the board; called with (srcx, srcy), flood-fills the
        group containing that point and returns its liberties as a list.
        """
        if srcx is None and srcy is None:
            # Union over all of our stones (deduplicated via a set).
            collected = set()
            for col in range(self.field_width):
                for row in range(self.field_height):
                    if self.field[col][row] == self.my_botid:
                        collected.update(self.liberties(col, row))
            return list(collected)
        # Iterative flood fill over the group starting at (srcx, srcy).
        pending = [(srcx, srcy)]
        seen = {(srcx, srcy)}
        found = []
        while pending:
            x, y = pending.pop()
            if self.field[x][y] == self.my_botid:
                # Part of the group: explore the four orthogonal neighbours.
                for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                    nx, ny = x + dx, y + dy
                    on_board = 0 <= nx < self.field_width and 0 <= ny < self.field_height
                    if on_board and (nx, ny) not in seen:
                        pending.append((nx, ny))
                        seen.add((nx, ny))
            elif self.field[x][y] == 0:
                # Empty point bordering the group: a liberty.
                found.append((x, y))
        return found

    def is_suicide(self, x, y):
        """Return True when playing at (x, y) leaves our group with no liberties."""
        if not self.is_empty(x, y):
            pass # TODO not valid
        # Temporarily place the stone, count liberties, then restore the board.
        self.field[x][y] = self.my_botid
        group_libs = self.liberties(x, y)
        self.field[x][y] = 0
        return len(group_libs) == 0

    def available_moves(self):
        """List every empty, non-suicidal point on the board."""
        return [(col, row)
                for col in range(self.field_width)
                for row in range(self.field_height)
                if self.is_empty(col, row) and not self.is_suicide(col, row)]

    def print_all(self):
        """Dump the whole game state (debugging aid)."""
        print('timebank = %d' % self.timebank)
        print('time_per_move = %d' % self.time_per_move)
        print('player_names = %s' % str(self.player_names))
        print('my_bot = %s' % self.my_bot)
        print('my_botid = %d' % self.my_botid)
        print('opponent_bot = %s' % self.opponent_bot)
        print('opponent_botid = %d' % self.opponent_botid)
        print('field_width = %d' % self.field_width)
        print('field_height = %d' % self.field_height)
        print('round = %d' % self.round)
        print('move = %d' % self.move)
        print('field = %s' % str(self.field))
        print('my_points = %d' % self.my_points)
        print('opponent_points = %d' % self.opponent_points)
print() | {
"repo_name": "alirezaomidi/theaigames-go-starterbot",
"path": "Go.py",
"copies": "1",
"size": "3113",
"license": "mit",
"hash": -6993731610382133000,
"line_mean": 33.2197802198,
"line_max": 91,
"alpha_frac": 0.4863475747,
"autogenerated": false,
"ratio": 3.3011664899257687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42875140646257687,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alireza Omidi <alireza530@gmail.com>'
__license__ = 'MIT'
import sys
from random import choice
class AI:
    """Starter bot: plays a random liberty of one of our groups if any
    exist, otherwise any random legal move, otherwise passes.

    Change do_turn to implement your own strategy; everything needed is
    in the Go module instance passed in each cycle.
    """

    def do_turn(self, go):
        # Debug output goes to stderr so it does not interfere with the
        # engine protocol on stdout.
        print(file=sys.stderr)
        candidates = go.liberties()
        print(candidates, file=sys.stderr)
        if candidates:
            x, y = choice(candidates)
            go.place_move(x, y)
            print('place_move %d %d' % (x, y), file=sys.stderr)
            return
        # No liberties to extend: fall back to any legal (non-suicidal) move.
        candidates = go.available_moves()
        print(candidates, file=sys.stderr)
        if not candidates:
            # Nothing playable at all: pass this turn.
            go._pass()
            print('pass', file=sys.stderr)
            return
        x, y = choice(candidates)
        go.place_move(x, y)
        print('place_move %d %d' % (x, y), file=sys.stderr)
"repo_name": "alirezaomidi/theaigames-go-starterbot",
"path": "AI.py",
"copies": "1",
"size": "1146",
"license": "mit",
"hash": -4974555110446316000,
"line_mean": 31.7714285714,
"line_max": 68,
"alpha_frac": 0.5636998255,
"autogenerated": false,
"ratio": 3.673076923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4736776748576923,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alisonbento'
import abstractdao
import src.entities.hsfullgroup as hsfullgroup
import fullappliancedao as hsappliancedao
import groupdao as hsgroupdao
class FullGroupDAO(abstractdao.AbstractDAO):
    """DAO returning groups enriched with their member appliances."""

    def __init__(self, connection):
        abstractdao.AbstractDAO.__init__(self, connection)

    def list(self, criteria=None, arguments=()):
        """Fetch all groups matching *criteria* with their appliances attached."""
        plain_groups = hsgroupdao.GroupDAO(self.connection).list(criteria, arguments)
        if not plain_groups:
            # Nothing to enrich; return the empty result as-is.
            return plain_groups
        appliance_dao = hsappliancedao.FullApplianceDAO(self.connection)
        # Selects the appliances linked to a group through the join table.
        membership_filter = "appliance_id IN (SELECT appliance_id FROM hs_group_appliances WHERE group_id = ?)"
        enriched = []
        for plain in plain_groups:
            full = hsfullgroup.HomeShellFullGroup()
            full.group = plain
            full.appliances = appliance_dao.list(membership_filter, (plain.id,))
            enriched.append(full)
        return enriched

    def get(self, entity_id, criteria=None, arguments=()):
        """Fetch one group by id (or None) with its appliances attached."""
        result = hsfullgroup.HomeShellFullGroup()
        result.group = hsgroupdao.GroupDAO(self.connection).get(entity_id, criteria, arguments)
        if result.group is None:
            return None
        appliance_dao = hsappliancedao.FullApplianceDAO(self.connection)
        membership_filter = "appliance_id IN (SELECT appliance_id FROM hs_group_appliances WHERE group_id = ?)"
        result.appliances = appliance_dao.list(membership_filter, (result.group.id,))
        return result
| {
"repo_name": "m4nolo/home-shell",
"path": "src/dao/fullgroupdao.py",
"copies": "3",
"size": "1712",
"license": "apache-2.0",
"hash": 7980896032941098000,
"line_mean": 33.9387755102,
"line_max": 106,
"alpha_frac": 0.6775700935,
"autogenerated": false,
"ratio": 3.890909090909091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00038886800523675575,
"num_lines": 49
} |
__author__ = 'alisonbento'
import abstractdao
import appliancedao as hsappliancedao
import fullservicedao as hsservicedao
import statusdao as hsstatusdao
import src.entities.hsfullappliance as hsfullappliance
class FullApplianceDAO(abstractdao.AbstractDAO):
    """DAO returning appliances enriched with their services and status rows."""

    def list(self, criteria=None, arguments=()):
        """Fetch all appliances matching *criteria*, each with services and status."""
        appliancedao = hsappliancedao.ApplianceDAO(self.connection)
        servicedao = hsservicedao.FullServiceDAO(self.connection)
        statusdao = hsstatusdao.StatusDAO(self.connection)
        all_appliances = appliancedao.list(criteria, arguments)
        if len(all_appliances) <= 0:
            # Nothing to enrich; return the empty result as-is.
            return all_appliances
        all_full_appliances = []
        for appliance in all_appliances:
            fullappliance = hsfullappliance.HomeShellFullAppliance()
            fullappliance.appliance = appliance
            fullappliance.services = servicedao.list("appliance_id = ?", (appliance.id,))
            fullappliance.status = statusdao.list("appliance_id = ?", (appliance.id,))
            all_full_appliances.append(fullappliance)
        return all_full_appliances

    def get(self, entity_id, criteria=None, arguments=()):
        """Fetch one appliance by id (or None) with services and status attached.

        Fix: ``arguments`` was missing here although FullGroupDAO.get and
        BaseDAO.get both accept it; added with a default so existing
        callers keep working while criteria placeholders can now be bound.
        """
        fullappliance = hsfullappliance.HomeShellFullAppliance()
        appliancedao = hsappliancedao.ApplianceDAO(self.connection)
        fullappliance.appliance = appliancedao.get(entity_id, criteria, arguments)
        if fullappliance.appliance is None:
            return None
        servicedao = hsservicedao.FullServiceDAO(self.connection)
        fullappliance.services = servicedao.list("appliance_id = ?", (entity_id,))
        statusdao = hsstatusdao.StatusDAO(self.connection)
        fullappliance.status = statusdao.list("appliance_id = ?", (entity_id,))
        return fullappliance
"repo_name": "m4nolo/home-shell",
"path": "src/dao/fullappliancedao.py",
"copies": "3",
"size": "1742",
"license": "apache-2.0",
"hash": 8062805042379620000,
"line_mean": 34.5714285714,
"line_max": 89,
"alpha_frac": 0.7003444317,
"autogenerated": false,
"ratio": 3.8201754385964914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018895931569850328,
"num_lines": 49
} |
__author__ = 'alisonbento'
import abstractdao
import servicedao as hsservicedao
import paramdao as hsparamdao
import src.entities.hsfullservice as hsfullservice
class FullServiceDAO(abstractdao.AbstractDAO):
    """DAO returning services enriched with their parameters."""

    def list(self, criteria=None, arguments=()):
        """Fetch all services matching *criteria*, each with its params attached."""
        servicedao = hsservicedao.ServiceDAO(self.connection)
        paramdao = hsparamdao.ParamDAO(self.connection)
        all_services = servicedao.list(criteria, arguments)
        if len(all_services) <= 0:
            # Nothing to enrich; return the empty result as-is.
            return all_services
        all_full_services = []
        for service in all_services:
            fullservice = hsfullservice.HomeShellFullService()
            fullservice.service = service
            fullservice.params = paramdao.list("service_id = ?", (service.id,))
            all_full_services.append(fullservice)
        return all_full_services

    def get(self, entity_id, criteria=None, arguments=()):
        """Fetch one service by id (or None) with its params attached.

        Fix: ``arguments`` was missing here although FullGroupDAO.get and
        BaseDAO.get both accept it; added with a default so existing
        callers keep working while criteria placeholders can now be bound.
        """
        fullservice = hsfullservice.HomeShellFullService()
        servicedao = hsservicedao.ServiceDAO(self.connection)
        fullservice.service = servicedao.get(entity_id, criteria, arguments)
        if fullservice.service is None:
            return None
        paramdao = hsparamdao.ParamDAO(self.connection)
        fullservice.params = paramdao.list("service_id = ?", (entity_id,))
        return fullservice
| {
"repo_name": "m4nolo/home-shell",
"path": "src/dao/fullservicedao.py",
"copies": "3",
"size": "1306",
"license": "apache-2.0",
"hash": -9050709362974765000,
"line_mean": 29.3720930233,
"line_max": 79,
"alpha_frac": 0.6761102603,
"autogenerated": false,
"ratio": 4.08125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023613595706618962,
"num_lines": 43
} |
__author__ = 'alisonbento'
import abstractdao
class BaseDAO(abstractdao.AbstractDAO):
    """Generic CRUD helpers for a single table keyed by one primary-key column.

    Subclasses supply the table / primary-key names and implement
    convert_row_to_object() to map database rows onto entity objects.
    """

    def __init__(self, connection, table, primary_key):
        abstractdao.AbstractDAO.__init__(self, connection)
        self.table = table  # table name used in generated SQL
        self.primary_key = primary_key  # primary-key column name

    def list(self, criteria=None, arguments=()):
        """Return all matching entities.

        list() and select() were duplicate implementations; list() is now a
        thin alias so both stay behaviorally identical.
        """
        return self.select(criteria, arguments)

    def get(self, entity_id, criteria=None, arguments=()):
        """Return the entity with the given id (plus optional extra criteria),
        or None when no row matches."""
        cursor = self.connection.cursor()
        # FIX: bind the id as a query parameter instead of interpolating it
        # into the SQL string — avoids SQL injection and supports non-numeric
        # primary keys.
        sql = "SELECT * FROM " + str(self.table) + " WHERE " + str(self.primary_key) + " = ?"
        params = (entity_id,)
        if criteria is not None:
            sql += " AND " + str(criteria)
            params += tuple(arguments)
        cursor.execute(sql, params)
        entity_row = cursor.fetchone()
        if entity_row is None:
            return None
        return self.convert_row_to_object(entity_row)

    def select(self, criteria=None, arguments=()):
        """Return all entities, optionally filtered by a parameterized WHERE clause."""
        cursor = self.connection.cursor()
        sql = "SELECT * FROM " + str(self.table)
        if criteria is not None:
            sql += " WHERE " + str(criteria)
            cursor.execute(sql, arguments)
        else:
            cursor.execute(sql)
        return [self.convert_row_to_object(row) for row in cursor.fetchall()]

    def delete(self, entity_id):
        """Delete the row with the given primary-key value."""
        cursor = self.connection.cursor()
        sql = "DELETE FROM " + str(self.table) + " WHERE " + str(self.primary_key) + " = ?"
        # FIX: execute() expects a parameter *sequence*; the scalar entity_id
        # was previously passed bare, which raises for non-string ids.
        return cursor.execute(sql, (entity_id,))
# def insert(self, entity):
# pass
#
# def update(self, entity):
# pass
#
# def convert_row_to_object(self, entity_row):
# pass
| {
"repo_name": "alisonbnt/home-shell",
"path": "src/dao/basedao.py",
"copies": "3",
"size": "2357",
"license": "apache-2.0",
"hash": 5908555568561122000,
"line_mean": 27.0595238095,
"line_max": 109,
"alpha_frac": 0.5638523547,
"autogenerated": false,
"ratio": 3.9087893864013266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5972641741101327,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alisonbento'
import basedao
from src.entities.hsappliance import HomeShellAppliance
import datetime
import configs
class ApplianceDAO(basedao.BaseDAO):
    """Row mapper / DAO for the hs_appliances table."""

    def __init__(self, connection):
        basedao.BaseDAO.__init__(self, connection, 'hs_appliances', 'appliance_id')

    def convert_row_to_object(self, entity_row):
        """Map an hs_appliances row onto a HomeShellAppliance entity."""
        entity = HomeShellAppliance()
        entity.id = entity_row['appliance_id']
        entity.package = entity_row['package']
        entity.type = entity_row['type']
        entity.name = entity_row['name']
        entity.key = None
        entity.address = entity_row['address']
        entity.hash = entity_row['appliance_hash']
        entity.modified = entity_row['modified']
        entity.modified_datetime = datetime.datetime.strptime(entity.modified, configs.DATABASE_DATE_FORMAT)
        return entity

    def update(self, entity):
        """Persist only the 'modified' timestamp of an appliance."""
        cursor = self.connection.cursor()
        cursor.execute(
            "UPDATE " + self.table + " SET modified = ? WHERE appliance_id = ?",
            (entity.modified, entity.id),
        )
| {
"repo_name": "alisonbnt/home-shell",
"path": "src/dao/appliancedao.py",
"copies": "2",
"size": "1109",
"license": "apache-2.0",
"hash": -7074807794030177000,
"line_mean": 33.65625,
"line_max": 114,
"alpha_frac": 0.6681695221,
"autogenerated": false,
"ratio": 4.018115942028985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5686285464128986,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alisonbento'
import basedao
from src.entities.hsextra import HomeShellExtra
class ExtraDAO(basedao.BaseDAO):
    """DAO for the hs_appliance_extras table (free-form key/value extras)."""

    def __init__(self, connection):
        basedao.BaseDAO.__init__(self, connection, 'hs_appliance_extras', 'extra_id')

    def convert_row_to_object(self, entity_row):
        """Map an hs_appliance_extras row onto a HomeShellExtra entity."""
        extra = HomeShellExtra()
        extra.id = entity_row['extra_id']
        extra.extra_key = entity_row['extra_key']
        extra.extra_value = entity_row['extra_value']
        extra.extra_date = entity_row['extra_date']
        extra.created = entity_row['created']
        return extra

    def insert(self, entity):
        """Insert an extra row and set entity.id from the cursor's lastrowid."""
        cursor = self.connection.cursor()
        # Named-parameter INSERT; dict keys must match the :placeholders.
        sql = "INSERT INTO " + self.table + "(appliance_id, extra_key, extra_value, extra_date, created) VALUES "
        sql += "(:appliance_id, :extra_key, :extra_value, :extra_date, :created)"
        values = {
            'appliance_id': entity.appliance_id,
            'extra_key': entity.extra_key,
            'extra_value': entity.extra_value,
            'extra_date': entity.extra_date,
            'created': entity.created
        }
        print(values)
        cursor.execute(sql, values)
        entity.id = cursor.lastrowid
        print(entity.id)
return entity.id is not None and entity.id > 0 | {
"repo_name": "m4nolo/home-shell",
"path": "src/dao/extradao.py",
"copies": "3",
"size": "1297",
"license": "apache-2.0",
"hash": -8946659913087954000,
"line_mean": 29.9047619048,
"line_max": 113,
"alpha_frac": 0.6037008481,
"autogenerated": false,
"ratio": 3.684659090909091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5788359939009091,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alisonbento'
import basedao
from src.entities.hsstatus import HomeShellStatus
class StatusDAO(basedao.BaseDAO):
    """DAO for the hs_appliance_status table."""

    def __init__(self, connection):
        basedao.BaseDAO.__init__(self, connection, 'hs_appliance_status', 'status_id')

    def convert_row_to_object(self, entity_row):
        """Map a status row onto a HomeShellStatus entity."""
        entity = HomeShellStatus()
        entity.id = entity_row['status_id']
        entity.name = entity_row['status_key']
        entity.value = entity_row['status_value']
        return entity

    def update(self, entity):
        """Rewrite key and value of an existing status row."""
        cursor = self.connection.cursor()
        cursor.execute(
            "UPDATE " + self.table + " SET status_key = :key, status_value = :value WHERE status_id = :id",
            {'key': entity.name, 'value': entity.value, 'id': entity.id},
        )
        return

    def update_appliance_status(self, appjson, appliance_id):
        """Apply every key/value pair reported by an appliance to its stored status."""
        for status_name, status_value in appjson.items():
            print("try update status " + str(status_name) + " to value: " + str(status_value))
            stored = self.get_status_by_name(status_name, appliance_id)
            if stored is not None:
                print('updating ' + str(status_name))
                stored.value = status_value
                self.update(stored)

    def get_status_by_name(self, status_name, appliance_id):
        """Return the first status row matching (key, appliance), or None."""
        matches = self.select('status_key = ? AND appliance_id = ?', (status_name, appliance_id))
        if not matches:
            return None
        return matches[0]
| {
"repo_name": "alisonbnt/home-shell",
"path": "src/dao/statusdao.py",
"copies": "3",
"size": "1548",
"license": "apache-2.0",
"hash": 447007764764173950,
"line_mean": 30.5918367347,
"line_max": 108,
"alpha_frac": 0.5878552972,
"autogenerated": false,
"ratio": 3.812807881773399,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5900663178973399,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alisonbento'
import basedao
from src.entities.token.hstoken import HomeShellToken
class TokenDAO(basedao.BaseDAO):
    """DAO for the hs_tokens authentication-token table."""

    def __init__(self, connection):
        basedao.BaseDAO.__init__(self, connection, 'hs_tokens', 'token_id')

    def insert(self, entity):
        """Insert a token row; returns the new row id."""
        cursor = self.connection.cursor()
        sql = "INSERT INTO " + self.table + " (user_id, token, created, valid) VALUES (?, ?, ?, ?)"
        cursor.execute(sql, (entity.user_id, entity.token, entity.created, entity.valid))
        return cursor.lastrowid

    def update(self, entity):
        """Update user, token string and validity flag of an existing row."""
        cursor = self.connection.cursor()
        sql = "UPDATE " + self.table + " SET user_id = :user_id, token = :token, valid= :valid"
        sql += " WHERE token_id = :token_id"
        values = {
            'user_id': entity.user_id,
            'token': entity.token,
            'valid': entity.valid,
            'token_id': entity.id,
        }
        cursor.execute(sql, values)

    def convert_row_to_object(self, entity_row):
        """Map an hs_tokens row onto a HomeShellToken entity."""
        entity = HomeShellToken()
        entity.id = entity_row['token_id']
        entity.token = entity_row['token']
        entity.user_id = entity_row['user_id']
        entity.created = entity_row['created']
        entity.valid = entity_row['valid']
        return entity
| {
"repo_name": "alisonbnt/home-shell",
"path": "src/dao/tokendao.py",
"copies": "3",
"size": "1362",
"license": "apache-2.0",
"hash": 7668540877616938000,
"line_mean": 26.24,
"line_max": 99,
"alpha_frac": 0.5609397944,
"autogenerated": false,
"ratio": 3.721311475409836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5782251269809835,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alisonbento'
import flask_restful
from flask_restful import reqparse
import hsres
from src.dao.appliancedao import ApplianceDAO
import src.resstatus as _status
from src.scheme_loader import SchemeLoader
from src.lib.service_caller import call_service
class EventResource(hsres.HomeShellResource):
    """Dispatches a UI control event to the service bound to its callback."""

    def options(self, appliance_id):
        # CORS preflight response.
        return {'Allow': 'POST,PUT'}, 200, \
            {
                'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Headers': 'Content-Type, Accept',
                'Access-Control-Allow-Methods': 'POST,GET,PUT'
            }

    def post(self, appliance_id):
        parser = reqparse.RequestParser()
        parser.add_argument('control_id', type=str)
        parser.add_argument('value', type=str)
        parser.add_argument('callback_key', type=str)
        args = parser.parse_args()
        # Load the appliance and its UI scheme.
        dao = ApplianceDAO(self.get_dbc())
        appliance = dao.get(appliance_id)
        appliance_package = appliance.package
        loader = SchemeLoader()
        real_package = appliance_package + '.' + appliance.type
        scheme = loader.get_scheme(real_package)
        # Find the control that was modified.
        control_id = args['control_id']
        control = None
        for single_control in scheme['controls']:
            if single_control['id'] == control_id:
                control = single_control
                break
        # Resolve the callback associated with the control's new state.
        callback_key = args['callback_key']
        callback = control['event-callbacks'][callback_key]
        if 'param' in callback:
            # BUG FIX: parse_args() always populates declared arguments
            # (missing ones come back as None), so the previous check
            # `'value' not in args` could never fire; test for None instead.
            if args['value'] is None:
                self.set_status(_status.STATUS_SERVICE_REQUIRE_PARAMETER)
                return self.end()
            value = args['value']
            form = {callback['param']: value}
        else:
            form = {}
        # Execute the bound service.
        return call_service(self, appliance_id, callback['service'], form)
| {
"repo_name": "m4nolo/home-shell",
"path": "src/resources/event.py",
"copies": "3",
"size": "2066",
"license": "apache-2.0",
"hash": 2730533936341178400,
"line_mean": 34.0169491525,
"line_max": 80,
"alpha_frac": 0.6161665053,
"autogenerated": false,
"ratio": 3.988416988416988,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019041640510567065,
"num_lines": 59
} |
__author__ = 'alisonbento'
import flask_restful
import hsres
import src.resstatus as _status
from src.dao.servicedao import ServiceDAO
from src.lib.service_caller import call_service
class ListServicesResource(hsres.HomeShellResource):
    """GET: list every service registered for one appliance."""

    def get(self, appliance_id):
        dao = ServiceDAO(self.get_dbc())
        services = dao.list("appliance_id = ?", (appliance_id,))
        if not services:
            self.set_status(_status.STATUS_GENERAL_ERROR)
        else:
            self.set_status(_status.STATUS_OK)
            self.add_content('services', [single.to_array() for single in services])
        return self.end()
class ServiceResource(hsres.HomeShellResource):
    """GET: fetch one service by numeric id or trigger name. POST: invoke it."""

    def get(self, appliance_id, service_id):
        dao = ServiceDAO(self.get_dbc())
        service = None
        if service_id.isdigit():
            # Numeric path parameter: direct primary-key lookup.
            service = dao.get(service_id, "appliance_id = " + str(appliance_id))
        else:
            # Otherwise treat the parameter as the service trigger name.
            matches = dao.select("appliance_id = ? AND service_trigger = ?", (appliance_id, service_id))
            if matches:
                service = matches[0]
        if service is None:
            self.set_status(_status.STATUS_APPLIANCE_NOT_FOUND)
            return self.end()
        self.set_status(_status.STATUS_OK)
        self.add_content('service', service.to_array())
        return self.end()

    def post(self, appliance_id, service_id):
        # Delegate to the shared service-invocation helper.
        return call_service(self, appliance_id, service_id, flask_restful.request.form)
# servicedao = ServiceDAO(self.get_dbc())
#
# if service_id.isdigit():
# service = servicedao.get(service_id, "appliance_id = " + str(appliance_id))
# else:
# services = servicedao.select("appliance_id = ? AND service_trigger = ?", (appliance_id, service_id))
# if len(services) > 0:
# service = services[0]
# else:
# service = None
#
# if service is None:
# self.set_status(_status.STATUS_APPLIANCE_NOT_FOUND)
# return self.end()
# else:
# paramdao = ParamDAO(self.get_dbc())
# params = paramdao.select("service_id = ?", (service.id,))
#
# all_params_with_values = []
# for param in params:
# p_value = flask_restful.request.form[param.name]
# if p_value is not None:
# all_params_with_values.append(param.name + '=' + p_value)
#
# if len(all_params_with_values) > 0:
# param_string = '&'.join(all_params_with_values)
# param_string = '?' + param_string
# else:
# param_string = ''
#
# appliancedao = ApplianceDAO(self.get_dbc())
# appliance = appliancedao.get(appliance_id)
# # address = 'http://' + appliance.address + '/services/' + service.name + '/' + param_string
# address = 'http://' + appliance.address + '/services/' + service.name + param_string
#
# try:
# r = requests.get(address)
#
# if r.status_code == '404':
# self.set_status(_status.STATUS_APPLIANCE_UNREACHABLE)
# elif r.status_code:
# print(r.text)
# # Update status
# updater = StatusUpdater(self.get_dbc())
# appjson = r.json()
# fullappliance = updater.updateStatus(appliance, appjson)
# self.set_status(_status.STATUS_OK)
# self.add_content('appliance', fullappliance.to_array())
#
# except requests.ConnectionError:
# self.set_status(_status.STATUS_APPLIANCE_UNREACHABLE)
# self.get_dbc().rollback()
#
#
# return self.end()
| {
"repo_name": "alisonbnt/home-shell",
"path": "src/resources/services.py",
"copies": "3",
"size": "4034",
"license": "apache-2.0",
"hash": -8570981666471979000,
"line_mean": 34.6991150442,
"line_max": 114,
"alpha_frac": 0.5292513634,
"autogenerated": false,
"ratio": 3.8164616840113528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5845713047411353,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alisonbento'
import nmap
import src.resources.hsres as hsres
import src.dao.fullappliancedao as fullappliancedao
import src.dao.appliancedao as appliancedao
import src.resstatus as _status
import requests
import datetime
import configs
from src.appliances.statusupdater import StatusUpdater
class ApplianceListResource(hsres.HomeShellResource):
    """GET: list every appliance together with its services and status."""

    def get(self):
        dao = fullappliancedao.FullApplianceDAO(self.get_dbc())
        appliances = dao.list()
        # NOTE: when the list is empty no status is set, matching the
        # historical behavior of this endpoint.
        if len(appliances) > 0:
            self.set_status(_status.STATUS_OK)
            self.add_content('appliances', [item.to_array() for item in appliances])
        return self.end()
class ApplianceResource(hsres.HomeShellResource):
    """GET one appliance, refreshing its cached status when it is stale."""

    def get(self, appliance_id):
        dao = fullappliancedao.FullApplianceDAO(self.get_dbc())
        fullappliance = dao.get(appliance_id)
        self.set_status(_status.STATUS_OK)
        if fullappliance is None:
            self.set_status(_status.STATUS_GENERAL_ERROR)
        else:
            try:
                if self.is_time_to_refresh_appliance(fullappliance):
                    fullappliance = self.refresh_appliance(fullappliance, self.get_dbc())
                self.add_content('appliance', fullappliance.to_array())
            # BUG FIX: the Python 2 form `except A, B:` catches only A and
            # binds the exception to the name B, so HTTPError raised by
            # refresh_appliance() escaped. A tuple catches both.
            except (requests.ConnectionError, requests.HTTPError):
                self.set_status(_status.STATUS_APPLIANCE_UNREACHABLE)
        return self.end()

    def refresh_appliance(self, fullappliance, connection):
        """Poll the appliance over HTTP and persist the status it reports.

        Raises requests.HTTPError when the appliance answers without a
        status code.
        """
        appliance = fullappliance.appliance
        address = 'http://' + appliance.address
        response = requests.get(address)
        if response.status_code:
            appliance_json = response.json()
            # Parenthesized so it is valid under both Python 2 and 3.
            print('Appliance response: ' + str(appliance_json))
            updater = StatusUpdater(connection)
            fullappliance = updater.updateStatus(appliance, appliance_json)
        else:
            raise requests.HTTPError(response)
        return fullappliance

    def is_time_to_refresh_appliance(self, fullappliance):
        """True when the cached record is older than the configured refresh window."""
        modified_datetime = fullappliance.appliance.modified_datetime
        delta = datetime.datetime.now() - modified_datetime
        delta_seconds = delta.total_seconds()
        return delta_seconds >= configs.MIN_SECONDS_TO_REFRESH_APPLIANCE
class ScanAppliancesResource(hsres.HomeShellResource):
    """GET: nmap-scan the LAN and count hosts that look like new appliances."""

    def get(self):
        hsappliancedao = appliancedao.ApplianceDAO(self.get_dbc())
        all_appliances = hsappliancedao.list()
        already_know_hosts = []
        for appliance in all_appliances:
            already_know_hosts.append(appliance.address)
        # Ping-style sweep of the configured IP range.
        nm = nmap.PortScanner()
        nm.scan(hosts=configs.NETWORK_IP_RANGE, arguments='-n -sP -PE -PA21,23,80,3389')
        hosts_list = [(x, nm[x]['status']['state']) for x in nm.all_hosts()]
        valid_hosts = []
        for host, status in hosts_list:
            try:
                address = 'http://' + host
                r = requests.get(address)
                r.json()  # a host must answer with JSON to count as an appliance
                if host not in already_know_hosts:
                    valid_hosts.append(host)
            # BUG FIX: the bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt. Only network and JSON-decode failures are
            # expected here (r.json() raises a ValueError subclass).
            except (requests.RequestException, ValueError):
                pass
        self.set_status(_status.STATUS_OK)
        self.add_content('new_appliances', len(valid_hosts))
        return self.end()
| {
"repo_name": "m4nolo/home-shell",
"path": "src/resources/appliances.py",
"copies": "3",
"size": "3383",
"license": "apache-2.0",
"hash": 3274162276158458000,
"line_mean": 30.9150943396,
"line_max": 89,
"alpha_frac": 0.631687851,
"autogenerated": false,
"ratio": 3.8885057471264366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6020193598126437,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alisonbento'
import requests
import hsres
import src.resstatus as _status
import src.base.connector
from src.dao.appliancedao import ApplianceDAO
from src.dao.statusdao import StatusDAO
from src.answer.answer import Answer
class ListStatusResource(hsres.HomeShellResource):
    """GET: list all status rows of an appliance. POST: re-poll and store them."""

    def get(self, appliance_id):
        # NOTE: this endpoint historically opens its own connection and builds
        # an Answer object instead of using the shared resource helpers.
        connection = src.base.connector.getcon()
        dao = StatusDAO(connection)
        # FIX: bind appliance_id as a query parameter like every other
        # resource does, instead of concatenating it into the SQL string.
        status = dao.list("appliance_id = ?", (appliance_id,))
        reply = Answer()
        if len(status) > 0:
            reply.set_status(_status.STATUS_OK)
            all_services = []
            for single_status in status:
                all_services.append(single_status.to_array())
            reply.add_content('status', all_services)
        else:
            reply.set_status(_status.STATUS_GENERAL_ERROR)
        connection.close()
        return reply.to_array()

    def post(self, appliance_id):
        # POST addresses the appliance by its hash, not by numeric id.
        appliancedao = ApplianceDAO(self.get_dbc())
        appliance_list = appliancedao.select('appliance_hash = ?', (appliance_id,))
        if len(appliance_list) <= 0:
            self.set_status(_status.STATUS_APPLIANCE_NOT_FOUND)
            return self.end()
        appliance = appliance_list[0]
        address = 'http://' + appliance.address + '/status/'
        r = requests.get(address)
        appjson = r.json()
        statusdao = StatusDAO(self.get_dbc())
        statusdao.update_appliance_status(appjson, appliance.id)
        self.get_dbc().commit()
        self.set_status(_status.STATUS_OK)
        return self.end()
class StatusResource(hsres.HomeShellResource):
    """GET: fetch one status row of an appliance by its id."""

    def get(self, appliance_id, status_id):
        dao = StatusDAO(self.get_dbc())
        # The extra criteria ensures the status belongs to this appliance.
        status = dao.get(status_id, "appliance_id = " + str(appliance_id))
        if status is None:
            self.set_status(_status.STATUS_GENERAL_ERROR)
        else:
            self.set_status(_status.STATUS_OK)
            self.add_content('status', status.to_array())
return self.end() | {
"repo_name": "m4nolo/home-shell",
"path": "src/resources/status.py",
"copies": "3",
"size": "2023",
"license": "apache-2.0",
"hash": -559714627989351040,
"line_mean": 26.7260273973,
"line_max": 83,
"alpha_frac": 0.6178942165,
"autogenerated": false,
"ratio": 3.8169811320754716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007110241356816699,
"num_lines": 73
} |
__author__ = 'alisonbento'
import time
import hsres
import src.resstatus as _status
from src.entities.hsextra import HomeShellExtra
from src.dao.appliancedao import ApplianceDAO
from src.dao.extradao import ExtraDAO
from flask import request
class ExtraResource(hsres.HomeShellResource):
    """GET: list extras of one key for an appliance. POST: record a new extra."""

    def get(self, appliance_id, extra_key):
        appliancedao = ApplianceDAO(self.get_dbc())
        # GET addresses the appliance by numeric id only.
        if not appliance_id.isdigit():
            self.set_status(_status.STATUS_INVALID_REQUEST)
            return self.end()
        appliance = appliancedao.get(appliance_id)
        if appliance is None:
            self.set_status(_status.STATUS_APPLIANCE_NOT_FOUND)
            return self.end()
        extradao = ExtraDAO(self.get_dbc())
        all_extras = extradao.list('appliance_id = ? AND extra_key = ?', (appliance.id, extra_key))
        parsed_extras = []
        for extra in all_extras:
            parsed_extras.append(extra.to_array())
        self.set_status(_status.STATUS_OK)
        self.add_content('extras', parsed_extras)
        return self.end()

    def post(self, appliance_id, extra_key):
        appliancedao = ApplianceDAO(self.get_dbc())
        # POST addresses the appliance by its hash, not by numeric id.
        appliance_list = appliancedao.select('appliance_hash = ?', (appliance_id,))
        if len(appliance_list) <= 0:
            self.set_status(_status.STATUS_APPLIANCE_NOT_FOUND)
            return self.end()
        appliance = appliance_list[0]
        value = str(request.form.get('value'))
        date = str(request.form.get('date'))
        created = time.strftime('%Y-%m-%d %H:%M:%S')
        extradao = ExtraDAO(self.get_dbc())
        extra = HomeShellExtra(0, appliance.id, str(extra_key), value, date, created)
        result = extradao.insert(extra)
        # Commit only when the insert produced a row id; otherwise roll back.
        if result:
            self.get_dbc().commit()
            self.set_status(_status.STATUS_OK)
        else:
            self.get_dbc().rollback()
            self.set_status(_status.STATUS_GENERAL_ERROR)
        return self.end()
class ListExtrasResource(hsres.HomeShellResource):
    """GET: list every extra recorded for an appliance."""

    def get(self, appliance_id):
        appliancedao = ApplianceDAO(self.get_dbc())
        appliance = appliancedao.get(appliance_id)
        if appliance is None:
            self.set_status(_status.STATUS_APPLIANCE_NOT_FOUND)
            return self.end()
        extradao = ExtraDAO(self.get_dbc())
        all_extras = extradao.list('appliance_id = ?', (appliance.id,))
        parsed_extras = []
        for extra in all_extras:
            parsed_extras.append(extra.to_array())
        self.set_status(_status.STATUS_OK)
        self.add_content('extras', parsed_extras)
return self.end() | {
"repo_name": "alisonbnt/home-shell",
"path": "src/resources/extras.py",
"copies": "3",
"size": "2653",
"license": "apache-2.0",
"hash": -1213405456083694000,
"line_mean": 27.5376344086,
"line_max": 99,
"alpha_frac": 0.6181681116,
"autogenerated": false,
"ratio": 3.731364275668073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007906738589409256,
"num_lines": 93
} |
__author__ = 'alisonbnt'
# -*- coding: utf-8 -*-
import os
import locale
import gettext
# Change this variable to your app name!
# The translation files will be under
# @LOCALE_DIR@/@LANGUAGE@/LC_MESSAGES/@APP_NAME@.mo
APP_NAME = "SteeringAll"
# This is ok for maemo. Not sure in a regular desktop:
# APP_DIR = os.path.join(sys.prefix, 'assets')
APP_DIR = os.path.dirname(os.path.realpath(__file__))
LOCALE_DIR = os.path.join(APP_DIR, 'assets/i18n')  # .mo files will then be located in APP_Dir/i18n/LANGUAGECODE/LC_MESSAGES/
# Now we need to choose the language. We will provide a list, and gettext
# will use the first translation available in the list
#
# In maemo it is in the LANG environment variable
# (on desktop is usually LANGUAGES)
DEFAULT_LANGUAGES = os.environ.get('LANG', '').split(':')
DEFAULT_LANGUAGES += ['en_US']
lc, encoding = locale.getdefaultlocale()
if lc:
    languages = [lc]
else:
    languages = ['pt_BR']
# NOTE(review): this unconditional assignment overwrites the locale detected
# just above, forcing English first — looks like leftover debugging; confirm
# whether it should be removed.
languages = ['en_US']
# Concat all languages (env + default locale),
# and here we have the languages and location of the translations
languages += DEFAULT_LANGUAGES
mo_location = LOCALE_DIR
print(mo_location)
print(languages)
# Lets tell those details to gettext
# (nothing to change here for you)
# NOTE(review): the unicode=1 keyword is Python 2 only; gettext.install()
# removed it in Python 3.
gettext.install(True, localedir=None, unicode=1)
gettext.find(APP_NAME, mo_location)
gettext.textdomain(APP_NAME)
gettext.bind_textdomain_codeset(APP_NAME, "UTF-8")
language = gettext.translation(APP_NAME, mo_location, languages=languages, fallback=True)
"repo_name": "alisonbnt/steering-all",
"path": "i18n.py",
"copies": "2",
"size": "1505",
"license": "mit",
"hash": -4029411310620458000,
"line_mean": 27.4150943396,
"line_max": 124,
"alpha_frac": 0.7249169435,
"autogenerated": false,
"ratio": 3.1818181818181817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9903105699835884,
"avg_score": 0.0007258850964596142,
"num_lines": 53
} |
__author__ = 'alisonbnt'
from flask import g, jsonify
from flask_restful import Resource
from app import db
from conf.auth import auth
from app.resources import parser
from app.models.UserModel import User
class UsersResource(Resource):
    """REST resource exposing the authenticated user's account (profile CRUD)."""

    @staticmethod
    @auth.login_required
    def get():
        # Return the logged-in user's own profile.
        return jsonify({
            'id': g.user.id,
            'name': g.user.name,
            'email': g.user.email,
            'username': g.user.username
        })

    @staticmethod
    @auth.login_required
    def post():
        # Partial update: only fields present in the request are changed.
        parser.add_argument('email', type=str)
        parser.add_argument('password', type=str)
        parser.add_argument('name', type=str)
        parser.add_argument('username', type=str)
        args = parser.parse_args()
        if args['username'] is not None:
            g.user.username = args['username']
        if args['password'] is not None:
            # NOTE(review): assigns the raw password attribute directly, unlike
            # put() which calls hash_password() — confirm this is intended.
            g.user.password = args['password']
        if args['name'] is not None:
            g.user.name = args['name']
        if args['email'] is not None:
            g.user.email = args['email']
        db.session.add(g.user)
        db.session.commit()
        return jsonify({'operation_status': 'SUCCESS'})

    @staticmethod
    def put():
        # Registration endpoint (no auth): all four fields are mandatory.
        # parser.add_argument('uid', type=int)
        parser.add_argument('email', type=str)
        parser.add_argument('password', type=str)
        parser.add_argument('name', type=str)
        parser.add_argument('username', type=str)
        args = parser.parse_args()
        if args['username'] is None or args['password'] is None or args['name'] is None or args['email'] is None:
            return jsonify({'required_fields': ['username', 'password', 'name', 'email']})
        teste_cliente = User(args['name'], args['email'], args['username'])
        teste_cliente.hash_password(args['password'])
        db.session.add(teste_cliente)
        db.session.commit()
        return {'user': teste_cliente.id}

    @staticmethod
    @auth.login_required
    def delete():
        # Requires re-typing the username as a confirmation step.
        parser.add_argument('username', type=str)
        args = parser.parse_args()
        print(args)
        if args['username'] == g.user.username:
            db.session.delete(g.user)
            db.session.commit()
        else:
            return jsonify({'operation_status': 'FAILURE', 'reason': 'Confirmation failure'})
return jsonify({'operation_status': 'SUCCESS'}) | {
"repo_name": "processos-2015-1/api",
"path": "app/resources/user_resource.py",
"copies": "1",
"size": "2417",
"license": "mit",
"hash": -2329411449736494000,
"line_mean": 30,
"line_max": 113,
"alpha_frac": 0.5891601158,
"autogenerated": false,
"ratio": 3.8983870967741936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4987547212574194,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alisonbnt'
import os
import urllib
import ConfigParser
def setup():
    """Interactively install the GCM commit-msg git hook and its config file.

    Writes .git/hooks/commit-msg (downloaded) and .git/hooks/gcm.cfg
    (owner/repository entered by the user). Exits the process on refusal
    or when not run from the repository root.
    """
    print('-- GCM REPOSITORY SETUP --')
    print('Checking setup')
    already_installed_hook = False
    git_hook_path = '.git/hooks/commit-msg'
    cfg_file_path = '.git/hooks/gcm.cfg'
    if os.path.isfile(git_hook_path):
        already_installed_hook = True
    already_installed_config = False
    if os.path.isfile(cfg_file_path):
        already_installed_config = True
    # When both artifacts exist, offer a full reinstall.
    if already_installed_hook and already_installed_config:
        print('Hook and Config files already exists.')
        rerun = yes_no_dialog('Would you like to replace then?')
        if not rerun:
            print('Quitting...')
            exit()
        else:
            # Trailing comma is a leftover Python 2 "print without newline"
            # idiom; harmless under print-as-function.
            print('Removing old files... '),
            os.remove(git_hook_path)
            os.remove(cfg_file_path)
            already_installed_config = False
            already_installed_hook = False
            print('DONE')
    print('Running script... ')
    print('Current directory')
    print(os.getcwd())
    print('Make sure this script is running at the repository root')
    running_root_dir = yes_no_dialog('Running in root dir?')
    if running_root_dir:
        print('Running setup')
        if not already_installed_hook:
            print('Downloading hook script... '),
            # Python 2 urllib API: download the hook into .git/hooks.
            testfile = urllib.URLopener()
            testfile.retrieve(
                "https://raw.githubusercontent.com/alisonbnt/gcm-commit-msg-hook/master/commit-msg",
                ".git/hooks/commit-msg"
            )
            print('DONE')
            print('Hook retrieved successfully - Please give execution permission for downloaded hook \'commit-msg\'')
        else:
            print('Git hook already downloaded.. skipping')
        if not already_installed_config:
            correct_config = False
            config = ConfigParser.RawConfigParser()
            config.add_section('GCM')
            owner = None
            repository = None
            print('Creating config file')
            # Re-prompt until the user confirms owner/repository.
            while not correct_config:
                print('Repository access details')
                owner = raw_input('Enter the repository owner: ')
                repository = raw_input('Enter the repository name: ')
                print('')
                print('Please verify the given data')
                print('Repository owner: ' + owner)
                print('Repository name: ' + repository)
                correct_config = yes_no_dialog('Is this correct?')
            config.set('GCM', 'repo', repository)
            config.set('GCM', 'owner', owner)
            with open(cfg_file_path, 'wb') as configfile:
                print('Writing data to file... '),
                config.write(configfile)
            print('DONE')
        else:
            print('Config file already set... Skipping')
        print('')
        print('Setup complete')
        print('Remember to give execution rights for downloaded hook (Use the command below)')
        print('chmod +x .git/hooks/commit-msg')
    else:
        print('Quitting...')
        exit()
def yes_no_dialog(prompt):
    """Ask a yes/no question: 'n' -> False, 'y' or empty -> True, else quit.

    Empty input counts as the default answer (yes).
    """
    answer = raw_input(prompt + ' (Y/n) ')
    if answer.lower() == "n":
        return False
    # BUG FIX: the old guard `answer is True and answer.lower() != "y"`
    # compared a string to the boolean True with `is`, so it was always
    # False and any garbage input was silently treated as "yes". Reject
    # unrecognized answers as the error message originally intended.
    if answer != "" and answer.lower() != "y":
        print('Invalid option - quitting')
        exit()
    return True
# Run the interactive installer when executed as a script.
if __name__ == '__main__':
    setup()
| {
"repo_name": "alisonbnt/imagequiz",
"path": "gcm.py",
"copies": "2",
"size": "3420",
"license": "mit",
"hash": 1061123586989678100,
"line_mean": 30.9626168224,
"line_max": 118,
"alpha_frac": 0.5631578947,
"autogenerated": false,
"ratio": 4.430051813471502,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001334493282178835,
"num_lines": 107
} |
__author__ = 'alisonbnt'
import requests
import src.resstatus as _status
from src.appliances.statusupdater import StatusUpdater
from src.dao.appliancedao import ApplianceDAO
from src.dao.servicedao import ServiceDAO
from src.dao.paramdao import ParamDAO
def call_service(resource, appliance_id, service_id, form, method="get"):
    """Invoke a service on an appliance over HTTP and refresh its cached status.

    service_id may be the numeric service id or its trigger name; `form`
    supplies values for the service's declared parameters. Fills in the
    resource's response and returns resource.end().
    """
    servicedao = ServiceDAO(resource.get_dbc())
    if service_id.isdigit():
        service = servicedao.get(service_id, "appliance_id = " + str(appliance_id))
    else:
        services = servicedao.select("appliance_id = ? AND service_trigger = ?", (appliance_id, service_id))
        service = services[0] if len(services) > 0 else None
    if service is None:
        resource.set_status(_status.STATUS_APPLIANCE_NOT_FOUND)
        return resource.end()
    # Build the query string from the service's declared parameters.
    paramdao = ParamDAO(resource.get_dbc())
    params = paramdao.select("service_id = ?", (service.id,))
    all_params_with_values = []
    for param in params:
        p_value = form[param.name]
        if p_value is not None:
            all_params_with_values.append(param.name + '=' + p_value)
    if len(all_params_with_values) > 0:
        param_string = '?' + '&'.join(all_params_with_values)
    else:
        param_string = ''
    appliancedao = ApplianceDAO(resource.get_dbc())
    appliance = appliancedao.get(appliance_id)
    address = 'http://' + appliance.address + '/services/' + service.name + param_string
    try:
        # BUG FIX: the 'post' branch previously issued requests.get() as well,
        # and any other method left `r` undefined (NameError). Default to GET.
        if method == 'post':
            r = requests.post(address)
        else:
            r = requests.get(address)
        # BUG FIX: status_code is an int; comparing it to the string '404'
        # never matched, so 404 responses were treated as success.
        if r.status_code == 404:
            resource.set_status(_status.STATUS_APPLIANCE_UNREACHABLE)
        elif r.status_code:
            print(r.text)
            # Persist the status the appliance reported in its reply.
            updater = StatusUpdater(resource.get_dbc())
            appjson = r.json()
            fullappliance = updater.updateStatus(appliance, appjson)
            resource.set_status(_status.STATUS_OK)
            resource.add_content('appliance', fullappliance.to_array())
    except requests.ConnectionError:
        resource.set_status(_status.STATUS_APPLIANCE_UNREACHABLE)
        resource.get_dbc().rollback()
    return resource.end()
| {
"repo_name": "souzabrizolara/py-home-shell",
"path": "src/lib/service_caller.py",
"copies": "3",
"size": "2430",
"license": "apache-2.0",
"hash": -4079729213727053300,
"line_mean": 34.2173913043,
"line_max": 108,
"alpha_frac": 0.5917695473,
"autogenerated": false,
"ratio": 3.9384116693679094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00046132988410739744,
"num_lines": 69
} |
__author__ = 'allan'
from model.contacts import Contacts
from random import randrange
def test_modify_contact_name(app):
    """Modify a randomly chosen contact's name and verify the list updates in place."""
    # Precondition: there must be at least one contact to modify.
    if app.contacts.count() == 0:
        app.contacts.create(Contacts(lastname="Test"))
    contacts_before = app.contacts.get_contact_list()
    target = randrange(len(contacts_before))
    replacement = Contacts(firstname="Sam", lastname="Ford")
    # Carry the original id over so records can be matched after the edit.
    replacement.id = contacts_before[target].id
    app.contacts.modify_contact_by_index(target, replacement)
    # Modification must not change the total number of contacts.
    assert len(contacts_before) == app.contacts.count()
    contacts_after = app.contacts.get_contact_list()
    contacts_before[target] = replacement
    assert sorted(contacts_before, key=Contacts.id_or_max) == sorted(contacts_after, key=Contacts.id_or_max)
#def test_modify_first_contact_email(app):
# old_contacts = app.contacts.get_contact_list()
# if app.contacts.count() == 0:
# app.contacts.create(Contacts(firstname="Test"))
# app.contacts.modify_first_contact(Contacts(email="Sam@gmail.com"))
# new_contacts = app.contacts.get_contact_list()
# assert len(old_contacts) == len(new_contacts)
| {
"repo_name": "Latypov/Py_start",
"path": "test/test_modify_contact.py",
"copies": "1",
"size": "1067",
"license": "apache-2.0",
"hash": 1414970927399914500,
"line_mean": 38.5185185185,
"line_max": 103,
"alpha_frac": 0.6963448922,
"autogenerated": false,
"ratio": 3.213855421686747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4410200313886747,
"avg_score": null,
"num_lines": null
} |
__author__ = 'allan'
from model.contacts import Contacts
class ContactHelper:
    """Page-object style helper driving the contact CRUD screens via Selenium."""

    def __init__(self, app):
        self.app = app

    def add_new_contact(self):
        """Open the 'add new' contact form."""
        wd = self.app.wd
        wd.find_element_by_link_text("add new").click()

    def create(self, contacts):
        """Create a new contact from the given Contacts model object."""
        wd = self.app.wd
        self.add_new_contact()
        self.fill_contact_form(contacts)
        # submit new contact
        wd.find_element_by_name("submit").click()
        self.return_to_home_page()
        self.contact_cache = None  # invalidate cache after mutation

    def select_first_contact(self):
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()

    def select_contact_by_index(self, index):
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def delete_first_contact(self):
        self.delete_contact_by_index(0)

    def delete_contact_by_index(self, index):
        """Delete the contact at row `index`, confirming the browser alert."""
        wd = self.app.wd
        self.return_to_home_page()
        self.select_contact_by_index(index)
        # delete selection
        wd.find_element_by_xpath("//input[@value='Delete']").click()
        wd.switch_to_alert().accept()
        self.return_to_home_page()
        self.contact_cache = None

    def change_field_value(self, field_name, text):
        """Fill a single form field; a None value leaves the field untouched."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def fill_contact_form(self, contacts):
        wd = self.app.wd
        self.change_field_value("firstname", contacts.firstname)
        self.change_field_value("lastname", contacts.lastname)
        self.change_field_value("mobile", contacts.mobile)
        self.change_field_value("email", contacts.email)

    def modify_first_contact(self, new_contact_data):
        # BUG FIX: new_contact_data was previously dropped, so the delegated
        # call raised a missing-argument TypeError.
        self.modify_contact_by_index(0, new_contact_data)

    def modify_contact_by_index(self, index, new_contact_data):
        """Overwrite the contact at row `index` with new_contact_data."""
        wd = self.app.wd
        self.return_to_home_page()
        self.select_contact_by_index(index)
        # open modification form
        wd.find_element_by_xpath("//tbody/tr/td[8]").click()
        self.fill_contact_form(new_contact_data)
        # update contact
        wd.find_element_by_name("update").click()
        self.return_to_home_page()
        self.contact_cache = None

    def return_to_home_page(self):
        wd = self.app.wd
        if not (wd.find_elements_by_id("maintable")):
            wd.find_element_by_link_text("home").click()

    def count(self):
        """Number of contacts, counted via the selection checkboxes."""
        wd = self.app.wd
        self.return_to_home_page()
        return len(wd.find_elements_by_name("selected[]"))

    # Cached contact list; None means "stale, re-read from the page".
    contact_cache = None

    def get_contact_list(self):
        """Return the contacts shown on the home page (cached between calls)."""
        if self.contact_cache is None:
            wd = self.app.wd
            self.return_to_home_page()
            self.contact_cache = []
            for row in wd.find_elements_by_name("entry"):
                cells = row.find_elements_by_tag_name("td")
                firstname = cells[2].text
                lastname = cells[1].text
                id = cells[0].find_element_by_tag_name("input").get_attribute("value")
                self.contact_cache.append(Contacts(firstname=firstname, lastname=lastname, id=id))
        return list(self.contact_cache)
| {
"repo_name": "Latypov/Py_start",
"path": "fixture/contacts.py",
"copies": "1",
"size": "3290",
"license": "apache-2.0",
"hash": -7315586617608673000,
"line_mean": 33.2708333333,
"line_max": 98,
"alpha_frac": 0.5981762918,
"autogenerated": false,
"ratio": 3.5224839400428265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.961187441133152,
"avg_score": 0.001757164102261085,
"num_lines": 96
} |
__author__ = 'allan'
from model.group import Group
class GroupHelper:
    """Page-object style helper for creating, editing and deleting groups."""

    def __init__(self, app):
        self.app = app

    def open_groups_page(self):
        """Navigate to the groups page unless it is already open."""
        wd = self.app.wd
        if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
            wd.find_element_by_link_text("groups").click()

    def change_field_value(self, field_name, text):
        """Fill a single form field; a None value leaves the field untouched."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def fill_group_form(self, group):
        wd = self.app.wd
        self.change_field_value("group_name", group.name)
        self.change_field_value("group_header", group.header)
        self.change_field_value("group_footer", group.footer)

    def create(self, group):
        """Create a new group from the given Group model object."""
        wd = self.app.wd
        self.open_groups_page()
        # init group creation
        wd.find_element_by_name("new").click()
        self.fill_group_form(group)
        # submit group creation
        wd.find_element_by_name("submit").click()
        self.return_to_groups_page()
        self.group_cache = None  # invalidate cache after mutation

    def select_first_group(self):
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()

    def select_group_by_index(self, index):
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def delete_first_group(self):
        self.delete_group_by_index(0)

    def delete_group_by_index(self, index):
        """Delete the group at row `index`."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_group_by_index(index)
        # submit deletion
        wd.find_element_by_name("delete").click()
        self.return_to_groups_page()
        self.group_cache = None

    def modify_first_group(self, new_group_data):
        # BUG FIX: new_group_data was previously dropped, so the delegated
        # call raised a missing-argument TypeError.
        self.modify_group_by_index(0, new_group_data)

    def modify_group_by_index(self, index, new_group_data):
        """Overwrite the group at row `index` with new_group_data."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_group_by_index(index)
        # open modification form
        wd.find_element_by_name("edit").click()
        # fill group form
        self.fill_group_form(new_group_data)
        # submit modification
        wd.find_element_by_name("update").click()
        self.return_to_groups_page()
        self.group_cache = None

    def return_to_groups_page(self):
        wd = self.app.wd
        wd.find_element_by_link_text("group page").click()

    def count(self):
        """Number of groups, counted via the selection checkboxes."""
        wd = self.app.wd
        self.open_groups_page()
        return len(wd.find_elements_by_name("selected[]"))

    # Cached group list; None means "stale, re-read from the page".
    group_cache = None

    def get_group_list(self):
        """Return the groups shown on the groups page (cached between calls)."""
        if self.group_cache is None:
            wd = self.app.wd
            self.open_groups_page()
            self.group_cache = []
            for element in wd.find_elements_by_css_selector("span.group"):
                text = element.text
                id = element.find_element_by_name("selected[]").get_attribute("value")
                self.group_cache.append(Group(name=text, id=id))
        return list(self.group_cache)
"repo_name": "Latypov/Py_start",
"path": "fixture/group.py",
"copies": "1",
"size": "3127",
"license": "apache-2.0",
"hash": -4617765198687584000,
"line_mean": 31.5833333333,
"line_max": 99,
"alpha_frac": 0.592260953,
"autogenerated": false,
"ratio": 3.478309232480534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9553193808365665,
"avg_score": 0.003475275422973615,
"num_lines": 96
} |
__author__ = 'allan'
class SessionHelper:
    """Helper managing login/logout session state for the application under test."""

    def __init__(self, app):
        self.app = app

    def login(self, username, password):
        """Log in through the login form on the home page."""
        wd = self.app.wd
        self.app.open_home_page()
        wd.find_element_by_name("user").click()
        wd.find_element_by_name("user").clear()
        wd.find_element_by_name("user").send_keys(username)
        wd.find_element_by_name("pass").click()
        wd.find_element_by_name("pass").clear()
        wd.find_element_by_name("pass").send_keys(password)
        wd.find_element_by_css_selector("input[type=\"submit\"]").click()

    def logout(self):
        wd = self.app.wd
        wd.find_element_by_link_text("Logout").click()

    def is_logged_in(self):
        """True when a Logout link is present on the page."""
        wd = self.app.wd
        return len(wd.find_elements_by_link_text("Logout")) > 0

    def is_logged_in_as(self, username):
        """True when the page header shows the given username in parentheses."""
        wd = self.app.wd
        return wd.find_element_by_xpath("//div/div[1]/form/b").text == "("+username+")"

    def ensure_logout(self):
        # The unused `wd` local was removed; only session state is consulted.
        if self.is_logged_in():
            self.logout()

    def ensure_login(self, username, password):
        """Guarantee we are logged in as `username`, re-logging in if needed."""
        if self.is_logged_in():
            if self.is_logged_in_as(username):
                return
            else:
                self.logout()
        self.login(username, password)
| {
"repo_name": "Latypov/Py_start",
"path": "fixture/session.py",
"copies": "1",
"size": "1339",
"license": "apache-2.0",
"hash": 5086349193003641000,
"line_mean": 27.4893617021,
"line_max": 87,
"alpha_frac": 0.5593726662,
"autogenerated": false,
"ratio": 3.3813131313131315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4440685797513132,
"avg_score": null,
"num_lines": null
} |
# Sentinels: unique objects so that None (or any user value) can be stored
# as a key or value without ambiguity.
_absent = object()
_not_found = object()
# Trie fan-out: each level of the trie consumes _BITS bits of a key's hash,
# giving _SIZE children per node; _MASK extracts one level's index.
_BITS = 5
_SIZE = 2 ** _BITS
_MASK = _SIZE - 1
class _TrieNode(object):
kind = None
def iteritems(self):
"""
Iterate over all of the items in this node and all sub-nodes.
Yields (key, value) pairs.
"""
raise NotImplementedError(self.iteritems)
def find(self, shift, keyHash, key):
"""
Return the value for C{key}.
If C{key} is not found in this node or any sub-nodes, return
C{_not_found}.
@param shift: ???
@param keyHash: a hash of C{key}, using the same hash function that
it was stored with
@param key: the key to look for
"""
raise NotImplementedError(self.find)
def assoc(self, shift, keyHash, key, val):
"""
Return a new node that has C{key} mapped to C{val}.
@param shift: ???
@param keyHash: a hash of C{key}, using the same hash function that
it was stored with
@param key: the key to add
@param val: the value to set C{key} to
"""
raise NotImplementedError(self.assoc)
def without(self, shift, keyHash, key):
"""
Return a new node that does not have C{key}.
@param shift: ???
@param keyHash: a hash of C{key}, using the same hash function that
it was stored with
@param key: the key to remove
"""
raise NotImplementedError(self.without)
class _BitmapIndexedNode(_TrieNode):
    # Sparse interior node of the HAMT: `bitmap` has one bit set per occupied
    # slot at this trie level, and `array` stores the occupied slots densely
    # as [key0, val0, key1, val1, ...].  A key slot holding `_absent` means
    # the paired value slot holds a child node instead of a leaf entry.
    kind = 'BitmapIndexedNode'
    def __init__(self, bitmap, array):
        self.bitmap = bitmap
        self.array = array
    def iteritems(self):
        # Walk the packed array two entries at a time, recursing into
        # sub-nodes where the key slot is the _absent sentinel.
        for i in range(0, len(self.array), 2):
            if self.array[i] is _absent:
                for item in self.array[i + 1].iteritems():
                    yield item
            else:
                yield (self.array[i], self.array[i + 1])
    def find(self, shift, keyHash, key):
        # Which slot would this hash occupy at this level?
        bit = bitpos(keyHash, shift)
        if (self.bitmap & bit) == 0:
            # Slot unoccupied: the key cannot be in this subtree.
            return _not_found
        idx = index(self.bitmap, bit)
        k = self.array[2 * idx]
        v = self.array[2 * idx + 1]
        if k is _absent:
            # Slot holds a sub-node: descend, consuming _BITS more hash bits.
            return v.find(shift + _BITS, keyHash, key)
        if k == key:
            return v
        else:
            return _not_found
    def assoc(self, shift, keyHash, key, val):
        """
        Create new nodes as needed to include a new key/val pair.

        Returns a (node, addedLeaf) pair; addedLeaf is True when the map
        grew by one entry (as opposed to replacing an existing value).
        """
        bit = bitpos(keyHash, shift)
        idx = index(self.bitmap, bit)
        #look up hash in the current node
        if(self.bitmap & bit) != 0:
            #this spot's already occupied.
            someKey = self.array[2 * idx]
            someVal = self.array[2 * idx + 1]
            if someKey is _absent:
                #value slot is a subnode
                n, addedLeaf = someVal.assoc(shift + _BITS, keyHash, key, val)
                if n is someVal:
                    # Sub-node unchanged: the pair was already present.
                    return self, False
                else:
                    newArray = self.array[:]
                    newArray[2 * idx + 1] = n
                    return _BitmapIndexedNode(self.bitmap, newArray), addedLeaf
            if key == someKey:
                if val == someVal:
                    # Identical pair already stored: share the node.
                    return self, False
                else:
                    # Same key, new value: copy with the value replaced.
                    newArray = self.array[:]
                    newArray[2 * idx + 1] = val
                    return _BitmapIndexedNode(self.bitmap, newArray), False
            else:
                #there was a hash collision in the local _BITS bits of the bitmap
                # Push both entries one level down into a new sub-node.
                newArray = self.array[:]
                newArray[2 * idx] = _absent
                newArray[2 * idx + 1] = createNode(shift + _BITS, someKey,
                    someVal, keyHash, key, val)
                newNode = _BitmapIndexedNode(self.bitmap, newArray)
                return newNode, True
        else:
            #spot for this hash is open
            n = bitcount(self.bitmap)
            if n >= (_SIZE / 2):
                # this node is full, convert to ArrayNode
                # (dense node with one slot for all _SIZE children).
                nodes = [_absent] * _SIZE
                jdx = mask(keyHash, shift)
                nodes[jdx], addedLeaf = EMPTY_BITMAP_INDEXED_NODE.assoc(
                    shift + _BITS, keyHash, key, val)
                j = 0
                for i in range(_SIZE):
                    if ((self.bitmap >> i) & 1) != 0:
                        if self.array[j] is _absent:
                            # Existing sub-node: reuse directly.
                            nodes[i] = self.array[j + 1]
                        else:
                            # Leaf entry: wrap it in its own bitmap node.
                            nodes[i], al = EMPTY_BITMAP_INDEXED_NODE.assoc(
                                shift + _BITS, hash(self.array[j]),
                                self.array[j], self.array[j + 1])
                            addedLeaf = True
                        j += 2
                return _ArrayNode(n + 1, nodes), addedLeaf
            else:
                # Room to grow: splice the new pair in at its dense position.
                newArray = [_absent] * (2 * (n + 1))
                newArray[:2 * idx] = self.array[:2 * idx]
                newArray[2 * idx] = key
                newArray[2 * idx + 1] = val
                newArray[2 * (idx + 1):2 * (n + 1)] = self.array[2 * idx:2 * n]
                return _BitmapIndexedNode(self.bitmap | bit, newArray), True
    def without(self, shift, keyHash, key):
        bit = bitpos(keyHash, shift)
        if (self.bitmap & bit) == 0:
            # Slot unoccupied: key not present, structure unchanged.
            return self
        idx = index(self.bitmap, bit)
        someKey = self.array[2 * idx]
        someVal = self.array[(2 * idx) + 1]
        if someKey is _absent:
            # delegate to subnode
            n = someVal.without(shift + _BITS, keyHash, key)
            if n is someVal:
                return self
            if n is not _absent:
                newArray = self.array[:]
                newArray[2 * idx + 1] = n
                return _BitmapIndexedNode(self.bitmap, newArray)
            if self.bitmap == bit:
                # Sub-node emptied and it was our only slot: node disappears.
                return _absent
            newArray = self.array[:]
            del newArray[2 * idx:2 * idx + 2]
            return _BitmapIndexedNode(self.bitmap ^ bit, newArray)
        if someKey == key:
            if len(self.array) == 2:
                #last pair in this node
                return _absent
            newArray = self.array[:]
            del newArray[2 * idx:2 * idx + 2]
            return _BitmapIndexedNode(self.bitmap ^ bit, newArray)
        else:
            # A different key occupies the slot: nothing to remove.
            return self
# Shared empty root node: the starting point for building any map.
EMPTY_BITMAP_INDEXED_NODE = _BitmapIndexedNode(0, [])
class _ArrayNode(_TrieNode):
    # Dense interior node: `array` has a slot for every possible child
    # (_SIZE of them), each either `_absent` or a sub-node; `count` tracks
    # how many slots are occupied.
    kind = "ArrayNode"
    def __init__(self, count, array):
        self.count = count
        self.array = array
    def iteritems(self):
        for node in self.array:
            if node is not _absent:
                for item in node.iteritems():
                    yield item
    def find(self, shift, keyHash, key):
        idx = mask(keyHash, shift)
        node = self.array[idx]
        if node is _absent:
            return _not_found
        else:
            return node.find(shift + _BITS, keyHash, key)
    def assoc(self, shift, keyHash, key, val):
        idx = mask(keyHash, shift)
        node = self.array[idx]
        if node is _absent:
            # Empty slot: wrap the new entry in a fresh bitmap sub-node.
            newArray = self.array[:]
            newArray[idx], _ = EMPTY_BITMAP_INDEXED_NODE.assoc(shift + _BITS, keyHash, key, val)
            return _ArrayNode(self.count + 1, newArray), True
        else:
            n, addedLeaf = node.assoc(shift + _BITS, keyHash, key, val)
            if n is node:
                # Sub-node unchanged: the pair was already present.
                return self, False
            newArray = self.array[:]
            newArray[idx] = n
            return _ArrayNode(self.count, newArray), addedLeaf
    def without(self, shift, keyHash, key):
        idx = mask(keyHash, shift)
        node = self.array[idx]
        if node is _absent:
            return self
        n = node.without(shift + _BITS, keyHash, key)
        if n is node:
            return self
        newArray = self.array[:]
        newArray[idx] = n
        if n is _absent:
            # XXX: What does 8 mean?
            # NOTE(review): presumably the shrink threshold (_SIZE / 4) below
            # which a dense node packs back into a bitmap node -- confirm.
            if self.count <= 8:
                return self.pack(idx)
            return _ArrayNode(self.count - 1, newArray)
        else:
            return _ArrayNode(self.count, newArray)
    def pack(self, idx):
        # Convert back to a _BitmapIndexedNode, dropping the child at `idx`.
        # Children land in the value slots (odd offsets); the key slots stay
        # `_absent` to mark them as sub-node references.
        newArray = [_absent] * (2 * (self.count - 1))
        j = 1
        bitmap = 0
        for i in range(len(self.array)):
            if i != idx and self.array[i] is not _absent:
                newArray[j] = self.array[i]
                bitmap |= 1 << i
                j += 2
        return _BitmapIndexedNode(bitmap, newArray)
class _HashCollisionNode(_TrieNode):
    """Leaf node holding entries whose keys share one full hash value.

    Entries are stored flat as [key0, val0, key1, val1, ...]; lookup is a
    linear scan, which is fine because genuine full-hash collisions are rare.
    """

    kind = "HashCollisionNode"

    def __init__(self, hash, count, array):
        self.hash = hash
        self.count = count
        self.array = array

    def _slot_of(self, key):
        # Keys live at even offsets; return the offset of `key`, or -1.
        keys = self.array[::2]
        if key in keys:
            return 2 * keys.index(key)
        return -1

    def iteritems(self):
        entries = iter(self.array)
        for k in entries:
            yield (k, next(entries))

    def find(self, shift, keyHash, key):
        slot = self._slot_of(key)
        if slot < 0:
            return _not_found
        return self.array[slot + 1]

    def assoc(self, shift, keyHash, key, val):
        if keyHash != self.hash:
            # Different full hash: nest this node under a bitmap node and retry.
            wrapper = _BitmapIndexedNode(bitpos(self.hash, shift), [_absent, self])
            return wrapper.assoc(shift, keyHash, key, val)
        slot = self._slot_of(key)
        if slot < 0:
            # New colliding key: append the pair.
            extended = self.array[:]
            extended.extend([key, val])
            return _HashCollisionNode(self.hash, self.count + 1, extended), True
        if self.array[slot + 1] == val:
            # Identical pair already stored: share the node.
            return self, False
        replaced = self.array[:]
        replaced[slot + 1] = val
        return _HashCollisionNode(self.hash, self.count, replaced), False

    def without(self, shift, keyHash, key):
        slot = self._slot_of(key)
        if slot < 0:
            return self
        if self.count == 1:
            # Removing the last entry dissolves the node entirely.
            return _absent
        shrunk = self.array[:]
        del shrunk[slot:slot + 2]
        return _HashCollisionNode(self.hash, self.count - 1, shrunk)
## internal helpers (hashing, bit manipulation, node construction)
def createNode(shift, oldKey, oldVal, newHash, newKey, newVal):
    """Build a sub-node that holds two colliding entries.

    If the two keys share the same full hash a _HashCollisionNode is
    required; otherwise they merely collided within one _BITS-bit window
    and a deeper bitmap node can separate them.
    """
    oldHash = hash(oldKey)
    if oldHash == newHash:
        return _HashCollisionNode(oldHash, 2, [oldKey, oldVal, newKey, newVal])
    node, _ = EMPTY_BITMAP_INDEXED_NODE.assoc(shift, oldHash, oldKey, oldVal)
    node, _ = node.assoc(shift, newHash, newKey, newVal)
    return node
def mask(h, sh):
    # Extract the _BITS-wide slice of hash `h` that starts at bit `sh`.
    return (h >> sh) & _MASK
def bitpos(h, sh):
    # Single-bit bitmap position for hash `h` at trie level `sh`.
    return 1 << mask(h, sh)
def index(bitmap, bit):
    # Dense-array index: count of occupied slots below `bit` in `bitmap`.
    return bitcount(bitmap & (bit - 1))
def bitcount(i):
    """Return the number of set bits in the binary representation of ``i``."""
    total = 0
    while i:
        # Kernighan's trick: each step clears the lowest set bit.
        i &= i - 1
        total += 1
    return total
| {
"repo_name": "jml/perfidy",
"path": "perfidy/_hamt.py",
"copies": "1",
"size": "11368",
"license": "mit",
"hash": 5228302401206069000,
"line_mean": 30.4903047091,
"line_max": 112,
"alpha_frac": 0.505981703,
"autogenerated": false,
"ratio": 3.974825174825175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4980806877825175,
"avg_score": null,
"num_lines": null
} |
__author__ = 'allentran'
import json
import os
import multiprocessing
import numpy as np
def _update_min_dict(candidate_node, depth, min_set):
if candidate_node in min_set:
if min_set[candidate_node] <= depth:
return
else:
min_set[candidate_node] = depth
else:
min_set[candidate_node] = depth
def _get_connected_nodes((node_idx, adjancency_list, max_degree), current_depth=1):
connected_dict = {}
single_degree_nodes = [other_idx for other_idx in adjancency_list[node_idx] if adjancency_list[node_idx][other_idx] == 1]
for other_idx in single_degree_nodes:
_update_min_dict(other_idx, current_depth, connected_dict)
if current_depth <= max_degree:
for other_node_idx in single_degree_nodes:
if other_node_idx in adjancency_list:
new_connected_nodes = _get_connected_nodes((other_node_idx, adjancency_list, max_degree), current_depth + 1)
if new_connected_nodes is not None:
for other_idx, depth in new_connected_nodes.iteritems():
_update_min_dict(other_idx, depth, connected_dict)
return connected_dict
class Graph(object):
    """Directed graph loaded from an edge-list file.

    `edge_dict` maps from-node -> {to-node: degree}; the two mapping dicts
    assign dense integer ids to from-nodes and to-nodes independently.
    NOTE(review): relies on dict.iteritems and implicit dict ordering, so
    this module is Python-2 specific as written.
    """
    def __init__(self, graph_path):
        self.from_nodes_mapping = {}
        self.to_nodes_mapping = {}
        self.edge_dict = {}
        self._load_graph(graph_path=graph_path)
        self._create_mappings()
    def save_mappings(self, output_dir):
        # Persist both id mappings as JSON files in output_dir.
        with open(os.path.join(output_dir, 'from.map'), 'w') as from_map_file:
            json.dump(self.from_nodes_mapping, from_map_file)
        with open(os.path.join(output_dir, 'to.map'), 'w') as to_map_file:
            json.dump(self.to_nodes_mapping, to_map_file)
    def get_mappings(self):
        return self.from_nodes_mapping, self.to_nodes_mapping
    def _create_mappings(self):
        # Ids are assigned in dict-iteration order (arbitrary but stable
        # within a single run).
        for key in self.edge_dict:
            self.from_nodes_mapping[key] = len(self.from_nodes_mapping)
        for to_nodes in self.edge_dict.values():
            for to_node in to_nodes:
                if to_node not in self.to_nodes_mapping:
                    self.to_nodes_mapping[to_node] = len(self.to_nodes_mapping)
    def _add_edge(self, from_idx, to_idx, degree=1):
        # For duplicate edges, keep the minimum degree seen.
        if from_idx not in self.edge_dict:
            self.edge_dict[from_idx] = dict()
        if to_idx in self.edge_dict[from_idx]:
            if degree >= self.edge_dict[from_idx][to_idx]:
                return
        self.edge_dict[from_idx][to_idx] = degree
    def _load_graph(self, graph_path):
        # Expected line format: "<from> <to> [degree]", space separated;
        # lines with any other field count are silently skipped.
        with open(graph_path, 'r') as graph_file:
            for line in graph_file:
                parsed_line = line.strip().split(' ')
                if len(parsed_line) in [2, 3]:
                    from_idx = int(parsed_line[0])
                    to_idx = int(parsed_line[1])
                    if len(parsed_line) == 3:
                        degree = int(parsed_line[2])
                        self._add_edge(from_idx, to_idx, degree)
                    else:
                        self._add_edge(from_idx, to_idx)
    def extend_graph(self, max_degree, penalty=2):
        """Expand edges up to `max_degree` hops using a process pool.

        Returns (from/to index pairs, weights) where weight = 1 / depth**penalty.
        NOTE(review): correctness depends on zip()-ing .keys() with pool.map
        results in the same order -- holds as long as the dict is not mutated
        between the two iterations.
        """
        def _zip_args_for_parallel_fn():
            for key in self.from_nodes_mapping.keys():
                yield (key, self.edge_dict, max_degree)
        from_to_idxs = []
        degrees = []
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        connected_nodes_list = pool.map(_get_connected_nodes, _zip_args_for_parallel_fn())
        pool.close()
        pool.join()
        for node_idx, connected_nodes in zip(self.from_nodes_mapping.keys(), connected_nodes_list):
            for other_node, degree in connected_nodes.iteritems():
                from_to_idxs.append([self.from_nodes_mapping[node_idx], self.to_nodes_mapping[other_node]])
                degrees.append(float(1)/(degree ** penalty))
        return np.array(from_to_idxs).astype(np.int32), np.array(degrees).astype(np.float32)
| {
"repo_name": "allentran/graph2vec",
"path": "graph2vec/parser.py",
"copies": "1",
"size": "3985",
"license": "apache-2.0",
"hash": 8196103958095603000,
"line_mean": 37.3173076923,
"line_max": 125,
"alpha_frac": 0.5849435383,
"autogenerated": false,
"ratio": 3.5172109443954103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9595092725180799,
"avg_score": 0.0014123515029222226,
"num_lines": 104
} |
__author__ = 'allentran'
import json
import os
import re
import datetime
import unidecode
from spacy.en import English
import requests
import pandas as pd
import numpy as np
import allen_utils
logger = allen_utils.get_logger(__name__)
class Interval(object):
    """Closed date interval [start, end], endpoints included."""

    def __init__(self, start, end):
        assert isinstance(start, datetime.date) and isinstance(end, datetime.date)
        self.start = start
        self.end = end

    def contains(self, new_date):
        """Return True when new_date falls inside the interval (inclusive)."""
        assert isinstance(new_date, datetime.date)
        return self.start <= new_date <= self.end
# Non-overlapping, consecutive date intervals keyed by integer regime id
# (0-5); consumed by find_regime().  NOTE(review): the boundary dates are
# hard-coded -- presumably Fed chairmanship eras; confirm the intended dates.
fed_regimes = {
    0: Interval(datetime.date(1951, 4, 2), datetime.date(1970, 1, 31)),
    1: Interval(datetime.date(1970, 2, 1), datetime.date(1978, 3, 7)),
    2: Interval(datetime.date(1978, 3, 8), datetime.date(1979, 8, 6)),
    3: Interval(datetime.date(1979, 8, 7), datetime.date(1987, 8, 11)),
    4: Interval(datetime.date(1987, 8, 12), datetime.date(2006, 1, 31)),
    5: Interval(datetime.date(2006, 2, 1), datetime.date(2020, 1, 31)),
}
def find_regime(date):
    """Return the regime id whose interval contains `date`.

    Raises ValueError when the date falls outside every known interval.
    """
    # .items() (not Python-2-only .iteritems()) keeps this working on 2 and 3.
    for regime, interval in fed_regimes.items():
        if interval.contains(date):
            return regime
    # BUG FIX: format the message eagerly -- ValueError does not interpolate
    # %s arguments the way logging calls do.
    raise ValueError("Could not find regime for date, %s" % date)
class PairedDocAndRates(object):
    """One Fed document (speech or minutes) paired with future fed-funds rates."""
    def __init__(self, date, sentences, is_minutes):
        self.date = date
        self.sentences = sentences
        self.is_minutes = is_minutes
        self.rates = None
        # Raises ValueError when the date is outside every known regime.
        self.regime = find_regime(date)
    def match_rates(self, rates_df, days = [30, 90, 180]):
        # Look up the fed-funds rate closest to the document date and to each
        # horizon in `days`.  NOTE(review): `days` is a mutable default, but
        # it is only read here.  rates_df is assumed to be a DataFrame with
        # 'date' and 'value' columns in ascending date order -- confirm.
        def get_closest_rate(days_to_add):
            future_date = self.date + datetime.timedelta(days=days_to_add)
            diff = abs(future_date - rates_df['date'])
            if (last_available_date - future_date).total_seconds() >= 0:
                closest_index = diff.argmin()
                return float(rates_df.iloc[closest_index]['value'])
            else:
                # Horizon extends beyond the available data: no rate.
                return None
        future_rates = {}
        # Closure variable used by get_closest_rate above (late binding).
        last_available_date = rates_df['date'].iloc[-1]
        current_rate = get_closest_rate(0)
        # NOTE(review): truthiness check means a rate of exactly 0.0 is
        # treated the same as missing -- confirm this is intended.
        if current_rate:
            future_rates['0'] = current_rate
        for add_days in days:
            future_rate = get_closest_rate(add_days)
            if future_rate:
                future_rates[str(add_days)] = future_rate
        self.rates = future_rates
    def to_dict(self):
        """JSON-serializable representation of the pairing."""
        return dict(
            date = self.date.strftime('%Y-%m-%d'),
            sentences = self.sentences,
            rates = self.rates,
            is_minutes = self.is_minutes,
            regime = self.regime
        )
class Vocab(object):
    """Word-frequency accumulator that produces a word -> index mapping."""

    def __init__(self):
        self.vocab = {}
        # Special placeholder tokens always occupy the first positions.
        self.special_words = [
            '$CARDINAL$',
            '$DATE$',
            '$UNKNOWN$'
        ]

    def update_count(self, word):
        """Increment the occurrence count for `word`."""
        if word not in self.vocab:
            self.vocab[word] = 1
        else:
            self.vocab[word] += 1

    def to_dict(self, min_count=5):
        """Return {word: position}, keeping words seen at least `min_count` times.

        Special tokens come first, at positions 0 .. len(special_words) - 1.
        """
        position_dict = {word: idx for idx, word in enumerate(self.special_words)}
        counter = len(self.special_words)
        # .items() instead of Python-2-only .iteritems() so this runs on 2 and 3.
        for word, word_count in self.vocab.items():
            if word_count >= min_count:
                position_dict[word] = counter
                counter += 1
        return position_dict
class DataTransformer(object):
    """Pipeline: scan local Fed documents, build a vocabulary, fetch FRED
    rates, and emit paired (document, future-rates) data as JSON.
    NOTE(review): Python-2 specific (unicode/.decode usage throughout)."""
    def __init__(self, data_dir, min_sentence_length):
        # FRED observations endpoint for the fed-funds series.
        self.url = 'https://api.stlouisfed.org/fred/series/observations'
        self.data_dir = data_dir
        self.min_sentence_length = min_sentence_length
        # spaCy entity labels replaced with placeholder tokens.
        self.replace_entities = {
            'DATE': '$DATE$',
            'CARDINAL': '$CARDINAL$'
        }
        self.nlp = English()
        # custom token replacement
        self.regexes = [
            (re.compile(r'\d{4}'), '$DATE$'),
            (re.compile(r'\d+[\.,]*\d+'), '$CARDINAL$')
        ]
        self.vocab = Vocab()
        self.word_positions = None
        self.rates = None
        self.docs = None
    def get_rates(self, api_key):
        """Download the FEDFUNDS series from FRED into self.rates (a DataFrame)."""
        params = dict(
            api_key=api_key,
            file_type='json',
            series_id='FEDFUNDS'
        )
        r = requests.get(self.url, params=params)
        if r.status_code == 200:
            self.rates = pd.DataFrame(r.json()['observations'])
            self.rates['date'] = self.rates['date'].apply(lambda s: datetime.datetime.strptime(s, '%Y-%m-%d').date())
            # NOTE(review): DataFrame.sort returns a sorted copy and the
            # result is discarded, so self.rates keeps API order -- likely
            # meant `self.rates = self.rates.sort('date')`.
            self.rates.sort('date')
    def build_vocab(self):
        """Count token frequencies across all date-named files under data_dir."""
        def process_doc(doc_path):
            with open(doc_path, 'r') as f:
                text = unidecode.unidecode(unicode(f.read().decode('iso-8859-1')))
                text = ' '.join(text.split()).strip()
                if len(text) > 0:
                    doc = self.nlp(unicode(text.lower()))
                    doc_words = set()
                    for sent in doc.sents:
                        if len(sent) > self.min_sentence_length:
                            # NOTE(review): iterates `doc`, not `sent`, so all
                            # document tokens are visited per qualifying
                            # sentence; doc_words keeps counts per-document,
                            # but `for token in sent` was probably intended.
                            for token in doc:
                                if token.text not in doc_words:
                                    self.vocab.update_count(token.text)
                                    doc_words.add(token.text)
        # Documents are files whose names contain an 8-digit date.
        file_re = re.compile(r'\d{8}')
        for root, dirs, filenames in os.walk(self.data_dir):
            for filename in filenames:
                if file_re.search(filename):
                    filepath = os.path.join(root, filename)
                    process_doc(filepath)
                    logger.info("Built vocab from: %s", filepath)
        self.word_positions = self.vocab.to_dict()
    def strip_text(self, text):
        """Replace DATE/CARDINAL entities found by spaCy with placeholder tokens."""
        doc = self.nlp(unicode(text).lower())
        # spacy entity replacement
        ents_dict = {ent.text: self.replace_entities[ent.label_] for ent in doc.ents if ent.label_ in self.replace_entities.keys()}
        for ent in ents_dict:
            text = text.replace(ent, ents_dict[ent])
        return text
    def get_docs(self, min_sentence_length=8):
        """Parse each document into sentences of word indexes paired with rates.

        NOTE(review): this parameter shadows self.min_sentence_length set in
        __init__ -- confirm which threshold is intended here."""
        def parse_doc(doc_path):
            with open(doc_path, 'r') as f:
                text = unidecode.unidecode(unicode(f.read().decode('iso-8859-1')))
                text = ' '.join(text.split()).strip()
                if len(text) > 0:
                    # The 8-digit sequence in the path is the document date.
                    date = datetime.datetime.strptime(date_re.search(doc_path).group(0), '%Y%m%d').date()
                    stripped_text = self.strip_text(text)
                    doc = self.nlp(unicode(stripped_text))
                    sentences = list(doc.sents)
                    doc_sents = []
                    # The first sentence is skipped -- presumably boilerplate
                    # or a heading; confirm.
                    for sent in sentences[1:]:
                        if len(sent) > min_sentence_length:
                            sentence_as_idxes = []
                            for token in sent:
                                skip = False
                                for regex, replacement_token in self.regexes:
                                    match = regex.match(token.text)
                                    if match:
                                        sentence_as_idxes.append(self.word_positions[replacement_token])
                                        skip = True
                                if not skip:
                                    try:
                                        sentence_as_idxes.append(self.word_positions[token.text])
                                    except KeyError:
                                        # Out-of-vocabulary tokens map to $UNKNOWN$.
                                        sentence_as_idxes.append(self.word_positions['$UNKNOWN$'])
                            doc_sents.append(sentence_as_idxes)
                    # A path containing 'minutes' marks the document as minutes.
                    paired_doc = PairedDocAndRates(date, doc_sents, doc_path.find('minutes') > -1)
                    paired_doc.match_rates(self.rates)
                    return paired_doc
        date_re = re.compile(r'\d{8}')
        file_re = re.compile(r'\d{8}')
        docs = []
        for root, dirs, filenames in os.walk(self.data_dir):
            for filename in filenames:
                if file_re.search(filename):
                    filepath = os.path.join(root, filename)
                    parsed_doc = parse_doc(filepath)
                    if parsed_doc:
                        logger.info("Parsed %s", filepath)
                        docs.append(parsed_doc)
        self.docs = docs
    def save_output(self):
        """Write the paired data and the vocabulary dictionary into data_dir."""
        with open(os.path.join(self.data_dir, 'paired_data.json'), 'w') as f:
            json.dump([doc.to_dict() for doc in self.docs], f, indent=2, sort_keys=True)
        with open(os.path.join(self.data_dir, 'dictionary.json'), 'w') as f:
            json.dump(self.vocab.to_dict(), f, indent=2, sort_keys=True)
if __name__ == "__main__":
    # NOTE(review): the FRED API key is hard-coded below; it should be read
    # from an environment variable or config file, not committed to source.
    data_transformer = DataTransformer('data', min_sentence_length=8)
    data_transformer.build_vocab()
    data_transformer.get_rates('51c09c6b8aa464671aa8ac96c76a8416')
    data_transformer.get_docs()
    data_transformer.save_output()
| {
"repo_name": "allentran/fed-rates-bot",
"path": "fed_bot/model/data.py",
"copies": "1",
"size": "8747",
"license": "mit",
"hash": 5731184400335391000,
"line_mean": 32.3854961832,
"line_max": 131,
"alpha_frac": 0.5311535384,
"autogenerated": false,
"ratio": 3.817983413356613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.981541542467903,
"avg_score": 0.006744305415516369,
"num_lines": 262
} |
__author__ = 'allentran'
import models
class TestNetwork(object):
    """Smoke tests for the policy and Q networks defined in `models`."""

    def setUp(self):
        # Shared hyper-parameters used by both network fixtures.
        self.n_minibatch = 32
        self.n_assets = 2
        self.n_actions = self.n_assets * 2
        self.k_info = 16
        self.preprocessed_size = 10
        self.lstm_size = 5
        self.merge_size = 5
        self.q_dense_sizes = [20, 10]

    def policy_test(self):
        """Building a policy network and sampling a noisy action must not raise."""
        network = models.DeepPolicyNetwork(
            self.n_minibatch,
            self.n_assets,
            self.n_actions,
            self.k_info,
            self.preprocessed_size,
            self.lstm_size,
            self.merge_size
        )
        network.get_noisy_action(1e-2)

    def q_test(self):
        """Constructing a Q network must not raise."""
        models.DeepQNetwork(
            self.n_minibatch,
            self.n_assets,
            self.n_actions,
            self.k_info,
            self.preprocessed_size,
            self.lstm_size,
            self.merge_size,
            self.q_dense_sizes
        )

    def gen_fake_states(self):
        pass
| {
"repo_name": "allentran/rl-l2t",
"path": "model/tests.py",
"copies": "1",
"size": "1040",
"license": "apache-2.0",
"hash": 6738674117378407000,
"line_mean": 20.2244897959,
"line_max": 50,
"alpha_frac": 0.5134615385,
"autogenerated": false,
"ratio": 3.6363636363636362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46498251748636366,
"avg_score": null,
"num_lines": null
} |
__author__ = 'allentran'
import numpy as np
import lasagne
import theano
import theano.tensor as TT
from lasagne.regularization import regularize_network_params, l2
class LastTimeStepLayer(lasagne.layers.Layer):
    """Pick one timestep per batch row from a (batch, T, k) tensor,
    selected by the per-row indexes in `last_indexes`."""
    def __init__(self, incoming, batch_size, last_indexes, **kwargs):
        super(LastTimeStepLayer, self).__init__(incoming, **kwargs)
        self.batch_size = batch_size
        # Per-row timestep index (symbolic integer vector of length batch_size).
        self.last_indexes = last_indexes
    # from batch x T x k to batch x k
    def get_output_for(self, input, **kwargs):
        # Advanced indexing: one (row, timestep) pair per batch element.
        return input[TT.arange(self.batch_size), self.last_indexes]
    def get_output_shape_for(self, input_shape):
        # The time axis is dropped: (batch, T, k) -> (batch, k).
        return input_shape[0], input_shape[2]
def mixture_density_outputs(input, target_size, n_mixtures):
    """Split a dense layer's output into mixture-density parameters.

    `input` is batch x (2 + target_size) * n_mixtures (as implied by the
    three slices below); returns symbolic (priors, means, stds).  Priors are
    softmax-normalized and stds are exponentiated to keep them positive.
    """
    batch_size = input.shape[0]
    m_by_n = n_mixtures * target_size
    priors = TT.nnet.softmax(input[:, :n_mixtures]) # batch, n_mixtures
    means = input[:, n_mixtures:n_mixtures + m_by_n].reshape((batch_size, target_size, n_mixtures)) # batch x target x mixtures
    stds = TT.exp(input[:, n_mixtures + m_by_n:]).reshape((batch_size, n_mixtures)) # batch x n_mixtures
    return priors, means, stds
# input should be batch x (2 + target_size) * n_mixtures
# target is batch x target_size
def mixture_density_loss(priors, means, stds, targets, target_size, mask=None, eps=1e-4):
    """Negative log-likelihood of `targets` under an isotropic Gaussian mixture.

    Uses the log-sum-exp trick (subtracting the per-row max exponent) for
    numerical stability.  When `mask` is given, each example's loss is
    multiplied by it (e.g. to zero out padded examples).
    """
    kernel_constant = ((2 * np.pi) ** (-0.5 * target_size)) * (1 / (stds ** target_size))
    norm_std = - (TT.sqr(targets[:, :, None] - means).sum(axis=1)) / (2 * TT.sqr(stds)) # normed over targets
    max_norm_std = norm_std.max(axis=1)
    kernel_minus_max = kernel_constant * TT.exp(norm_std - max_norm_std[:, None])
    log_e_prob = TT.log((priors * kernel_minus_max).sum(axis=1)) + max_norm_std# summing over mixtures
    # BUG FIX: `mask` may be a symbolic tensor whose truth value is undefined;
    # `if mask:` would raise (or silently misbehave), so compare against None.
    if mask is not None:
        return - mask * log_e_prob
    else:
        return - log_e_prob
class FedLSTMLasagne(object):
    """Hierarchical word -> sentence -> document LSTM predicting target rates.

    Word embeddings are densely preprocessed, run through a two-layer word
    LSTM, summarized at each sentence's last word, run through a sentence
    LSTM, then merged with regime/doc-type embeddings.  The head is either a
    Gaussian mixture density network (``mixture_density=True``, the default)
    or a plain squared-error regression.
    """
    def __init__(
        self,
        n_words=None,
        word_size=None,
        n_regimes=None,
        regime_size=None,
        doc_size=None,
        lstm_size=32,
        hidden_size=11,
        n_mixtures=2,
        target_size=3,
        init_word_vectors=None,
        l2_scale=1e-4,
        mixture_density=True,
    ):
        self.inputs_indexes = TT.tensor3(dtype='int32') # batch x sentences x T
        self.last_word_in_sentence = TT.imatrix() # batch x sentences
        self.last_sentence_in_doc = TT.ivector() # batch
        self.targets = TT.matrix(dtype=theano.config.floatX) # batch_size
        self.regimes = TT.ivector() # minibatch
        self.doc_types = TT.ivector() # minibatch
        word_input_layer = lasagne.layers.InputLayer(shape=(None, None, None), input_var=self.inputs_indexes)
        regime_input_layer = lasagne.layers.InputLayer(shape=(None, ), input_var=self.regimes)
        doc_input_layer = lasagne.layers.InputLayer(shape=(None, ), input_var=self.doc_types)
        batch_size, n_sentences, T = word_input_layer.input_var.shape
        word_embeddings = lasagne.layers.EmbeddingLayer(word_input_layer, n_words, word_size, W=init_word_vectors)
        regime_embeddings = lasagne.layers.EmbeddingLayer(regime_input_layer, n_regimes, regime_size)
        doc_embeddings = lasagne.layers.EmbeddingLayer(doc_input_layer, 2, doc_size)
        # flatten so the dense preprocessing applies per word
        word_embeddings = lasagne.layers.ReshapeLayer(word_embeddings, (-1, word_size))
        preprocessed_layer = lasagne.layers.DenseLayer(word_embeddings, lstm_size)
        preprocessed_dropout = lasagne.layers.DropoutLayer(preprocessed_layer, p=0.5)
        reshaped_preprocessed_layer = lasagne.layers.ReshapeLayer(preprocessed_dropout, shape=(batch_size * n_sentences, T, lstm_size))
        # forget-gate bias of +5 keeps gates open early in training
        forget_gate = lasagne.layers.Gate(b=lasagne.init.Constant(5.0))
        lstm_layer = lasagne.layers.LSTMLayer(reshaped_preprocessed_layer, lstm_size, forgetgate=forget_gate)
        forget_gate = lasagne.layers.Gate(b=lasagne.init.Constant(5.0))
        lstm_layer2 = lasagne.layers.LSTMLayer(lstm_layer, lstm_size, forgetgate=forget_gate)
        # summarize each sentence by the state at its last real word
        sentence_summary = LastTimeStepLayer(lstm_layer2, batch_size * n_sentences, TT.reshape(self.last_word_in_sentence, (batch_size * n_sentences,)))
        sentence_summary = lasagne.layers.ReshapeLayer(sentence_summary, shape=(batch_size, n_sentences, lstm_size))
        forget_gate = lasagne.layers.Gate(b=lasagne.init.Constant(5.0))
        doc_summary = lasagne.layers.LSTMLayer(sentence_summary, lstm_size, forgetgate=forget_gate)
        last_doc_summary = LastTimeStepLayer(doc_summary, batch_size, self.last_sentence_in_doc) # batch x lstm_size
        merge_layer = lasagne.layers.ConcatLayer([last_doc_summary, regime_embeddings, doc_embeddings])
        merge_dropout = lasagne.layers.DropoutLayer(merge_layer, p=0.5)
        preoutput_layer = lasagne.layers.DenseLayer(merge_dropout, hidden_size)
        if mixture_density:
            output_layer = lasagne.layers.DenseLayer(preoutput_layer, (2 + target_size) * n_mixtures, nonlinearity=None)
            l2_penalty = regularize_network_params(output_layer, l2)
            # stochastic (dropout on) graph for training, deterministic for eval
            priors, means, stds = mixture_density_outputs(lasagne.layers.get_output(output_layer, deterministic=False), target_size, n_mixtures)
            priors_det, means_det, stds_det = mixture_density_outputs(lasagne.layers.get_output(output_layer, deterministic=True), target_size, n_mixtures)
            loss = mixture_density_loss(priors, means, stds, self.targets, target_size).mean() + l2_penalty * l2_scale
            cost_ex_l2 = mixture_density_loss(priors_det, means_det, stds_det, self.targets, target_size).mean()
            self._output = theano.function(
                [
                    self.inputs_indexes,
                    self.last_word_in_sentence,
                    self.last_sentence_in_doc,
                    self.regimes,
                    self.doc_types,
                ],
                [priors_det, means_det, stds_det],
            )
        else:
            output_layer = lasagne.layers.DenseLayer(preoutput_layer, target_size, nonlinearity=None)
            l2_penalty = regularize_network_params(output_layer, l2)
            loss = lasagne.objectives.squared_error(lasagne.layers.get_output(output_layer, deterministic=False), self.targets).mean() + l2_penalty * l2_scale
            cost_ex_l2 = TT.sqrt(lasagne.objectives.squared_error(lasagne.layers.get_output(output_layer, deterministic=True), self.targets).mean())
            self._output = theano.function(
                [
                    self.inputs_indexes,
                    self.last_word_in_sentence,
                    self.last_sentence_in_doc,
                    self.regimes,
                    self.doc_types,
                ],
                lasagne.layers.get_output(output_layer, deterministic=True),
            )
        params = lasagne.layers.get_all_params(output_layer, trainable=True)
        updates = lasagne.updates.adadelta(loss, params)
        self._train = theano.function(
            [
                self.inputs_indexes,
                self.last_word_in_sentence,
                self.last_sentence_in_doc,
                self.regimes,
                self.doc_types,
                self.targets
            ],
            cost_ex_l2,
            updates=updates
        )
        self._cost = theano.function(
            [
                self.inputs_indexes,
                self.last_word_in_sentence,
                self.last_sentence_in_doc,
                self.regimes,
                self.doc_types,
                self.targets
            ],
            cost_ex_l2,
        )
    def train(self, targets, sentences, last_word, last_sentence, regimes, doctypes):
        """One adadelta step; returns the deterministic cost (without L2)."""
        return self._train(
            sentences,
            last_word,
            last_sentence,
            regimes,
            doctypes,
            targets
        )
    def get_cost(self, targets, sentences, last_word, last_sentence, regimes, doctypes):
        """Deterministic cost without updating parameters."""
        return self._cost(
            sentences,
            last_word,
            last_sentence,
            regimes,
            doctypes,
            targets
        )
    def get_output(self, sentences, last_word, last_sentence, regimes, doctypes):
        """Deterministic network output (mixture params or point estimates)."""
        return self._output(
            sentences,
            last_word,
            last_sentence,
            regimes,
            doctypes,
        )
| {
"repo_name": "allentran/fed-rates-bot",
"path": "fed_bot/model/lstm_lasagne.py",
"copies": "1",
"size": "8422",
"license": "mit",
"hash": -1079157643151095800,
"line_mean": 40.6930693069,
"line_max": 158,
"alpha_frac": 0.611137497,
"autogenerated": false,
"ratio": 3.528278173439464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4639415670439464,
"avg_score": null,
"num_lines": null
} |
__author__ = 'allentran'
import numpy as np
import theano
import theano.tensor as TT
from theano_layers import layers
class FedLSTM(object):
    """Theano (hand-rolled layers) LSTM model with a mixture-density head.

    Embeddings for words, regimes and doc types are regularized and updated
    only over the unique indexes seen in each minibatch.
    """
    def __init__(
        self,
        input_size=300,
        output_size=3,
        hidden_size=None,
        lstm_size=None,
        truncate=10,
        n_mixtures=5,
        target_size=3,
        l2_penalty=0,
        n_regimes=6,
        regime_size=5,
        doctype_size=5,
        vocab_size=10,
        word_vectors=None,
    ):
        self.inputs_indexes = TT.tensor3(dtype='int32') # n_words x n_sentences x n_batch
        self.regimes = TT.ivector() # minibatch
        self.doc_types = TT.ivector() # minibatch
        self.unique_inputs = TT.ivector()
        self.unique_regimes = TT.ivector()
        self.unique_doc_types = TT.ivector()
        self.mask = TT.tensor3() # n_words x n_sentences
        self.outputs = TT.matrix() # n_batch x n_target_rates
        regime_layer = layers.VectorEmbeddings(
            n_vectors=n_regimes,
            size=regime_size
        )
        doctype_layer = layers.VectorEmbeddings(
            n_vectors=2,
            size=doctype_size
        )
        word_vectors_layer = layers.VectorEmbeddings(
            n_vectors=vocab_size,
            size=input_size
        )
        # seed word embeddings with pretrained vectors
        word_vectors_layer.V.set_value(word_vectors)
        regime_vectors = regime_layer.V[self.regimes] # n_batch x size
        doctype_vectors = doctype_layer.V[self.doc_types] # n_batch x size
        inputs = word_vectors_layer.V[self.inputs_indexes] # T x n_sentences x n_batch x size
        preprocess_layer = layers.DenseLayer(
            inputs,
            input_size,
            lstm_size,
            activation=TT.nnet.relu
        )
        # T x n_sentences x n_batch x hidden[0]
        lstmforward_layer = layers.LSTMStackedLayer(
            preprocess_layer.h_outputs,
            lstm_size,
            n_layers=2,
            input_size=lstm_size,
            truncate=truncate,
        )
        # T x n_sentences x n_batch x hidden[1]
        # max within a sentence (pick out phrases), then max over sentences
        # note first max eliminates first axis, so 2nd max(axis=0) kills 2nd axis
        max_pooled_words = (lstmforward_layer.h_outputs * self.mask[:, :, :, None]).max(axis=0).max(axis=0)
        # n_batch x hidden[1]
        words_and_context = TT.concatenate(
            [
                max_pooled_words,
                regime_vectors,
                doctype_vectors
            ],
            axis=1
        )
        # n_batch x hidden[2] + doctype_size + regime_size
        preoutput_layer = layers.DenseLayer(
            words_and_context,
            lstm_size + doctype_size + regime_size,
            hidden_size,
            activation=TT.nnet.relu,
            feature_axis=1,
            normalize_axis=0
        )
        # n_batch x hidden[3]
        output_layer = layers.DenseLayer(
            preoutput_layer.h_outputs,
            hidden_size,
            (2 + target_size) * n_mixtures,
            activation=TT.tanh
        )
        # n_batch x (2 * target_size) * n_mixtures
        mixture_density_layer = layers.MixtureDensityLayer(
            output_layer.h_outputs[None, :, :],
            self.outputs[None, :, :],
            target_size=output_size,
            n_mixtures=n_mixtures
        )
        self.layers = [
            preprocess_layer,
            lstmforward_layer,
            preoutput_layer,
            output_layer,
        ]
        # L2 over dense/LSTM weights plus only the embedding rows in use
        l2_cost = 0
        for layer in self.layers:
            l2_cost += l2_penalty * layer.get_l2sum()
        l2_cost += l2_penalty * regime_layer.get_l2sum(self.unique_regimes)
        l2_cost += l2_penalty * doctype_layer.get_l2sum(self.unique_doc_types)
        l2_cost += l2_penalty * word_vectors_layer.get_l2sum(self.unique_inputs)
        self.loss_function = mixture_density_layer.nll_cost.mean()
        updates = []
        for layer in self.layers:
            updates += layer.get_updates(self.loss_function + l2_cost)
        updates += regime_layer.get_updates(self.loss_function + l2_cost, self.unique_regimes)
        updates += doctype_layer.get_updates(self.loss_function + l2_cost, self.unique_doc_types)
        updates += word_vectors_layer.get_updates(self.loss_function + l2_cost, self.unique_inputs)
        self._cost_and_update = theano.function(
            inputs=[
                self.inputs_indexes,
                self.outputs,
                self.mask,
                self.regimes,
                self.doc_types,
                self.unique_inputs,
                self.unique_doc_types,
                self.unique_regimes
            ],
            outputs=self.loss_function,
            updates=updates
        )
        self._cost= theano.function(
            inputs=[self.inputs_indexes, self.outputs, self.mask, self.regimes, self.doc_types],
            outputs=self.loss_function,
        )
        self._output = theano.function(
            inputs=[self.inputs_indexes, self.mask, self.regimes, self.doc_types],
            outputs=mixture_density_layer.outputs,
        )
    def get_cost_and_update(self, inputs, outputs, mask, regimes, doctypes):
        """One training step; computes unique index sets for sparse updates."""
        u_inputs = np.unique(inputs).flatten()
        u_regimes = np.unique(regimes).flatten()
        u_docs = np.unique(doctypes).flatten()
        return self._cost_and_update(inputs, outputs, mask, regimes, doctypes, u_inputs, u_docs, u_regimes)
    def get_cost(self, inputs, outputs, mask, regimes, doctypes):
        """Mean NLL without updating parameters."""
        return self._cost(inputs, outputs, mask, regimes, doctypes)
    def get_output(self, inputs, mask, regimes, doctypes):
        """Mixture-density layer outputs for the given batch."""
        return self._output(inputs, mask, regimes, doctypes)
| {
"repo_name": "allentran/fed-rates-bot",
"path": "fed_bot/model/lstm.py",
"copies": "1",
"size": "5799",
"license": "mit",
"hash": -5154686223776002000,
"line_mean": 32.1371428571,
"line_max": 107,
"alpha_frac": 0.5594067943,
"autogenerated": false,
"ratio": 3.677235256816741,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47366420511167406,
"avg_score": null,
"num_lines": null
} |
__author__ = 'allentran'
import numpy as np
from ..scraper import Scraper
from ..model import lstm, lstm_lasagne
def model_test():
    """End-to-end training smoke test for the Theano FedLSTM model.

    Fix: ``xrange`` is Python-2-only (NameError under Python 3); ``range``
    iterates identically here.
    """
    n_sentences = 6
    T = 21
    n_batch = 7
    vocab_size = 55
    word_vector_size = 111
    test_ob = dict(
        word_vectors=np.random.randint(0, vocab_size, size=(T, n_sentences, n_batch)).astype('int32'),
        rates=np.ones((n_batch, 3)).astype('float32'),
        max_mask=np.ones((T, n_sentences, n_batch)).astype('float32'),
        regimes=np.ones(n_batch).astype('int32'),
        doc_types=np.ones(n_batch).astype('int32')
    )
    word_embeddings = np.random.normal(0, 1, size=(vocab_size, word_vector_size)).astype('float32')
    assert word_embeddings[test_ob['word_vectors']].shape == (T, n_sentences, n_batch, word_vector_size)
    model = lstm.FedLSTM(
        vocab_size=vocab_size,
        hidden_size=9,
        lstm_size=12,
        n_mixtures=2,
        word_vectors=word_embeddings,
        doctype_size=7,
        regime_size=4,
        input_size=word_vector_size
    )
    first_cost = model.get_cost_and_update(
        test_ob['word_vectors'],
        test_ob['rates'],
        test_ob['max_mask'],
        test_ob['regimes'],
        test_ob['doc_types']
    )
    # cost should fall after a few updates
    for _ in range(5):
        last_cost = model.get_cost_and_update(
            test_ob['word_vectors'],
            test_ob['rates'],
            test_ob['max_mask'],
            test_ob['regimes'],
            test_ob['doc_types']
        )
    assert first_cost > last_cost
def lasagne_test():
    """Training smoke test for the Lasagne model.

    Fix: ``xrange`` is Python-2-only (NameError under Python 3); ``range``
    iterates identically here.
    """
    n_batch = 5
    n_sentence = 4
    n_words = 11
    n_targets = 3
    n_mixtures = 2
    vocab_size = 20
    word_size = 6
    word_vectors = np.random.randn(vocab_size, word_size).astype('float32')
    fedlstm_model = lstm_lasagne.FedLSTMLasagne(vocab_size, word_size, 50, 13, 10, target_size=n_targets, n_mixtures=n_mixtures, init_word_vectors=word_vectors)
    targets = np.random.randn(n_batch, n_targets).astype('float32')
    words = np.random.randint(0, 10, size=(n_batch, n_sentence, n_words)).astype('int32')
    first_cost = fedlstm_model._train(
        words,
        10 * np.ones((n_batch, n_sentence)).astype('int32'),
        3 * np.ones((n_batch)).astype('int32'),
        np.ones(5).astype('int32'),
        np.ones(5).astype('int32'),
        targets
    )
    # cost should fall after a few updates
    for _ in range(10):
        last_cost = fedlstm_model._train(
            words,
            10 * np.ones((n_batch, n_sentence)).astype('int32'),
            3 * np.ones((n_batch)).astype('int32'),
            np.ones(5).astype('int32'),
            np.ones(5).astype('int32'),
            targets
        )
    assert first_cost > last_cost
def scraper_test():
    """Smoke test: the scraper honors its ``limit`` argument (hits network)."""
    scraper = Scraper()
    assert len(scraper.get_docs(limit=1)) == 1
| {
"repo_name": "allentran/fed-rates-bot",
"path": "fed_bot/tests/tests.py",
"copies": "1",
"size": "2767",
"license": "mit",
"hash": 6238550930257943000,
"line_mean": 26.67,
"line_max": 160,
"alpha_frac": 0.5724611493,
"autogenerated": false,
"ratio": 3.176808266360505,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9184386118173788,
"avg_score": 0.01297665949734333,
"num_lines": 100
} |
__author__ = 'allentran'
import re
import urlparse
import os
import requests
from bs4 import BeautifulSoup
class Scraper(object):
    """Downloads FOMC statements and minutes from federalreserve.gov."""
    def __init__(self, start_year=2008, end_year=2009):
        # document URLs embed the meeting date as YYYYMMDD
        self.date_regex = re.compile(r'(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})')
        self.recent_url = 'http://www.federalreserve.gov/monetarypolicy/fomccalendars.htm'
        self.historical_years = range(start_year, end_year + 1)
        self.historical_url = 'http://www.federalreserve.gov/monetarypolicy/fomchistorical%s.htm'
    def download_minutes(self, urls):
        """Fetch each URL; return [{'date': [y, m, d], 'text': ...}, ...]."""
        def download(session, url):
            r = session.get(url)
            soup = BeautifulSoup(r.content, 'lxml')
            # the document body lives in the 'leftText' div
            text = soup.find('div',{'id':'leftText'}).get_text()
            match = self.date_regex.search(url)
            year, month, day = match.group('year'), match.group('month'), match.group('day')
            return dict(
                date=[year, month, day],
                text=text
            )
        session = requests.session()
        return [download(session, url) for url in urls]
    def get_urls(self, table_class='statement2', link_text='Statement'):
        """Collect document links from the recent and historical index pages,
        dropping PDF and beige-book links."""
        session = requests.session()
        # get recent
        r = session.get(self.recent_url)
        soup = BeautifulSoup(r.content, 'lxml')
        links = []
        for row in soup.find_all('td', table_class):
            row_links = row.find_all('a')
            links += [urlparse.urljoin(self.recent_url, l['href']) for l in row_links]
        # get historical
        for year in self.historical_years:
            r = session.get(self.historical_url % year)
            soup = BeautifulSoup(r.content, 'lxml')
            row_links = soup.find_all('a')
            links += [urlparse.urljoin(self.recent_url, l['href']) for l in row_links if l.get_text().strip() == link_text]
        links = [link for link in links if link.find('.pdf') == -1 and link.find('beigebook') == -1]
        return links
    def get_docs(self, minutes=False, limit=1000000000):
        """Download up to ``limit`` statements (default) or minutes."""
        if not minutes:
            statement_urls = self.get_urls()
            return self.download_minutes(statement_urls[:limit])
        minute_urls = self.get_urls(table_class='minutes', link_text='HTML')
        return self.download_minutes(minute_urls[:limit])
if __name__ == "__main__":
    # Download all FOMC statements and write each to data/statements/YYYYMMDD.txt
    scraper = Scraper()
    statements = scraper.get_docs()
    for statement in statements:
        date = statement['date']
        filename = '%s%s%s.txt' % (date[0], date[1], date[2])
        with open(os.path.join('data/statements', filename), 'w') as f:
            f.write(statement['text'].encode('utf-8'))
"repo_name": "allentran/fed-rates-bot",
"path": "fed_bot/scraper/frb.py",
"copies": "1",
"size": "2663",
"license": "mit",
"hash": 1992766341370717200,
"line_mean": 34.0526315789,
"line_max": 123,
"alpha_frac": 0.5884340969,
"autogenerated": false,
"ratio": 3.550666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4639100763566667,
"avg_score": null,
"num_lines": null
} |
__author__ = 'allentran'
import theano
from theano_layers import layers
import numpy as np
import theano.tensor as TT
class DeepDPGModel(object):
    """Container pairing a deterministic policy (actor) with its Q critic
    (DDPG-style); the replay cache is attached later."""
    def __init__(self, n_minibatch, n_assets, n_actions, k_info, preprocessed_size, lstm_size, merge_size, dense_sizes):
        self.policy_model = DeepPolicyNetwork(
            n_minibatch,
            n_assets,
            n_actions,
            k_info,
            preprocessed_size,
            lstm_size,
            merge_size
        )
        # the critic is built with a wider merge layer (merge_size * 2)
        self.q_model = DeepQNetwork(
            n_minibatch,
            n_assets,
            n_actions,
            k_info,
            preprocessed_size,
            lstm_size,
            merge_size * 2,
            dense_sizes
        )
        self.replay_cache = None
# need current weights and target weights
# update weights (current via SGD, target from post SGD)
# need to generate actions (buy/sell per asset, buys as fraction of cash, sell as fraction of asset) - maybe just sigmoid
# prices are T x minibatch x N (make sure these are scaled)
# info is T x minibatch x k
# time to trade is T x minibatch x N
# deterministic states are 1 x minibatch x s
# holdings x N + 1
class DeepPolicyNetwork(object):
    """Actor network: maps price/info/next-trade sequences plus deterministic
    state to a softmax action vector."""
    def __init__(self, n_minibatch, n_assets, n_actions, k_info, preprocessed_size, lstm_size, merge_size, seed=1692):
        np.random.seed(seed)
        # shared random stream used for exploration noise
        self.srng = TT.shared_randomstreams.RandomStreams(seed)
        self.n_minibatch = n_minibatch
        self.n_actions = n_actions
        self.layers = []
        # non deterministic states
        self.prices = TT.tensor3()
        self.info = TT.tensor3()
        self.next_trade = TT.tensor3()
        self.det_states = TT.matrix()
        # deterministic states
        states = TT.concatenate(
            [
                self.prices,
                self.info,
                self.next_trade
            ],
            axis=2
        )
        nondet_prepreprocessor = layers.DenseLayer(
            states,
            n_assets * 2 + k_info,
            preprocessed_size,
            TT.nnet.relu,
            normalize_axis=1
        )
        nondet_processor = layers.LSTMLayer(
            nondet_prepreprocessor.h_outputs,
            lstm_size,
            preprocessed_size,
            normalize_axis=1
        )
        det_processor = layers.DenseLayer(
            self.det_states,
            n_assets * 2 + 1,
            preprocessed_size,
            TT.nnet.relu,
            normalize_axis=1
        )
        # merge the LSTM's last step with the deterministic-state features
        merger = layers.DenseLayer(
            TT.concatenate(
                [
                    nondet_processor.h_outputs[-1, :, :],
                    det_processor.h_outputs
                ],
                axis=1
            ),
            preprocessed_size * 2,
            merge_size,
            TT.nnet.relu,
            normalize_axis=0
        )
        merger2 = layers.DenseLayer(
            merger.h_outputs,
            merge_size,
            merge_size,
            TT.nnet.relu,
            normalize_axis=0
        )
        self.action_layer = layers.DenseLayer(
            merger2.h_outputs,
            merge_size,
            n_actions,
            TT.nnet.softmax,
            normalize_axis=0
        )
        self.layers += [
            nondet_prepreprocessor,
            nondet_processor,
            det_processor,
            merger,
            merger2,
            self.action_layer
        ]
    def update_target_weights(self):
        # TODO: copy current weights into the target network (unimplemented)
        pass
    def get_noisy_action(self, std):
        """Symbolic action with additive Gaussian exploration noise."""
        actions = self.action_layer.h_outputs
        return actions + self.srng.normal(size=actions.shape, std=std)
# need current weights and target weights
# update weights (current via SGD, target from post SGD)
# need gradients wrt to theta AND actions
class DeepQNetwork(object):
    """Critic network: scores (state, action) pairs as ``q_sa``.

    NOTE(review): every layer is built with ``dupe_weights=True`` --
    presumably so the layer keeps a duplicate (target-network) weight copy;
    confirm against theano_layers.
    """
    def __init__(self, n_minibatch, n_assets, n_actions, k_info, preprocessed_size, lstm_size, merge_size, dense_sizes, seed=1692):
        self.prices = TT.tensor3()
        self.info = TT.tensor3()
        self.next_trade = TT.tensor3()
        self.actions = TT.matrix()
        self.det_states = TT.matrix()
        states = TT.concatenate(
            [
                self.prices,
                self.info,
                self.next_trade
            ],
            axis=2
        )
        nondet_prepreprocessor = layers.DenseLayer(
            states,
            n_assets * 2 + k_info,
            preprocessed_size,
            TT.nnet.relu,
            normalize_axis=1,
            dupe_weights=True
        )
        nondet_processor = layers.LSTMLayer(
            nondet_prepreprocessor.h_outputs,
            lstm_size,
            preprocessed_size,
            normalize_axis=1,
            dupe_weights=True
        )
        det_processor = layers.DenseLayer(
            self.det_states,
            n_assets * 2 + 1,
            preprocessed_size,
            TT.nnet.relu,
            normalize_axis=1,
            dupe_weights=True
        )
        state_merger = layers.DenseLayer(
            TT.concatenate(
                [
                    nondet_processor.h_outputs[-1, :, :],
                    det_processor.h_outputs
                ],
                axis=1
            ),
            preprocessed_size * 2,
            merge_size,
            TT.nnet.relu,
            normalize_axis=0,
            dupe_weights=True
        )
        # actions join the graph only after the state is summarized
        state_and_actions = TT.concatenate(
            [
                state_merger.h_outputs,
                self.actions
            ],
            axis=1
        )
        dense_layer = layers.DenseLayer(
            state_and_actions,
            merge_size + n_actions,
            dense_sizes[0],
            activation=TT.nnet.relu,
            normalize_axis=0,
            dupe_weights=True
        )
        dense_layer2 = layers.DenseLayer(
            dense_layer.h_outputs,
            dense_sizes[0],
            dense_sizes[1],
            activation=TT.nnet.relu,
            normalize_axis=0,
            dupe_weights=True
        )
        output_layer = layers.DenseLayer(
            dense_layer2.h_outputs,
            dense_sizes[1],
            1,
            normalize_axis=0,
            dupe_weights=True
        )
        # scalar Q(s, a) per example
        self.q_sa = output_layer.h_outputs
        self.layers = [
            nondet_prepreprocessor,
            nondet_processor,
            det_processor,
            state_merger,
            dense_layer,
            dense_layer2,
            output_layer
        ]
| {
"repo_name": "allentran/rl-l2t",
"path": "model/models.py",
"copies": "1",
"size": "6536",
"license": "apache-2.0",
"hash": 166040880560209660,
"line_mean": 24.8339920949,
"line_max": 131,
"alpha_frac": 0.5035189718,
"autogenerated": false,
"ratio": 4.012277470841007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5015796442641007,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Allison MacLeay'
import numpy as np
import pandas as pd
import sys
#import CS6140_A_MacLeay.utils as utils
from CS6140_A_MacLeay import utils
from numpy.linalg import det, pinv, inv
from sklearn.cross_validation import KFold
def compute_accuracy(tp, tn, fp, fn):
    """Accuracy: correct predictions over all predictions."""
    correct = tp + tn
    total = tp + tn + fp + fn
    return float(correct) / total
def compute_ACC(predicted, observed):
    """Accuracy of binary predictions against observed labels."""
    tp, tn, fp, fn = get_performance_stats(predicted, observed)
    return compute_accuracy(tp, tn, fp, fn)
def compute_MSE_arrays(predicted, observed):
    """Mean squared error between two equal-length sequences.

    Returns 0 for empty input; warns (but proceeds) on a length mismatch.
    Fix: the Python-2-only ``print`` statement is parenthesized so the
    module also parses under Python 3 (single-argument form is identical).
    """
    T = len(observed)
    if T != len(predicted):
        print('WARNING: len(o) {} is not equal to len(p) {}'.format(T, len(predicted)))
    observed = list(observed)
    if T == 0:
        return 0
    sig = 0
    for i, p in enumerate(predicted):
        sig += (p - observed[i]) ** 2
    return float(sig) / T
def compute_MSE(predicted, observed):
    """Mean of (predicted - o)^2 / predicted over the observations.

    ``predicted`` is a scalar, ``observed`` a sequence; returns 0 when
    ``observed`` is empty.
    """
    if len(observed) == 0:
        return 0
    total = sum((predicted - o) ** 2 / predicted for o in observed)
    return total / len(observed)
def predict(df, model, binary=False, sigmoid=False, means=None):
    """Linear predictions ``df . model`` after appending a bias column.

    Mutates ``df`` by adding a constant 'b' column.  When ``means`` has the
    same length as the predictions it is added back element-wise
    (re-centering).  With ``binary=True`` predictions are thresholded at
    0.5.  ``sigmoid`` is accepted but unused.
    """
    df['b'] = 1
    predictions = np.dot(df, model)
    if means is not None and len(means) == len(predictions):
        predictions = [value + offset for value, offset in zip(predictions, means)]
    if binary:
        for idx in range(len(predictions)):
            predictions[idx] = 0 if predictions[idx] < .5 else 1
    return predictions
def compute_combined_MSE(A, B):
    """Sum of compute_MSE of each group around its own mean.

    Zero means are nudged by 1e-9 so the downstream division by the mean
    cannot blow up.  Returns 0 when ``A`` is empty.
    """
    if len(A) == 0:
        return 0
    muA = utils.average(A)
    muB = utils.average(B)
    if muA == 0:
        muA += .000000001
    if muB == 0:
        muB += .000000001
    return compute_MSE(muA, A) + compute_MSE(muB, B)
def mse(df, col):
    """Population variance of ``df[col]`` (mean squared deviation)."""
    mu = utils.average(df[col])
    deviations = sum((value - mu) ** 2 for value in df[col])
    return float(deviations) / len(df[col])
def calculate_chisq_error(pred, truth):
    """Mean chi-square-style error: average of (t - p)^2 / t."""
    err = 0
    for i, p in enumerate(pred):
        err += (truth[i] - p) ** 2 / truth[i]
    return err / len(truth)
def calculate_binary_error(pred, truth):
    """Fraction of mismatches between binary vectors ``pred`` and ``truth``.

    Also prints a small confusion summary.  Fix: Python-2-only ``print``
    statements are parenthesized so the module also parses under Python 3
    (single-argument form behaves identically).
    """
    total = len(pred)
    positves_predicted = sum(pred)
    true_positive = sum(np.logical_and(pred, truth))
    true_negative = sum(np.logical_and(np.logical_not(pred), np.logical_not(truth)))
    correct = true_negative + true_positive
    error = float(total - correct)/total
    print('Total: %s' % total)
    print('True Positive: %s' % true_positive)
    print('True Negative: %s' % true_negative)
    print('Positives Predicted: %s' % positves_predicted)
    print('Correctly Predicted: %s' % correct)
    print('Error: %s' % error)
    return error
def binary_info_gain(df, feature, y):
    """
    Joint-positive rate used as an information-gain proxy.

    :param df: input dataframe
    :param feature: binary column to investigate
    :param y: binary column to predict
    :return: fraction of rows where both ``feature`` and ``y`` are 1
    """
    joint = np.logical_and(df[feature], df[y])
    return float(sum(joint)) / len(df[feature])
def get_performance_stats(truth, predict):
    """Confusion-matrix counts [tp, tn, fp, fn] for parallel binary vectors."""
    tp = tn = fp = fn = 0
    for i in range(len(truth)):
        actual, guessed = truth[i], predict[i]
        if guessed == 1 and actual == 1:
            tp += 1
        elif guessed == 1:
            fp += 1
        elif actual == 0:
            tn += 1
        else:
            fn += 1
    return [tp, tn, fp, fn]
def binary_probability(df, y):
    """Probability that binary column ``y`` equals 1 (0 for empty data).

    Fix: the length was compared with ``is 0``; identity comparison of ints
    is a CPython implementation detail -- use ``== 0`` for a value test.
    """
    if len(df[y]) == 0:
        return 0
    return float(sum(df[y])) / len(df[y])
def binary_entropy(df, y):
    """'Entropy' proxy: positive probability of ``y`` scaled by row count."""
    probability = binary_probability(df, y)
    return probability * len(df)
def least_squares(df, y=None):
    """Half the sum of squared deviations from the mean.

    Accepts either a plain list, or a dataframe plus column name ``y``.
    Returns 0 for empty input.
    """
    if type(df) is not list and y is not None:
        df = list(df[y])
    if len(df) == 0:
        return 0
    mu = float(sum(df)) / len(df)
    sigma = sum((value - mu) ** 2 for value in df)
    return sigma / 2
def binary_error(df, y, predicted):
    """Misclassification probability when always predicting ``predicted``.

    Fix: ``predicted is 1`` relied on CPython's small-integer caching;
    use ``== 1`` for a value comparison.
    """
    error = binary_probability(df, y)
    if predicted == 1:
        error = 1 - error
    return error
def get_linreg_w(X, Y):
    """Ordinary least squares: w = (X'X)^-1 X'Y with a bias column.

    X: dataframe of x1, x2, x..., xn
    Y: array of y
    return: w as matrix

    Temporarily adds, then removes, a constant 'b' column on ``X``.
    Fix: the Python-2-only ``print`` statement is parenthesized so the
    module also parses under Python 3.
    """
    print(X)
    X['b'] = 1
    Xt = X.transpose()
    w_den = np.dot(Xt, X)
    w_pre = np.dot(utils.matrix_inverse(w_den), Xt)
    w = np.dot(w_pre, Y)
    del X['b']
    return w
def column_product(w, x):
    """Dot product of weight vector ``w`` with ``x``.

    Fix: a scalar ``np.float64`` was passed to ``list()``, which raises
    TypeError (numpy scalars are not iterable); the intent was clearly to
    wrap the scalar in a one-element list.  Also avoids shadowing the
    builtin ``sum``.
    """
    if type(w) is np.float64:
        w = [w]
    total = 0
    for wi, xi in zip(w, x):
        total += wi * xi
    return total
def dot_product_sanity(X, w):
    """Reference dot product of a keyed matrix ``X`` with vector ``w``.

    Iterates ``X.keys()`` and applies ``column_product`` per key, returning
    a numpy vector of the results.
    """
    result_vector = np.zeros(len(X.keys()))
    for position, key in enumerate(X.keys()):
        result_vector[position] = column_product(X[key], w)
    return result_vector
def check_vector_equality(vec1, vec2):
    """Element-wise comparison of two vectors with a printed verdict.

    Returns True when lengths and all elements match.  Fix: Python-2-only
    ``print`` statements are parenthesized so the module also parses under
    Python 3 (single-argument form behaves identically).
    """
    is_equal = True
    error_msg = []
    count_unequal = 0
    if len(vec1) != len(vec2):
        is_equal = False
        error_msg.append('rows are different sizes ({}, {})'.format(len(vec1), len(vec2)))
    else:
        for i in range(len(vec1)):
            if vec1[i] != vec2[i]:
                is_equal = False
                count_unequal += 1
    if is_equal:
        print('Looks good! Lengths are {}'.format(len(vec1)))
    else:
        print('\n'.join(error_msg))
    return is_equal
def get_linridge_w(X_uncentered, Y, learning_rate):
    """ Linear ridge
    X: dataframe of x1, x2, x..., xn
    Y: array of y
    return: w as matrix """
    #TODO - add mean back in before predict
    # NOTE(review): unlike get_linreg_w, the added 'b' column is never
    # removed, so the caller's dataframe is mutated -- confirm callers
    # (e.g. linear_ridge_points) rely on this.
    X = X_uncentered
    X['b'] = 1
    Xt = X.transpose()
    # ridge solution: (X'X + lambda*I)^-1 X'Y, with `learning_rate` as lambda
    I = np.identity(X.shape[1])
    w_den = np.dot(Xt, X) + np.dot(learning_rate, I)
    #w_den = np.cov(X) + np.dot(learning_rate, I)
    w_pre = np.dot(utils.matrix_inverse(w_den), Xt)
    w = np.dot(w_pre, Y)
    return w
def linear_regression_points(X_old, Y):
    """Per-column fitted contributions w[i] * X[:, i] from an OLS fit.

    Works on a copy of ``X_old`` with a bias column appended.  Fix:
    Python-2-only ``print`` statements are parenthesized so the module
    also parses under Python 3.
    """
    Y_fit = []
    X = pd.DataFrame(X_old.copy())
    X['b'] = np.ones(len(X))
    w = get_linreg_w(X, Y)
    print('w is: ')
    print(w)
    for i, col in enumerate(X.columns):
        Y_fit.append(w[i] * X[col])
    return Y_fit
def linear_ridge_points(X_old, Y, learning_rate=.05):
    """Per-column fitted contributions w[i] * X[:, i] from a ridge fit.

    Fix: Python-2-only ``print`` statements are parenthesized so the module
    also parses under Python 3.
    """
    Y_fit = []
    X = pd.DataFrame(X_old.copy())
    X['b'] = np.ones(len(X))
    w = get_linridge_w(X, Y, learning_rate)
    print('w is: ')
    print(w)
    for i, col in enumerate(X.columns):
        Y_fit.append(w[i] * X[col])
    return Y_fit
def k_folds(df, k):
    """ k folds for hw1 prob 2"""
    # fold size is informational only; KFold does the actual splitting.
    # Fix: parenthesized print for Python 3 compatibility; dropped the
    # unused `folds` accumulator and dead sampling code.
    number = np.floor(len(df)/k)
    print(number)
    kf = KFold(len(df), n_folds=k)
    return kf
def get_error(predict, truth, is_binary):
    """Accuracy for binary targets, otherwise mean squared error."""
    truth = np.array(truth)
    predict = np.array(predict)
    if is_binary:
        return compute_ACC(predict, truth)
    return compute_MSE_arrays(predict, truth)
def log_likelihood(array):
    """Log of the product of the entries (treated as likelihoods)."""
    product = 1
    for value in array:
        product *= value
    return np.log(product)
def init_w(size):
    """Random weight column (uniform [0, 1)) wrapped in a dataframe."""
    weights = pd.DataFrame(np.random.random(size))
    return weights.reindex()
def summary(array):
    """ returns mean and variance"""
    # Fix: `len(d)` referenced an undefined name `d` (NameError at call
    # time); the length of `array` was clearly intended.
    return [utils.average(array), utils.variance(array, len(array))]
""" Added for lin ridge """
def pandas_to_data(df):
    """Convert a dataframe to a plain list of row lists."""
    rows = []
    for i in range(len(df)):
        series = df.iloc[i]
        rows.append([series[j] for j in range(len(series))])
    return rows
def transpose_array(arr):
    """Transpose a rectangular list-of-lists; an empty list stays empty."""
    transposed = []
    for row_index in range(len(arr)):
        if row_index == 0:
            # size the output by the first row's width
            transposed = [[] for _ in range(len(arr[0]))]
        for col_index in range(len(arr[row_index])):
            transposed[col_index].append(arr[row_index][col_index])
    return transposed
def multivariate_normal(covar_matrix, x_less, alpha=1):
    """
    Multivariate normal density evaluated at ``x_less`` (= x - mu).

    :param covar_matrix: covariance matrix; its diagonal is floored at
        epsilon = alpha / d before use
    :param x_less: X - u , u is a vector of mu
    :param alpha: scales the diagonal floor
    :return: scalar density value

    Fix: the Python-2-only ``print`` statement is parenthesized so the
    module also parses under Python 3.
    """
    covar_matrix = np.array(covar_matrix)
    x_less = utils.to_col_vec(np.asarray(x_less))
    epsilon = float(alpha * 1) / len(covar_matrix)
    set_diag_min(covar_matrix, epsilon)
    d = len(x_less)
    prob = float(1) / ((2 * np.pi) ** (float(d) / 2))
    determinant = det(covar_matrix)
    if determinant == 0:
        # warning only; det**-0.5 below will still fail on a singular matrix
        print('Determinant matrix cannot be singular')
    prob = prob * 1.0 / (determinant ** (float(1) / 2))
    inverse = pinv(covar_matrix)
    dot = np.dot(np.dot(x_less.T, inverse), x_less)
    prob = prob * np.exp(-float(1) / 2 * dot)
    return prob[0][0]
def set_diag_min(matrix, epsilon):
    """Raise diagonal entries of ``matrix`` to at least ``epsilon``, in place."""
    for row_index, row in enumerate(matrix):
        for col_index in range(len(row)):
            if row_index == col_index and row[col_index] < epsilon:
                row[col_index] = epsilon
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "utils/Stats.py",
"copies": "1",
"size": "9293",
"license": "mit",
"hash": 807885298913479300,
"line_mean": 24.6005509642,
"line_max": 90,
"alpha_frac": 0.5583772732,
"autogenerated": false,
"ratio": 3.126850605652759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9153560764783475,
"avg_score": 0.00633342281385667,
"num_lines": 363
} |
__author__ = 'Allison MacLeay'
from copy import deepcopy
import numpy as np
import CS6140_A_MacLeay.utils.NNet_5 as N5
"""
Neural Network
t - target - vector[k]
z - output - vector[k]
y - hidden - vector[j]
x - input - vector[i]
wji - weight between x and y - matrix[i, j]
wji[ 11 12 13 14 1j
21 22 23 24 2j
i1 i2 i3 i4 ij ]
wkj - weight between y and z - matrix[j, k]
wkj[ 11 12 13 14 1k
21 22 23 24 2k
j1 j2 j3 j4 jk ]
wji_apx - wji approximation
wkj_apx - wkj approximation
outputs_apx - outputs approximation
theta - no idea - bias maybe? init to 1...
i = 8
j = 3
k = 8
wji[8, 3]
wkj[3, 8]
"""
class NeuralNet():
    """State holder for the toy 8-3-8 network.

    Layer 0 maps inputs -> hiddens via ``wji`` (8x3); layer 1 maps
    hiddens -> outputs via ``wkj`` (3x8).
    """
    def __init__(self):
        """
        :rtype : object
        """
        self.wji, self.wkj = init_apx()
        self.inputs, self.hiddens, self.outputs = init_nnet()
        self.learning_rate = .0005
    def get_wlayer_i(self, layer, i):
        # dispatch to the input->hidden or hidden->output row getter
        if layer == 0:
            return self.get_wi(i)
        else:
            return self.get_wj(i)
    def get_wlayer(self, layer):
        # whole weight matrix for the requested layer
        if layer == 0:
            return self.wji
        else:
            return self.wkj
    def get_wj(self, i):
        # row i of the hidden->output weights
        return self.wkj[i]
    def get_wi(self, j):
        # column j of the input->hidden weights, gathered across rows
        arr = []
        for k in self.wji.keys():
            arr.append(self.wji[k][j])
        return arr
    def get_output(self, layer, i):
        # activations fed by the given layer: hidden vector for layer 0,
        # final output vector otherwise
        output = []
        if layer == 0:
            # hidden
            output = self.hiddens[i]
        else:
            # final output
            output = self.outputs[i]
        return output
    def xget_tuple(self, i, j):
        """
        i(0-7) j(0-2) i(0-7)
        """
        check_it(self.inputs, self.hiddens, self.outputs)
        return self.inputs[i], self.hiddens[i], self.outputs[i]
    def update_weights(self, E, O):
        # placeholder: the actual weight-update loop is still commented out
        lwji = len(self.wji)
        lwkj = len(self.wkj)
        #for i in range(lwji):
        #    for k in range(lwkj):
        #        delta_ji = self.learning_rate * E[i] * O[k]
    def get_tuple(self, i):
        # (input, hidden, output) vectors for example i, with debug checks
        check_it(self.inputs, self.hiddens, self.outputs)
        return self.inputs[i], self.hiddens[i], self.outputs[i]
def init_nnet():
    """Build the 8 one-hot inputs, matching outputs, and fixed hidden values."""
    inputs = {}
    for position in range(0, 8):
        one_hot = [0, 0, 0, 0, 0, 0, 0, 0]
        one_hot[position] = 1
        inputs[position] = one_hot
    # autoencoder targets mirror the inputs
    outputs = deepcopy(inputs)
    # fixed starting hidden activations (y)
    hiddens = {0: [.89, .04, .08],
               1: [.15, .99, .99],
               2: [.01, .97, .27],
               3: [.99, .97, .71],
               4: [.03, .05, .02],
               5: [.01, .11, .88],
               6: [.80, .01, .98],
               7: [.60, .94, .01]}
    check_it(inputs, hiddens, outputs)
    return inputs, hiddens, outputs
def check_it(inputs, hiddens, outputs):
    """Debug dump verifying the state dicts were copied by value.

    Temporarily mutates ``inputs[4][0]`` (then restores it) so the printed
    ``outputs`` reveals whether the structures share storage.  Fix:
    Python-2-only ``print`` statements are parenthesized so the module also
    parses under Python 3.
    """
    print('checking inputs')
    print(inputs)
    print(outputs)
    print(hiddens)
    # make sure python isn't stupid and was copied by value and not reference
    # except it is... so use slice notation [:] for array and dict.copy() for hash
    print('checking pbv')
    inputs[4][0] = 999
    print(inputs)
    print(outputs)
    inputs[4][0] = 0
def init_apx():
    """Initial weight matrices: wji is 8x3 and wkj is 3x8 (evenly spaced)."""
    # Initialize all weights and biases in network
    range_start = 0
    range_end = 1
    wji_apx = {}
    wkj_apx = {}
    for row in range(0, 8):
        wji_apx[row] = random_array(range_start, range_end, 3)
    for row in range(0, 3):
        wkj_apx[row] = random_array(range_start, range_end, 8)
    return wji_apx, wkj_apx
def random_array(start, end, size):
step = float(end - start) / size
arr = []
for i in range(0, size):
arr.append(start + step * i)
return arr
def init_theta(size):
    """Zero-initialized bias vector of the given length."""
    # theta should be 0 to start #TODO verify this
    return [0 for _ in range(0, size)]
def run_all(inputs, hiddens, outputs, num): # num is just for testing. should iterate through entire set
    """Draft of one forward/backward pass of the 8-3-8 net.

    NOTE(review): work-in-progress with several suspect spots, flagged
    inline.  The inputs/hiddens/outputs parameters are unused -- the data
    comes from the NeuralNet instance instead.
    """
    # run NNet for num examples in training set
    layers = 2 # I will iterate through layers using layer
    nn = NeuralNet()
    wji_apx, wkj_apx = init_apx()
    theta = init_theta(layers)
    sum_j = {}
    #for i in range(0, num):
    for i in [0]:
        #input, hidden, output = get_tuple(inputs, hiddens, outputs, i)
        #input = []
        input, hidden, output = nn.get_tuple(i)
        # This should happen for the entire set before this
        #wji, wkj = init_apx()
        # initialize error matrix
        # NOTE(review): this loop reuses `i`, clobbering the outer example
        # index for the rest of the iteration.
        err = []
        for i in range(layers):
            err.append(0)
        for layer in range(layers):
            # propogate inputs forward
            O = nn.get_output(layer, i)[:] # returns 3 for i=0 and 8 for i=1
            T = O[:] # target
            print 'length of O is {} hiddens[0] {}'.format(len(O), len(nn.hiddens[0]))
            wlayer = nn.get_wlayer(layer) # this should return 8 weights for i=0
            o_length = len(O)
            # pad O with zeros up to the weight-layer width
            for append_i in range(len(O), len(wlayer)):
                O.append(0)
            print 'wlayer is {} length wlayer is {}'.format(wlayer, len(wlayer))
            for j in range(len(wlayer)):
                wj = wlayer[j]
                sumk = 0
                for k in range(o_length):
                    print 'counter {}: {} += {} * {}'.format(k, sumk, O[k], wlayer[j][k])
                    sumk += O[k] * wlayer[j][k]
                input[j] = sumk + theta[layer]
                # logistic activation
                O[j] = float(1)/(1 + np.exp(-input[j]))
            err[layer] = []
            for j in range(o_length):
                print 'J IS {}'.format(j)
                err[layer].append(O[j] * (1-O[j]) * (T[j] - O[j]))
                layer_ct = layer + 1
                sum_layer = 0
                # propagate error back from deeper layers, if any
                while layers + 1 > layer_ct < len(err):
                    if err[layer_ct] > 0:
                        weights = nn.get_wlayer(layer_ct)
                        for w_ct in range(len(weights)):
                            sum_layer += err[layer_ct] * weights[w_ct][j]
                    layer_ct += 1
                # NOTE(review): this replaces the whole err[layer] list with
                # a scalar, discarding the per-unit deltas appended above.
                if sum_layer != 0:
                    err[layer] = O[j] * (1 - O[j]) * sum_layer
                #print 'layer {} new Oj = {}'.format(layer, O[j])
            print 'len O for layer=0 should be 8 layer {} len {}'.format(layer, len(O))
            print err[layer]
            print O
        # Outside layer loop
        nn.update_weights(err, O)
def xrun_all():
    """Abandoned forward-pass sketch.

    NOTE(review): dead code -- `I[0] = ...` indexes an empty list
    (IndexError) and `layer` is undefined inside the loop (it iterates
    `j`).  Kept for reference; see run_all for the working draft.
    """
    layers = 2 # I will iterate through layers using layer
    nn = NeuralNet()
    wji_apx, wkj_apx = init_apx()
    theta = init_theta(layers)
    I = []
    O = []
    I[0] = nn.hiddens[0][0]
    # For each training tuple
    for j in range(layers): # layer (0-1)
        I = nn.get_inputs(layer)
        #sum_i +=
def run():
    """Entry point: dispatch to the N5 experiment (earlier variants kept commented)."""
    #inputs, hiddens, outputs = init_nnet()
    #run_all(inputs, hiddens, outputs, 1)
    #N3.run()
    #N2.run()
    #N4.run_autoencoder()
    # NOTE(review): N5 is not defined in this file -- presumably imported
    # elsewhere in the module; verify before calling.
    N5.run()
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "utils/NNet.py",
"copies": "1",
"size": "7156",
"license": "mit",
"hash": -8088037296961689000,
"line_mean": 25.2124542125,
"line_max": 106,
"alpha_frac": 0.503353829,
"autogenerated": false,
"ratio": 3.2205220522052205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.422387588120522,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Allison MacLeay'
from CS6140_A_MacLeay import utils
import numpy as np
import pandas as pd
import CS6140_A_MacLeay.utils.Stats as mystats
class mockStump(object):
    """sklearn-tree-shaped stand-in exposing .feature / .threshold arrays.

    Slots 1 and 2 hold the sentinel -2, mimicking the child markers of
    sklearn's fitted tree_ arrays.
    """
    def __init__(self, feature, threshold):
        sentinel = -2
        self.feature = [feature, sentinel, sentinel]
        self.threshold = [threshold, sentinel, sentinel]
class Tree(object):
    """Binary decision tree assembled from Branch nodes.

    Subclasses provide initialize_branch(), which fixes the Branch flavor
    (optimal-entropy vs random threshold selection).
    """
    def __init__(self, max_depth=3):
        self.presence_array = None   # 1/0 row mask for the root
        self.converged = False
        self.model = []              # DecisionStump per grown branch
        self.training_info = []
        self.head = None             # root Branch
        self.leaves = []             # NOTE(review): never populated -- see grow_tree
        self.max_depth = max_depth
        self.weights = None          # per-row sample weights
        self.tree_ = None            # mockStump mirror of the root split

    def fit(self, X, y, sample_weight=None): # d is weights
        """Grow the tree on X/y; sample_weight defaults to uniform ones."""
        self.presence_array = np.ones(len(X))
        if sample_weight is None:
            sample_weight = np.ones(len(X))
        self.weights = sample_weight
        self.possible_thresholds = self.get_initial_thresholds(X)
        self.head = self.initialize_branch(X, y)
        self.grow_tree(self.head, X, y, self.max_depth)
        self.tree_ = mockStump(self.head.feature, self.head.threshold)
        self.print_tree()
        return self

    def predict(self, X):
        """Route X down the tree and label rows by their leaf's majority class.

        NOTE(review): self.leaves is never filled (the appends in grow_tree
        are commented out), so as written every row keeps the default 1.
        """
        self.head.predict(X)
        predict_array = np.ones(len(X))
        for leaf in self.leaves:
            predicted = 1 if leaf.probability > .5 else -1
            for i in range(len(leaf.presence_array)):
                if leaf.presence_array[i] == 1:
                    predict_array[i] = predicted
        return predict_array

    def initialize_branch(self, X, y):
        """Subclass hook: build the root Branch."""
        raise NotImplementedError

    def get_initial_thresholds(self, data):
        """Candidate thresholds per column: midpoints between consecutive
        unique values, padded by +/- .01 beyond the extremes."""
        by_col = utils.transpose_array(data)
        thresholds = []
        start = 100  # NOTE(review): unused
        for j in range(len(by_col)):
            col_thresholds = []
            feature_j = [float(i) for i in np.array(by_col[j])]
            values = list(set(feature_j))
            values.sort()
            col_thresholds.append(values[0] - .01)
            for i in range(1, len(values)):
                mid = (values[i] - values[i-1])/2
                col_thresholds.append(values[i-1] + mid)
            col_thresholds.append(values[-1] + .01)
            thresholds.append(col_thresholds)
        return thresholds

    def grow_tree(self, this_branch, X, y, max_depth):
        """Recursively split until the branch converges or depth runs out."""
        if self.weights is None:
            self.weights = np.ones(len(X))
        this_branch.split_branch(X, y)
        stump = DecisionStump(this_branch.feature, this_branch.threshold)
        self.model.append(stump)
        if max_depth <= 1:
            this_branch.converged = True
        left = this_branch.left_child
        right = this_branch.right_child
        if not this_branch.converged:
            max_depth -= 1
            self.grow_tree(left, X, y, max_depth)
            self.grow_tree(right, X, y, max_depth)
        else:
            #if left is not None:
            #    left.make_leaf(X, y)
            #    self.leaves.append(left)
            #if right is not None:
            #    right.make_leaf(X, y)
            #    self.leaves.append(right)
            this_branch.make_leaf(X, y)

    def print_tree(self):
        """Recursive dump of the tree starting at the root."""
        self.head.print_branch(True)
class Branch(object):
def __init__(self, X, y, presence_array, level=1, parent_branch=None, theta=0.01, weights=None):
if weights is None:
weights = np.ones(len(X))
self.presence_array = presence_array
self.theta = theta
self.data_subset, self.truth_subset, self.weights_subset = self.get_subset(X, y, weights)
self.feature = None
self.threshold = None
self.unique = 100000
self.parent = parent_branch
self.left_child = None
self.right_child = None
self.converged = False
self.info_gain = 0
self.level = level
self.leaf = False
self.probability = None
self.entropy = binary_entropy(self.truth_subset)
def get_stump(self):
#self.feature
return DecisionStump(self.feature, self.threshold)
def predict(self, X):
self.set_arrays(X)
def set_arrays(self, X):
left_X, right_X = self.split_presence_array(X, self.feature, self.threshold)
if len(left_X) > 0 and self.left_child is not None and not self.left_child.leaf:
self.left_child.presence_array = left_X
self.left_child.set_arrays(X)
if len(right_X) > 0 and self.right_child is not None and not self.right_child.leaf:
self.right_child.presence_array = right_X
self.right_child.set_arrays(X)
def predict_split(self, X):
presence_A, presence_B = self.split_presence_array(X, self.feature, self.threshold)
if not self.converged:
self.left_child = Branch(X, y, presence_A, self.level+1, parent_branch=self)
self.right_child = Branch(X, y, presence_B, self.level+1, parent_branch=self)
def print_branch(self, recursive=True):
text = ''
if self.converged:
text = 'Last split: '
if self.leaf:
text = 'LEAF P {} N {} '.format(self.probability, sum(self.presence_array))
text += 'Level {}: feature: {} threshold: {} entropy: {} info gain: {}'.format(self.level, self.feature, self.threshold, self.entropy, self.info_gain)
print text
if self.right_child is not None:
self.right_child.print_branch(recursive)
if self.left_child is not None:
self.left_child.print_branch(recursive)
def make_leaf(self, X, y):
self.leaf = True
_, truth, _ = self.get_subset(X, y, None)
self.probability = float(truth.count(1))/len(truth)
#self.probability = float(y.count(1))/len(y)
def update_leaf_status(self):
if len(self.data_subset) > 0 and len(set(self.data_subset[0])) < 2:
self.converged = True
if self.info_gain < self.theta:
self.converged = True
def split_branch(self, X, y):
self.converged = False
self.feature, self.info_gain, self.threshold = self.choose_best_feature()
self.update_leaf_status()
presence_A, presence_B = self.split_presence_array(X, self.feature, self.threshold)
self.update_leaf_status()
if not self.converged:
self.left_child = self.add_branch(X, y, presence_A)
self.right_child = self.add_branch(X, y, presence_B)
def add_branch(self, X, y, presence_array):
raise NotImplementedError
def split_presence_array(self, X, column, threshold):
array_l = []
array_r = []
by_col = utils.transpose_array(X)
data = by_col[column]
for i in range(len(data)):
if self.presence_array[i] == 1:
if data[i] > threshold:
array_l.append(0)
array_r.append(1)
else:
array_l.append(1)
array_r.append(0)
else:
array_l.append(0)
array_r.append(0)
return array_l, array_r
def choose_best_feature(self):
by_col = utils.transpose_array(self.data_subset)
max_info_gain = -1
min_weighted_error = 1.5
best_col = None
col_threshold = None
for j in range(len(by_col)):
info_gain, threshold, weighted_error = self.compute_info_gain(by_col[j], self.truth_subset)
#TODO - fix objective function so it is organized
#if info_gain > max_info_gain:
# best_ig_col = j
# max_info_gain = info_gain
# col_threshold = threshold
if weighted_error < min_weighted_error:
best_col = j
min_weighted_error = weighted_error
max_info_gain = info_gain
col_threshold = threshold
if best_col is None:
print "BEST COL is NONE"
self.print_branch(False)
return best_col, max_info_gain, col_threshold
def compute_info_gain(self, column, y):
theta = self.choose_theta(column, y)
#theta = optimalSplit(column)
entropy_after = get_split_info_gain(column, y, theta)
weighted_error = get_split_error(column, y, theta, self.weights_subset)
if self.entropy != binary_entropy(y):
print 'FALSE'
info_gain = self.entropy - entropy_after
#print 'Information Gain: %s' % info_gain
return info_gain, theta, weighted_error
def get_distance_from_mean(self, column, y):
theta = self.choose_theta(column, y)
d_from_m = get_split_error(column, y, theata, self.weights_subset)
def choose_theta(self, column, truth):
raise NotImplementedError
def get_subset(self, data, y, d=None):
subset = []
truth = []
weights=[] if d is not None else None
for i in range(len(data)):
if self.presence_array[i] == 1:
subset.append(data[i])
truth.append(y[i])
if weights is not None:
weights.append(d[i])
return subset, truth, weights
def binary_entropy(truth):
    """H(q) = entropy (base 2) of the fraction of 1-labels in `truth`."""
    if not truth:
        return 0
    p = float(truth.count(1)) / len(truth)
    # Inlined entropy: degenerate distributions carry no information.
    if p == 0 or p == 1:
        return 0
    return -np.log2(p) * p - (np.log2(1 - p) * (1 - p))
def calc_entropy(prob):
    """Binary entropy (base 2) of Bernoulli(prob); 0 at the endpoints."""
    if prob in (0, 1):
        return 0
    q = 1 - prob
    return -(prob * np.log2(prob) + q * np.log2(q))
def get_split_info_gain(column, y, theta):
    """Weighted entropy of the two label partitions induced by threshold theta."""
    left_y = []
    right_y = []
    for value, label in zip(column, y):  # bestSplit(column)
        if value < theta:
            left_y.append(label)
        else:
            right_y.append(label)
    n = len(y)
    left_term = float(len(left_y)) / n * binary_entropy(left_y)
    right_term = float(len(right_y)) / n * binary_entropy(right_y)
    return left_term + right_term
def get_split_error(column, y, theta, d):
    """Weighted misclassification error of a majority-vote split at theta.

    Weights d are normalized to sum to 1; each side predicts its majority
    class (ties predict 1), and the weights of misclassified rows are summed.
    """
    total = float(sum(d))
    norm = [di / total for di in d]
    left = []   # (label, weight) pairs with value < theta
    right = []  # the rest
    for value, label, weight in zip(column, y, norm):
        if value < theta:
            left.append((label, weight))
        else:
            right.append((label, weight))
    error = 0
    for side in (left, right):
        ones = sum(1 for label, _ in side if label == 1)
        frac_ones = float(ones) / len(side) if side else 0
        prediction = 1 if frac_ones >= .5 else -1
        error += sum(weight for label, weight in side if label != prediction)
    return error
def randomSplit(array):
    """Uniform random value inside the lowest 1/len(array) slice of the range."""
    values = np.array(array)
    lo = values.min()
    hi = values.max()
    interval = (hi - lo) / len(values)
    return lo + np.random.random() * interval
class TreeOptimal(Tree):
    """Decision tree whose branches pick thresholds by exhaustive entropy search."""

    def __init__(self, max_depth=3):
        super(TreeOptimal, self).__init__(max_depth)
        self.possible_thresholds = None

    def initialize_branch(self, X, y):
        """Root branch covering every row, carrying the fitted sample weights."""
        return BranchOptimal(X, y, self.presence_array, weights=self.weights)
class BranchOptimal(Branch):
    """Branch that scans sorted (value, label) pairs for the threshold with
    minimum weighted child entropy."""
    def choose_theta(self, column, truth):
        """Return the midpoint threshold minimizing split entropy.

        NOTE(review): the scan assumes element 0 sits on side A and stops at
        len-2, so the last element is never evaluated as a cut point; with a
        single unique value best_i falls back to 0 and feature_y[best_i + 1]
        assumes at least two rows exist -- confirm on tiny inputs.
        """
        feature_y = zip(column, truth)
        feature_y.sort()
        last = feature_y[0][0]
        num_a = 1
        num_b = len(column) - 1
        num_ones_a = 1 if feature_y[0][1] == 1 else 0
        num_ones_b = truth.count(1) - num_ones_a
        best_val = 3 #random initialization > 2
        best_i = None
        for i in range(1, len(column) - 1):
            if feature_y[i][1] == 1:
                num_ones_a += 1
                num_ones_b -= 1
            num_a += 1
            num_b -= 1
            if feature_y[i][0] == last:
                # duplicate value: same cut as before, skip re-scoring
                continue
            last = feature_y[i][0]
            perc_a = float(num_a)/len(column)
            perc_b = float(num_b)/len(column)
            prob_a = float(num_ones_a)/num_a
            prob_b = float(num_ones_b)/num_b
            # weighted entropy of the two candidate children
            new_val = perc_a * calc_entropy(prob_a) + perc_b * calc_entropy(prob_b)
            if new_val < best_val:
                best_val = new_val
                best_i = i
        if best_i is None:
            best_i = 0 # only 1 unique element
        # split halfway between the chosen value and its successor
        mid = (feature_y[best_i + 1][0] - feature_y[best_i][0]) * .5
        return feature_y[best_i][0] + mid

    def add_branch(self, X, y, presence_A):
        """Spawn a child BranchOptimal one level deeper."""
        return BranchOptimal(X, y, presence_A, self.level+1, parent_branch=self)
class TreeRandom(Tree):
    """Decision tree whose branches pick split thresholds uniformly at random."""

    def __init__(self, max_depth=3):
        super(TreeRandom, self).__init__(max_depth)
        self.possible_thresholds = None

    def initialize_branch(self, X, y):
        """Root branch covering every row, carrying the fitted sample weights."""
        return BranchRandom(X, y, self.presence_array, weights=self.weights)
class BranchRandom(Branch):
    """Branch variant that delegates threshold selection to randomSplit()."""

    def choose_theta(self, column, truth):
        """Random threshold; the label vector is intentionally ignored."""
        return randomSplit(column)

    def add_branch(self, X, y, presence_A):
        """Spawn a child BranchRandom one level deeper."""
        return BranchRandom(X, y, presence_A, self.level+1, parent_branch=self)
class DecisionStump(object):
    """Immutable record of a single-feature split: (feature index, float threshold)."""

    def __init__(self, feature, threshold):
        self.feature = feature
        # threshold is normalized to float so comparisons are consistent
        self.threshold = float(threshold)
def split_truth_from_data(data, replace_zeros=True):
    """Assumes that the truth column is the last column.

    Returns (truth_rows, data_rows); 0-labels become -1 unless
    replace_zeros is False.
    """
    columns = utils.transpose_array(data)
    truth_rows = columns[-1]                          # truth is by row
    data_rows = utils.transpose_array(columns[:-1])   # data is by column
    if replace_zeros:
        truth_rows = [-1 if label == 0 else label for label in truth_rows]
    return truth_rows, data_rows
def partition_folds(data, percentage):
    """Draw two independent random subsets of `data`.

    Each subset holds int(len(data) * percentage) rows, sampled without
    replacement within a subset (the two subsets may overlap since each
    gets its own shuffle).

    Cleanups vs the original: the precomputed `num` is actually used,
    the loop variable no longer shadows the builtin `set`, and `xrange`
    was replaced with `range` (identical iteration behavior).
    """
    num = int(len(data) * percentage)
    folds = [[], []]
    for fold in folds:
        order = np.arange(len(data))
        np.random.shuffle(order)
        for i in range(num):
            fold.append(data[order[i]])
    return folds
def partition_folds_q4(data, percentage):
    """Shuffle once, then deal rows into three folds.

    The first two folds alternate rows until each holds
    int(len(data) * percentage) of them; the third fold takes the
    remainder.  (Removed the unused `num` local and swapped the
    py2-only `xrange` for `range`; iteration behavior is unchanged.)
    """
    fold_size = int(len(data) * percentage)
    order = np.arange(len(data))
    np.random.shuffle(order)
    folds = [[], [], []]
    first_two = fold_size * 2
    for i in range(first_two):
        folds[i % 2].append(data[order[i]])
    for i in range(first_two, len(data)):
        folds[2].append(data[order[i]])
    return folds
def compute_mse(yhat, y):
    """Mean squared error between predictions yhat and targets y."""
    total = sum((pred - obs) ** 2 for pred, obs in zip(yhat, y))
    return float(total) / len(y)
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "Homeworks/HW4/__init__.py",
"copies": "1",
"size": "14623",
"license": "mit",
"hash": -8056152829399094000,
"line_mean": 32.4622425629,
"line_max": 159,
"alpha_frac": 0.5651371128,
"autogenerated": false,
"ratio": 3.4189852700490997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9459589254757448,
"avg_score": 0.004906625618330468,
"num_lines": 437
} |
__author__ = 'Allison MacLeay'
from sklearn.tree import DecisionTreeClassifier
import CS6140_A_MacLeay.Homeworks.HW4.data_load as dl
import numpy as np
class Bagging(object):
def __init__(self, max_rounds=10, sample_size=10, learner=DecisionTreeClassifier):
self.max_rounds = max_rounds
self.sample_size = sample_size
self.learner = learner
self.predictions = []
self.hypotheses = []
self.train_error = 0
def fit(self, X, y):
for round in xrange(self.max_rounds):
sub_X, sub_y = dl.random_sample(X, y, size=self.sample_size)
hypothesis = self.learner().fit(sub_X, sub_y)
pred_y = hypothesis.predict(sub_X)
error = float(sum([0 if py == ty else 1 for py, ty in zip(pred_y, sub_y)]))/len(sub_y)
print 'Round error: {}'.format(error)
self.predictions.append(pred_y)
self.hypotheses.append(hypothesis)
pred_bagged = self.predict(sub_X)
train_error = float(sum([0 if py == ty else 1 for py, ty in zip(pred_bagged, sub_y)]))/len(sub_y)
print 'Bagged Train Error: {}'.format(train_error)
def predict(self, X):
h_pred = []
for h in self.hypotheses:
h_pred.append(h.predict_proba(X))
return self.get_bagged_prediction(h_pred)
def get_bagged_prediction(self, hpred):
size = len(hpred[0])
p_sum = np.zeros(size)
for pred in hpred:
pred = self.unzip_prob(pred)
for p in range(len(pred)):
p_sum[p] += pred[p]
return [1 if float(p)/len(hpred) >= .5 else -1 for p in p_sum]
def _check_y(self, y):
if {1, 0}.issubset(set(y)):
return y
elif {-1, 1}.issubset(set(y)):
return [1 if yi == 1 else 0 for yi in y]
else:
raise ValueError("Bad labels. Expected either 0/1 or -1/1, but got: {}".format(sorted(set(y))))
def unzip_prob(self, y):
# probability of being in class 1
return list(zip(*y)[1])
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "Homeworks/HW4/bagging.py",
"copies": "1",
"size": "2071",
"license": "mit",
"hash": -2617123278695067000,
"line_mean": 34.7068965517,
"line_max": 109,
"alpha_frac": 0.5697730565,
"autogenerated": false,
"ratio": 3.308306709265176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9281708520005263,
"avg_score": 0.019274249151982698,
"num_lines": 58
} |
__author__ = 'Allison MacLeay'
from sklearn.tree import DecisionTreeRegressor
import CS6140_A_MacLeay.Homeworks.HW4 as hw4
import numpy as np
class GradientBoostRegressor(object):
def __init__(self, n_estimators=10, learning_rate=0.1, max_depth=1, learner=DecisionTreeRegressor):
self.train_score = 0
self.max_rounds = n_estimators
self.learner = learner
self.learning_rate = learning_rate #TODO - unused variable
self.max_depth = max_depth
self.hypotheses = []
self.mean = None
self.training_error = []
self.local_error = []
def fit(self, X, y):
X = np.asarray(X)
y = np.asarray(y)
self.mean = np.mean(y)
#y = np.asarray([self.mean]*len(y))
#hypothesis = self.learner().fit(X, y)
#self.hypotheses.append(hypothesis)
for round in xrange(self.max_rounds):
residual = [(yn - yl) for yn, yl in zip(y, self.predict(X))]
hypothesis = self.learner().fit(X, residual)
self.hypotheses.append(hypothesis)
self.local_error.append(hw4.compute_mse(residual, hypothesis.predict(X)))
pred_round = self.predict(X)
self.train_score = hw4.compute_mse(pred_round, y)
self.training_error.append(self.train_score)
def predict(self, X):
X = np.asarray(X)
#predictions = np.array([self.mean] * X.shape[0])
predictions = np.zeros(len(X))
for h in self.hypotheses:
predictions += h.predict(X)
return predictions
def print_stats(self):
for r in range(len(self.training_error)):
print 'Round {}: training error: {}'.format(r, self.local_error[r], self.training_error[r])
def decision_function(self):
pass
def loss(self, y, yhat, weights):
return sum([(yh - yt)**2 for yh, yt, w in zip(yhat, y, weights)]) * .5
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "utils/GradientBoost.py",
"copies": "1",
"size": "1921",
"license": "mit",
"hash": 5105505519243310000,
"line_mean": 32.701754386,
"line_max": 103,
"alpha_frac": 0.5991671005,
"autogenerated": false,
"ratio": 3.4120781527531086,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45112452532531083,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Allison MacLeay'
import CS6140_A_MacLeay.Homeworks.HW3 as hw3
import CS6140_A_MacLeay.utils as utils
import numpy as np
from copy import deepcopy
class NaiveBayes():
    """Naive Bayes classifier with four model flavors selected by model_type:
    0 = above/below-mean Bernoulli features, 1 = per-feature Gaussian,
    2 = 4-bin histogram, 3 = 9-bin histogram.  Labels are assumed 0/1.
    """
    def __init__(self, model_type, alpha=1, ignore_cols = []):
        self.model_type = model_type
        self.train_acc = 0
        self.test_acc = 0
        self.model = []
        self.alpha = alpha # smoothing parameter
        self.data_length = 0
        self.cutoffs = None       # per-column bin boundaries (bin models)
        self.y_prob = None        # P(y == 1) from training labels
        self.ignore_cols = ignore_cols

    def train(self, data_rows, truth, ignore_cols=[]):
        """Fit the model selected by self.model_type on rows + 0/1 labels."""
        self.data_length = len(data_rows)
        if self.model_type == 0:
            self.model = self.model_average_train(data_rows, truth)
        if self.model_type == 1:
            self.model = self.model_gaussian_rand_var_train(data_rows, truth)
        if self.model_type == 2:
            self.model = self.model_bin_train(data_rows, truth, 4)
        if self.model_type == 3:
            self.model = self.model_bin_train(data_rows, truth, 9)

    def predict(self, data_rows, theta=.5):
        """Return 0/1 predictions; theta is the decision threshold on P(y=1|x)."""
        prediction = []
        if self.model_type == 0:
            prediction = self.model_average_predict(data_rows, theta=theta)
        if self.model_type == 1:
            prediction = self.model_gaussian_rand_var_predict(data_rows, theta=theta)
        if self.model_type > 1:
            prediction = self.model_bin_predict(data_rows, theta=theta)
        return prediction

    def model_average_train(self, data_row, truth):
        """ return [prob_over_given_1, prob_over_given_0, prob_y1]
            prob_over_give_x = col1[mu, var, proabality], colx[mu, var, prob] ...
        """
        mus = hw3.get_mus(data_row)
        is_not_spam = hw3.get_sub_at_value(data_row, truth, 0)
        is_spam = hw3.get_sub_at_value(data_row, truth, 1)
        prob_over = get_prob_over(data_row, mus)
        prob_over_given_1 = get_prob_over(is_spam, mus)
        prob_over_given_0 = get_prob_over(is_not_spam, mus)
        l0 = len(prob_over_given_0)
        l1 = len(prob_over_given_1)
        # NOTE(review): if one class is empty its probability vector is
        # replaced with zeros of the length DIFFERENCE, not the full
        # column count -- looks suspect, confirm intent.
        if l1 != l0:
            addx = abs(l1-l0)
            fake_row = [0 for _ in range(addx)]
            if l1 > l0:
                prob_over_given_0 = fake_row
            else:
                prob_over_given_1 = fake_row
        prob_y1 = float(sum(truth))/len(truth)
        self.y_prob = prob_y1
        return [prob_over_given_1, prob_over_given_0, prob_over, prob_y1]

    def model_bin_train(self, data_row, truth, num_bins=2):
        """Histogram model: learn per-column bin boundaries and, per label,
        the probability of each bin."""
        #TODO add epsilon
        model = {}
        cutoffsc = [[] for _ in range(len(data_row[0]))]
        dmat = np.matrix(data_row)
        drange = dmat.max() - dmat.min()
        bin_size = float(drange) / num_bins
        data_col = hw3.transpose_array(data_row)
        for j in range(len(data_col)):
            #cutoffsc.append([min(data_col)[0] + bin_size * i for i in range(num_bins)])
            mu = np.asarray(data_col[j]).mean()
            low_mu = np.asarray([data_col[j][i] for i in range(len(data_col[j])) if data_col[j][i] < mu]).mean()
            high_mu = np.asarray([data_col[j][i] for i in range(len(data_col[j])) if data_col[j][i] > mu]).mean()
            # NOTE(review): min(data_col)[0] takes element 0 of the
            # lexicographically-smallest COLUMN, not the minimum of column
            # j -- probably meant min(data_col[j]); confirm.
            if num_bins == 4:
                cutoffsc[j] = [min(data_col)[0], low_mu, mu, high_mu]
            else:
                cutoffsc[j] = [min(data_col)[0], (low_mu - min(data_col)[0])/2, mu, (high_mu-mu)/2, high_mu, (max(data_col)[0]-high_mu)/2]
        cutoffs = [dmat.min() + bin_size * i for i in range(num_bins)]
        #epsilon = float(alpha * 1) / len(covar_matrix)
        for label in [0,1]:
            # transpose to go by column
            sub_data = hw3.transpose_array(hw3.get_sub_at_value(data_row, truth, label))
            # NOTE(review): the first assignment (global cutoffs) is
            # immediately overwritten by the per-column one.
            model[label] = hw3.bins_per_column(sub_data, cutoffs)
            model[label] = hw3.bins_per_column_by_col(sub_data, cutoffsc)
        # probability of bin given label
        self.y_prob = float(sum(truth))/len(truth)
        self.cutoffs = cutoffsc
        return model

    def model_bin_predict(self, data_row, alpha=2.00001, theta=.5):
        """
        probality[0] = [xlabel_0_prob, xlabel_1_prob, ..., xlabel_n_prob]
                        probability of y == 0 given xlabel
        probality[1] = [xlabel_0_prob, xlabel_1_prob, ..., xlabel_n_prob]
                        probability of y == 1 given xlabel

        `alpha` is an additive smoothing constant applied per feature.
        """
        probability = [[] for _ in [0, 1]] # hold probability per row
        for r in range(len(data_row)):
            prob = [1 for _ in [0, 1]] #[1 for _ in range(len(self.cutoffs))]
            row = data_row[r]
            for c in range(len(row)):
                xbin = hw3.classify_x(row[c], self.cutoffs[c])
                for label in [0, 1]:
                    # model[0] = [col1: prob_bin1, prob_bin2 ...], [col2:...]
                    #for modbin in self.model[label]
                    prob[label] = prob[label] * (self.model[label][c][xbin] + float(alpha) / len(data_row))
            for label in [0, 1]:
                prob_y = self.y_prob if label == 1 else 1 - self.y_prob
                probability[label].append(prob[label] * prob_y)
        return self.nb_predict(probability, theta=theta)

    def model_gaussian_rand_var_train(self, data, truth):
        """Per label: per-column means and standard deviations."""
        mus = {}
        std_dev = {}
        for label in [0,1]:
            sub_data = hw3.get_sub_at_value(data, truth, label)
            mus[label] = hw3.get_mus(sub_data)
            std_dev[label] = hw3.get_std_dev(sub_data)
        self.y_prob = float(sum(truth))/len(truth)
        return [mus, std_dev, float(sum(truth))/len(truth)]

    def model_gaussian_rand_var_predict(self, data, theta=.5):
        """ model = [[mus_by_col], [std_dev_by_col], prob_y]"""
        std_devs = self.model[1]
        mus = self.model[0]
        y_prob = self.model[2]
        probabilities = {}
        for label in [0, 1]:
            # a label absent from training contributes zero probability
            if len(std_devs[label]) == 0:
                #print self.model
                #print 'Standard Deviations is empty!!!'
                probabilities[label] = [0] * len(data)
                continue
            prob_of_y = y_prob if label==1 else (1-y_prob)
            probabilities[label] = hw3.univariate_normal(data, std_devs[label], mus[label], prob_of_y, .15, ignore_cols=self.ignore_cols)
        return self.nb_predict(probabilities, theta)

    def nb_predict(self, probabilities, theta=.5):
        """
        probality[0] = [xlabel_0_prob, xlabel_1_prob, ..., xlabel_n_prob]
                        probability of y == 0 given xlabel
        probality[1] = [xlabel_0_prob, xlabel_1_prob, ..., xlabel_n_prob]
                        probability of y == 1 given xlabel
        """
        predict = []
        for r in range(len(probabilities[0])):
            #max_label = None
            #for label in [0, 1]:
            #    if max_label == None:
            #        max_label = [probabilities[label][r], label]
            #    if probabilities[label][r] > max_label[0]:
            #        max_label = [probabilities[label][r], label]
            #predict.append(max_label[1])
            # normalized posterior for class 1
            prob_norm = float(probabilities[1][r])/(probabilities[0][r] + probabilities[1][r])
            # NOTE(review): theta == 0 is rewritten to -0.1, which makes
            # every row classify as 1 -- confirm this is the intended
            # "always positive at threshold 0" behavior.
            if theta == 0:
                theta -=.1
            if prob_norm > theta:
                predict.append(1)
            else:
                predict.append(0)
        return predict

    def model_average_predict(self, data_row, theta=.5):
        """ For each row calculate the probability
            that y is 1 and the probability that y is 0
            P(Y|X) = ( P(X|Y) * P(Y) ) / ( P(X) )
            P(X) = prob_over (probability that x is above average for column)
            P(X|Y) = prob_over_given_c (probability that x is above average when y = c for column)
            P(Y) = prob_y ( probability of y )
        """
        mus = hw3.get_mus(data_row)
        data_cols = hw3.transpose_array(data_row)
        prob_over_given_1 = self.model[0]
        prob_over_given_0 = self.model[1]
        prob_over = self.model[2]
        prob_y1 = self.model[3]
        predict = []
        for r in range(len(data_row)):
            row = data_row[r]
            prob_1 = 1
            prob_0 = 1
            for c in range(len(row)):
                mu = mus[c]
                # pick P(feature above mean | class) or its complement
                if row[c] > mu:
                    prob_x1 = prob_over_given_1[c]
                    prob_x0 = prob_over_given_0[c]
                    prob_xover = prob_over[c]
                else:
                    prob_x1 = 1 - prob_over_given_1[c]
                    prob_x0 = 1 - prob_over_given_0[c]
                    prob_xover = 1 - prob_over[c]
                prob_1 = prob_1 * prob_x1 #* prob_y1 #/ prob_xover #P(X|Y) * P(Y)
                prob_0 = prob_0 * prob_x0 #* (1-prob_y1) #/ prob_xover
                #prob_1 = prob_1 + np.log(prob_x1) + np.log(prob_y1)
                #prob_0 = prob_0 + np.log(prob_x0) + np.log(1-prob_y1)
            prob_1 = prob_1 * prob_y1
            prob_0 = prob_0 * (1 - prob_y1)
            prob_norm = float(prob_1)/(prob_0 + prob_1)
            if prob_norm > theta:
                predict.append(1)
            else:
                predict.append(0)
        return predict

    def aggregate_model(self, models):
        """ Average of all:
            [prob_over_given_1, prob_over_given_0, prob_over, prob_y1]
        """
        if self.model_type > 0:
            #TODO - this is Baaaaad
            self.aggregate_model2(models)
            return
        init = [0 for _ in models[0].model[0]]
        mult_fields = 3 if self.model_type == 0 else 2
        agg_model = []
        for i in range(mult_fields):
            agg_model.append(init[:])
        agg_model.append(0)
        total_models = len(models)
        for m in range(len(models)):
            model = models[m].model
            for i in range(mult_fields):
                probs = model[i][:]
                for c in range(len(probs)): # columns
                    agg_model[i][c] += probs[c]
            agg_model[3] += model[3]
        # NOTE(review): `probs` below is whatever the last loop left behind
        for i in range(3):
            for c in range(len(probs)): # columns
                agg_model[i][c] = float(agg_model[i][c])/total_models
        agg_model[3] = float(agg_model[3])/total_models
        self.model = agg_model

    def aggregate_model2(self, models):
        """ Average of all:
            [prob_of_y_given_x_and_1, prob_y_given_x_and_0, prob_y1]
        """
        print "AGG MOD2"
        # initiate models as {0: [0,0,0...len(cols)], 1: [0, 0 ,0, ..len(cols)]
        init = {i:[0 for _ in models[0].model[0][0]] for i in [0,1]}
        mult_fields = 3 if self.model_type == 0 else 2
        agg_model = []
        # NOTE(review): the SAME `init` dict object is appended
        # mult_fields times, so every field aliases one accumulator --
        # sums are multiplied across fields.  Looks like a bug; each
        # append probably needs its own deep copy.
        for i in range(mult_fields):
            agg_model.append(init)
        agg_model.append(0)
        total_models = len(models)
        for m in range(len(models)):
            model = models[m].model
            for i in range(mult_fields):
                probs = model[i]
                for label in range(len(probs)):
                    for c in range(len(probs[label])):
                        agg_model[i][label][c] += probs[label][c]
            agg_model[mult_fields] += model[mult_fields]
        for i in range(mult_fields):
            for c in range(len(probs[0])): # columns
                for label in [0, 1]:
                    agg_model[i][label][c] = float(agg_model[i][label][c])/total_models
        agg_model[mult_fields] = float(agg_model[mult_fields])/total_models
        self.model = agg_model

    def aggregate_model3(self, models):
        """ Average of all:
            [prob_of_y_given_x_and_1, prob_y_given_x_and_0, prob_y1]

        Bin-model variant: element-wise average of the per-label,
        per-column, per-bin probabilities plus the mean y_prob.
        """
        print "AGG MOD3"
        self.cutoffs = models[0].cutoffs
        print self.cutoffs
        # initiate models as {0: [0,0,0...len(cols)], 1: [0, 0 ,0, ..len(cols)]
        #num_bins = len(self.cutoffs)
        num_bins = len(self.cutoffs[0])
        print num_bins
        zeros = np.zeros(num_bins)
        agg_model = {i:[deepcopy(np.asarray(zeros[:])) for _ in models[0].model[0]] for i in [0, 1]}
        total_models = len(models)
        y_prob_sum = 0
        for m in range(len(models)):
            model = models[m].model
            for label in [0, 1]:
                probs = model[label][:]
                for c in range(len(probs)):
                    for xbin_i in range(num_bins):
                        agg_model[label][c][xbin_i] += probs[c][xbin_i]
            y_prob_sum += models[m].y_prob
        for label in [0, 1]:
            for c in range(len(probs[0])): # columns
                for xbin_i in range(num_bins): # number of bins
                    agg_model[label][c][xbin_i] = float(agg_model[label][c][xbin_i])/total_models
        self.y_prob = float(y_prob_sum)/total_models
        self.model = agg_model
def get_prob_over(data_by_row, mus):
    """
    Per-column probability that a value exceeds that column's mean.

    Returns a flat list: probability_above_mu[j] = fraction of rows whose
    value in column j is strictly greater than mus[j].

    Cleanup vs the original: the dead `var_col = utils.variance(...)`
    local was removed (its result was never used).
    """
    probability_above_mu = []
    size = len(data_by_row)
    by_col = hw3.transpose_array(data_by_row)
    for col in range(len(by_col)):
        column = by_col[col]
        mu_col = mus[col]
        total_over = sum(1 for value in column if value > mu_col)
        probability_above_mu.append(float(total_over) / size)
    return probability_above_mu
def calc_bayes(prob_x_given_y, mux, varx, prob_y, prob_x):
    """Bayes-rule posterior P(y|x) -- unfinished stub, always returns 0."""
    return 0
def add_smoothing(array, alpha=0.0, length=1):
    """Apply additive (Laplace) smoothing to the three probability vectors.

    `array` is [p1, p0, px, py]; the first three lists are smoothed IN
    PLACE with pseudo-count `alpha` over `length` observations, and py
    passes through unchanged.
    """
    p1, p0, px, py = array
    for probs in (p1, p0, px):
        denom = length + alpha * len(probs)
        for k in range(len(probs)):
            probs[k] = float(probs[k] + alpha) / denom
    return [p1, p0, px, py]
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "utils/NaiveBayes.py",
"copies": "1",
"size": "13733",
"license": "mit",
"hash": 6571886659857699000,
"line_mean": 38.8057971014,
"line_max": 138,
"alpha_frac": 0.5212262434,
"autogenerated": false,
"ratio": 3.230533992001882,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42517602354018813,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Allison MacLeay'
import CS6140_A_MacLeay.utils as utils
import numpy as np
import os
# Root folder (relative to the working directory) holding the UCI data sets.
uci_folder = 'data/UCI'
def data_q2():
    """Placeholder for the question-2 data loader; intentionally unimplemented."""
    pass
def data_q3_crx():
    """Load, clean and normalize the UCI crx data set."""
    data_file = os.path.join(uci_folder, 'crx', 'crx.data')
    raw = read_file(data_file)
    return normalize_data(clean_data(raw))
def data_q3_vote():
    """Load, clean and normalize the UCI vote data set."""
    data_file = os.path.join(uci_folder, 'vote', 'vote.data')
    raw = read_file(data_file)
    return normalize_data(clean_data(raw))
def metadata_q4():
    """Read the 8newsgroup training feature settings into a name -> ids map."""
    settings_file = os.path.join('data/8newsgroup', 'train.trec', 'feature_settings.txt')
    return get_feature_settings(read_file(settings_file, ','))
def metadata_q4_labels():
    """Read the 8newsgroup training label settings into a label -> ids map."""
    settings_file = os.path.join('data/8newsgroup', 'train.trec', 'data_settings.txt')
    return get_label_settings(read_file(settings_file, ','))
def get_label_settings(data):
    """Parse 'key=value' tokens into {external label -> [int ids]}.

    NOTE(review): assumes an 'intId=...' token precedes its 'extLabel=...'
    token; if a label ever comes first, `idx` is stale from the previous
    pair (or unbound on the very first one).  Malformed tokens are printed
    and skipped.
    """
    dsettings = {}
    for i in range(len(data)):
        for j in range(len(data[i])):
            srow = data[i][j].split('=')
            if len(srow) != 2:
                print srow
            else:
                ftype = srow[0]
                fval = srow[1]
                if ftype == 'intId':
                    idx = int(fval)
                elif ftype == 'extLabel':
                    name = fval
                    if name not in dsettings.keys():
                        dsettings[name] = [idx]
                    else:
                        dsettings[name].append(idx)
    return dsettings
def get_feature_settings(data):
    """Parse 'key=value' tokens into {feature name -> [feature indices]}.

    NOTE(review): same ordering assumption as get_label_settings -- a
    'featureIndex=...' token must precede its 'featureName=...' token.
    Malformed tokens are printed and skipped.
    """
    fsettings = {}
    for i in range(len(data)):
        for j in range(len(data[i])):
            srow = data[i][j].split('=')
            if len(srow) != 2:
                print srow
            else:
                ftype = srow[0]
                fval = srow[1]
                if ftype == 'featureIndex':
                    idx = int(fval)
                elif ftype == 'featureName':
                    name = fval
                    if name not in fsettings.keys():
                        fsettings[name] = [idx]
                    else:
                        fsettings[name].append(idx)
    return fsettings
def data_q4():
    """Read the 8newsgroup sparse feature matrix; returns (values, features)."""
    matrix_file = os.path.join('data/8newsgroup', 'train.trec', 'feature_matrix.txt')
    raw = read_file(matrix_file, ' ')
    #data = clean_data(data)
    #data = normalize_data(data)
    return feature_map(raw)
def get_data_with_ft(data, fmap, feature_list):
    """Select the rows of `data` associated with every feature in feature_list.

    NOTE(review): unfinished -- `has_all` and `m` are never defined (the
    filtering loop is commented out), so calling this with more than one
    feature raises NameError.  Confirm before use.
    """
    idx = fmap[feature_list[0]]
    sub = []
    for i in range(1, len(feature_list)):
        ft = feature_list[i]
        #for id in idx:
        #if ft not in fmap[m]:
        #    has_all = False
        if has_all:
            idx.append(m)
    for i in idx:
        sub.append(data[i])
    return sub, idx
def feature_map(data):
    """Split 'feature:value' tokens into parallel value/feature matrices.

    The first token of every row (the label) is skipped. Returns
    (values, features), both as lists of per-row string lists.
    """
    values, features = [], []
    for row in data:
        fts, vals = [], []
        for token in row[1:]:
            ft, val = token.split(':')
            fts.append(ft)
            vals.append(val)
        values.append(vals)
        features.append(fts)
    return values, features
def index_features(data):
    """Build an inverted index: feature id -> list of row indices where it
    appears (duplicates within a row are recorded once per occurrence).

    Fixes the ``ft not in features.keys()`` membership test and the manual
    insert with ``dict.setdefault``.
    """
    features = {}
    for i, row in enumerate(data):
        for ft in row:
            features.setdefault(ft, []).append(i)
    return features
def read_file(infile, delim='\t'):
    """Read a delimited text file into a list of token lists, one per line."""
    with open(infile, 'r') as handle:
        return [line.strip().split(delim) for line in handle]
def normalize_data(X, skip=None):
    """Min/max-normalize every column of row-major ``X`` except column
    ``skip`` (which is dropped from the output).

    ``skip`` may be negative; it is interpreted relative to the row length.
    """
    if skip is not None and skip < 0:
        skip += len(X[0])
    normalized = []
    for j, column in enumerate(utils.transpose_array(X)):
        if j != skip:
            scaled, _singular = normalize_col(column)
            normalized.append(scaled)
    return utils.transpose_array(normalized)
def clean_data(X, remove_constant=False):
    """Coerce columns to numeric form and drop rows that contained NaNs.

    Categorical columns are integer-coded by check_type. When
    ``remove_constant`` is True, columns with a single distinct value are
    dropped as well. Returns the cleaned data in row-major form.
    """
    bad_rows = []
    kept_columns = []
    for column in utils.transpose_array(X):
        converted, nan_rows, is_singular = check_type(column)
        for r in nan_rows:
            if r not in bad_rows:
                bad_rows.append(r)
        # keep the column unless it is constant AND constants are excluded
        if not (is_singular and remove_constant):
            kept_columns.append(converted)
    rows = utils.transpose_array(kept_columns)
    return [row for i, row in enumerate(rows) if i not in bad_rows]
def normalize_col(col):
    """Scale a column to the [0, 1] range.

    :param col: non-empty sequence of numbers
    :return: (scaled_column, is_singular) -- ``is_singular`` is True when
        every value is identical; such a column is returned as all zeros.

    Fixes a ZeroDivisionError in the original: for a constant column the
    shifted maximum is 0 and ``float(i)/cmax`` raised.
    """
    cmin = min(col)
    cmax = max(col)
    is_singular = cmin == cmax
    shifted = [v - cmin for v in col]
    if is_singular:
        # max(shifted) would be 0 -> dividing is undefined; return zeros
        return [0.0 for _ in shifted], True
    top = max(shifted)
    return [float(v) / top for v in shifted], False
def check_type(col):
    """Coerce a raw string column to floats, falling back to category codes.

    The FIRST entry decides: if it fails float(), the whole column is
    treated as categorical and coded via ``serialize``; otherwise later
    non-numeric entries become NaN. '?' maps to NaN either way. Returns
    (converted_column, indices_with_nan, is_singular).
    """
    converted = []
    nan_rows = []
    classes = {}
    categorical = False
    for i, raw in enumerate(col):
        if categorical:
            code = serialize(raw, classes)
            if code is np.nan:
                nan_rows.append(i)
            converted.append(code)
            continue
        try:
            converted.append(float(raw))
        except ValueError:
            if i == 0:
                # first value is non-numeric: column is categorical from here on
                code = serialize(raw, classes)
                if code is np.nan:
                    nan_rows.append(i)
                converted.append(code)
                categorical = True
            else:
                converted.append(np.nan)
                nan_rows.append(i)
    is_singular = min(converted) == max(converted)
    return converted, nan_rows, is_singular
def serialize(x, classes):
    """Map a categorical value to a stable integer code.

    :param x: raw value; '?' denotes missing and maps to NaN
    :param classes: dict of value -> code, mutated in place as new values
        are seen
    :return: the code for ``x`` (or np.nan for '?')

    Fixes idiom: ``x in classes`` instead of ``x in classes.keys()``, and
    ``len(classes)`` replaces the ``len(classes.values()) > 0`` dance --
    the next unused code is simply the number of distinct values seen.
    """
    if x == '?':
        return np.nan
    if x in classes:
        return classes[x]
    val = len(classes)
    classes[x] = val
    return val
def get_train_and_test(folds, k):
    """Concatenate every fold except ``folds[k]`` into a training set.

    Returns (train_rows, folds[k]); the held-out fold is the test set.
    """
    train = [row for i, fold in enumerate(folds) if i != k for row in fold]
    return train, folds[k]
def random_sample(array, y_arr, size):
    """Draw ``size`` row indices (with replacement) and return the
    corresponding data rows and labels as two parallel lists."""
    picks = np.random.choice(range(len(array)), size=size)
    sampled_x = [array[i] for i in picks]
    sampled_y = [y_arr[i] for i in picks]
    return sampled_x, sampled_y
def load_spirals():
    """Load the two-spirals data set from a fixed relative path.

    Each tab-separated line is a feature vector followed by the label in
    the last column. Returns (data, labels) as float numpy arrays.
    NOTE(review): the path is relative to the caller's CWD, and the file
    is opened in binary mode but parsed as text (a Python 2 idiom).
    """
    file = '../data/twoSpirals.txt'
    data = []
    y = []
    with open(file, 'rb') as spirals:
        for line in spirals:
            line = line.strip()
            data.append(np.array(line.split('\t')[:-1], dtype=float))
            y.append(line.split('\t')[-1])
    return np.array(data), np.array(y, dtype=float)
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "Homeworks/HW4/data_load.py",
"copies": "1",
"size": "6767",
"license": "mit",
"hash": 5152413463089065000,
"line_mean": 25.537254902,
"line_max": 69,
"alpha_frac": 0.5220925078,
"autogenerated": false,
"ratio": 3.400502512562814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9385717361793132,
"avg_score": 0.007375531713936292,
"num_lines": 255
} |
__author__ = 'Allison MacLeay'
import CS6140_A_MacLeay.utils as utils
import pandas as pd
import CS6140_A_MacLeay.utils.Stats as mystats
import numpy as np
from CS6140_A_MacLeay.utils.Stats import multivariate_normal
#from scipy.stats import multivariate_normal # for checking
def load_and_normalize_spambase():
    """Thin wrapper around utils.load_and_normalize_spam_data()."""
    return utils.load_and_normalize_spam_data()
def pandas_to_data(df, remove_nan=True):
    """Convert a DataFrame to a list of per-row value lists.

    :param df: pandas DataFrame of numeric values
    :param remove_nan: when truthy, values of a row containing any NaN are
        not copied -- note the row itself still appears as an EMPTY list
        (preserved from the original behavior).

    Fixes: the original default was the string 'True' (always truthy, so
    the boolean default is behavior-identical); the any(np.isnan(row))
    check is hoisted out of the inner loop instead of re-running per cell.
    """
    array = []
    for i in range(len(df)):  # row-wise
        row = df.iloc[i]
        keep_values = not remove_nan or not any(np.isnan(row))
        if keep_values:
            array.append([row[j] for j in range(len(row))])
        else:
            array.append([])
    return array
def transpose_array(arr):
    """Transpose a list of equal-length rows into a list of columns."""
    columns = []
    for i, row in enumerate(arr):
        if i == 0:
            columns = [[] for _ in row]
        for j, value in enumerate(row):
            columns[j].append(value)
    return columns
def get_mus(arr):
    """Return the mean of each column of a row-major array of arrays."""
    return [utils.average(column) for column in transpose_array(arr)]
def calc_covar_X_Y(x, xmu, y, ymu):
    """Single-sample covariance contribution: (x - xmu) * (y - ymu)."""
    dx = x - xmu
    dy = y - ymu
    return dx * dy
def get_covar_X_Y(data, predict):
    """Per-sample covariance terms between each feature and the target.

    :param data: row-major feature matrix
    :param predict: target values, one per row
    :return: matrix of (x - xmu) * (y - ymu) terms, same shape as ``data``

    BUG FIX: the original called ``calc_covar(x, xmus[i], y, ymu)``, but
    calc_covar takes only two arguments (squared deviation); the 4-arg
    cross-covariance helper calc_covar_X_Y was clearly intended.
    """
    covar = []
    xmus = get_mus(data)
    ymu = utils.average(predict)
    for row in range(len(data)):
        covar.append([])
        y = predict[row]
        for i in range(len(data[row])):
            x = data[row][i]
            covar[row].append(calc_covar_X_Y(x, xmus[i], y, ymu))
    return covar
def calc_covar(x, xmu):
    """Squared deviation of a single sample from its mean."""
    diff = x - xmu
    return diff * diff
def get_covar(data):
    """Covariance matrix of row-major data (each column is a variable)."""
    by_variable = np.array(data).T
    return np.cov(by_variable)
def mu_for_y(data, truth, value):
    """Column means restricted to rows whose label equals ``value``.

    :param data: row-major data
    :param truth: labels, one per row
    :param value: label to select (0 or 1)
    :return: list of per-column means over the selected rows
    """
    selected = get_sub_at_value(data, truth, value)
    return get_mus(selected)
def get_sub_at_value(data, truth, value):
    """Rows of ``data`` whose corresponding label equals ``value``."""
    return [data[i] for i in range(len(truth)) if truth[i] == value]
def separate_X_and_y(data):
    """Split row-major data into feature rows and last-column labels."""
    X = [row[:-1] for row in data]
    y = [row[-1] for row in data]
    return X, y
class GDA():
    """Gaussian discriminant analysis (course implementation).

    Stores per-class, per-row densities in ``self.prob`` keyed by label
    (0, 1, and 'X' for the pooled data) and predicts by comparing the
    class-conditional densities row by row.
    """
    def __init__(self):
        # dict: label -> list of per-row densities (filled by train/train2)
        self.prob = None
        # placeholder for a prediction cache; not written in this class
        self.predicted = None
    def train2(self, X, mus, covar_matrix, label):
        """Score every row of X under N(mus, covar_matrix) for one class.

        NOTE(review): ``X = tmp[:]`` at the end rebinds only the local
        name; the caller's X is never modified, so the copy is redundant.
        """
        #TODO train subsets together
        tmp = X[:]
        if type(X) is list:
            X = np.matrix(X)
        prob = []
        x_less = X - mus
        # this is called for each class outside
        for r, row in enumerate(x_less):
            row = np.asarray(x_less[r]).ravel()
            prob.append(self.multivariate_normal(covar_matrix, row, mus=mus))
        # Alternate way to do this found below.
        # http://machinelearningmastery.com/naive-bayes-classifier-scratch-python/
        X = tmp[:]
        self.update_prob(label, prob)
    def train(self, X, covar_matrix, y):
        """Fit per-class densities: computes class means from (X, y) and
        fills self.prob[0], self.prob[1], and self.prob['X'] with per-row
        multivariate-normal densities under the shared covariance."""
        #TODO train subsets together
        self.prob = {}
        mus = {}
        #TODO my mus passed in are wrong - should be mus from total set
        for label in [0, 1]:
            self.prob[label] = [0 for _ in range(len(X))]
            mus[label] = get_mus(get_sub_at_value(X, y, label))
        mus['X'] = get_mus(X)
        for label in [0, 1, 'X']:
            prob = []
            #sub_data, sub_truth, sub_indeces = get_data_truth(X, y, mus['X'], label)
            x_less = [np.asarray(X[xi]) - mus[label] for xi in range(len(X))]
            # this is called for each class outside
            for r, row in enumerate(x_less):
                row = np.asarray(x_less[r]).ravel()
                prob.append(self.multivariate_normal(covar_matrix, row, mus=mus[label]))
            self.prob[label] = prob
            # now we have prob = [0: prob_x0_rows, 1:prob_x1_rows, 'X':prob_x_rows]
            # Alternate way to do this found below.
            # http://machinelearningmastery.com/naive-bayes-classifier-scratch-python/
            #self.update_prob(label, prob)
    def aggregate_model(self, models):
        # Placeholder: combining several GDA models was never implemented.
        for gda in models:
            pass
    def multivariate_normal(self, covar_matrix, x_less, alpha=1, mus=[]):
        """Delegate to the module-level multivariate_normal.

        :param covar_matrix: shared covariance matrix
        :param x_less: X - mu for one sample (flattened)
        :return: density value
        NOTE(review): ``alpha`` and ``mus`` are accepted but not forwarded
        (alpha is hard-coded to 1 in the delegated call).
        """
        return multivariate_normal(covar_matrix, x_less, alpha=1)
    def update_prob(self, label, prob):
        # Record densities for a label without overwriting an existing entry.
        if self.prob is None:
            self.prob = {label: prob}
        elif label not in self.prob.keys():
            self.prob[label] = prob
    def predict(self, data):
        """Pick, for each row, the label (0 or 1) with the larger density."""
        predicted = []
        lprob = {x: 1 for x in self.prob.keys()}
        #TODO - Do I use Gaussian process instead?
        for r in range(len(data)):
            max_prob_label = None
            for label in [0, 1]:
                if max_prob_label is None:
                    max_prob_label = [self.prob[label][r], label]
                else:
                    if self.prob[label][r] > max_prob_label[0]:
                        max_prob_label = [self.prob[label][r], label]
            predicted.append(max_prob_label[1])
        return predicted
    def normalize_probabilities(self):
        """Normalize class densities so self.prob[0] + self.prob[1] == 1.

        NOTE(review): this only works if self.prob[0]/[1] are numpy
        arrays -- with plain lists (as train() stores) ``+`` concatenates
        and ``/`` raises. The loop also recomputes the same Z every
        iteration and never uses ``i``; confirm intent before relying on it.
        """
        for i in range(len(self.prob[0])):
            Z = self.prob[0] + self.prob[1]
            self.prob[0] = self.prob[0] / Z
            self.prob[1] = self.prob[1] / Z
def partition_folds(data, k):
    """Shuffle ``data`` into k folds by round-robin assignment.

    k == 1 returns the whole data set as a single fold. When there are
    fewer rows than folds, only len(data) fold slots are allocated.
    """
    k = int(k)
    if k == 1:
        return [data]
    n_slots = k if len(data) > k else len(data)
    folds = [[] for _ in range(n_slots)]
    order = np.arange(len(data))
    np.random.shuffle(order)
    for i in order:
        folds[i % k].append(data[i])
    return folds
def get_accuracy(predict, truth):
    """Fraction of predictions that match the ground truth."""
    correct = sum(1 for i in range(len(predict)) if predict[i] == truth[i])
    return float(correct) / len(predict)
def get_data_and_mus(spamData):
    """Split a spam matrix into labels (last column) and features.

    :return: (labels_by_row, feature_rows, per-column feature means,
        label mean)
    """
    columns = transpose_array(spamData)
    truth_rows = columns[-1]                   # labels, by row
    data_rows = transpose_array(columns[:-1])  # features, back to row-major
    data_mus = get_mus(data_rows)
    y_mu = utils.average(truth_rows)
    return truth_rows, data_rows, data_mus, y_mu
def get_data_truth(data_rows, truth_rows, data_mus, label):
    """Select rows whose label equals ``label``.

    :param data_rows: row-major data
    :param truth_rows: labels, one per row
    :param data_mus: UNUSED; kept only for interface compatibility with
        existing callers
    :param label: label value to select
    :return: (selected_rows, selected_labels, their_indices)

    Fix: removed the unused local ``mus`` list from the original.
    """
    data = []
    indeces = []
    truth = []
    for i in range(len(truth_rows)):
        if truth_rows[i] == label:
            data.append(data_rows[i])
            indeces.append(i)
            truth.append(truth_rows[i])
    return data, truth, indeces
def get_std_dev(data):
    """Population standard deviation of each column of row-major data."""
    return [np.std(column) for column in transpose_array(data)]
def univariate_normal(data, std_dev, mus, prob_y, alpha=1, ignore_cols=None):
    """Naive-Bayes style per-row likelihoods under independent Gaussians.

    :param data: rows of samples
    :param std_dev: per-column standard deviations
    :param mus: per-column means
    :param prob_y: class prior multiplied into every row's likelihood
    :param alpha: smoothing added (scaled by 1/#columns) to each std dev
    :param ignore_cols: column indices to skip (default: none)
    :return: list of P(row) * prob_y, or 0 if std_dev is empty

    Fixes: the mutable default argument ``ignore_cols=[]`` is replaced by
    the None sentinel, and the dead ``num_std_devs = 1e-10 ...`` branch is
    removed (it was unreachable after the early return on empty std_dev).
    """
    if ignore_cols is None:
        ignore_cols = []
    if len(std_dev) == 0:
        return 0
    # 1/(std_dev * sqrt(2*pi)) * exp(-(x-mu)**2 / (2 * std_dev**2))
    epsilon = 1. * alpha / len(std_dev)  # smoothing for zero std devs
    prob_const = 1. / np.sqrt(2 * np.pi)
    row_probability = []
    for row in data:
        prob = 1
        for j in range(len(std_dev)):
            if j in ignore_cols:
                continue
            std_devj = std_dev[j] + epsilon
            epow = -1 * (row[j] - mus[j]) ** 2 / (2 * std_devj ** 2)
            prob *= prob_const * (1.0 / std_devj) * np.exp(epow)
        row_probability.append(prob * prob_y)
    return row_probability
def bins_per_column(data_cols, cutoffs):
    """Per-column fraction of values landing in each cutoff bin (one
    shared cutoff list for all columns)."""
    column_prob = []
    num_bins = len(cutoffs)
    for column in data_cols:
        counts = classify(column, cutoffs)
        fractions = [float(counts[b]) / len(column) for b in range(num_bins)]
        column_prob.append(fractions)
    return column_prob
def bins_per_column_by_col(data_cols, cutoffsc):
    """Like bins_per_column, but with a separate cutoff list per column.

    NOTE: num_bins is len(cutoffsc) (the number of cutoff lists), exactly
    as in the original -- counts beyond that length are not recorded and
    missing bins stay at zero.
    """
    column_prob = []
    num_bins = len(cutoffsc)
    for c, column in enumerate(data_cols):
        counts = classify(column, cutoffsc[c])
        prob = [0] * num_bins
        for b, count in enumerate(counts):
            prob[b] += count
        column_prob.append([float(v) / len(column) for v in prob])
    return column_prob
def classify(row, cutoffs):
    """Count how many values of ``row`` fall into each cutoff bin."""
    counts = [0] * len(cutoffs)
    for value in row:
        counts[classify_x(value, cutoffs)] += 1
    return counts
def classify_x(x, cutoffs):
    """Return the bin index for ``x``: the largest i with x >= cutoffs[i].

    The first cutoff is the minimum and is never compared, so values
    below cutoffs[1] land in bin 0 and values at or above the last cutoff
    land in the last bin.
    """
    label = 1
    while label < len(cutoffs) and x >= cutoffs[label]:
        label += 1
    return label - 1
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "Homeworks/HW3/__init__.py",
"copies": "1",
"size": "9832",
"license": "mit",
"hash": 2794959578143385600,
"line_mean": 29.9182389937,
"line_max": 88,
"alpha_frac": 0.5482099268,
"autogenerated": false,
"ratio": 3.2567075190460417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43049174458460415,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Allison MacLeay'
import CS6140_A_MacLeay.utils.Tree as tree
import CS6140_A_MacLeay.utils.GradientDescent as gd
import CS6140_A_MacLeay.utils as utils
import CS6140_A_MacLeay.utils.plots as plot
import CS6140_A_MacLeay.Homeworks.hw2_new as hw2
import CS6140_A_MacLeay.utils.Stats as mystats
import numpy as np
import pandas as pd
import random
import sys
from nose.tools import assert_equal, assert_true, assert_not_equal, assert_false
def testTree():
    """Manual check of decision-tree split selection on a toy frame."""
    best = ('A', 5)  # expected best (feature, label); assertion disabled below
    data = {'A': [1,2,6,7,8,9,3,4,5], 'C': [1,0,1,0,1,0,1,0,1], 'B': [1,1,0,0,0,0,1,1,1]}
    df = pd.DataFrame(data)
    print tree.find_best_label_new(df, 'A', 'B')
    print 'best feature and label'
    print tree.find_best_feature_and_label_for_split(df, 'B', regression=True)
    #assert_equal(best, tree.find_best_feature_and_label_for_split(df, 'B', regression=True))
def testGradient(): # Great success with subset
    """Gradient descent on a 100-row housing subset; prints fit and predictions."""
    test, train = utils.load_and_normalize_housing_set()
    df_full = pd.DataFrame(train)
    subset_size = 100
    df = utils.train_subset(df_full, ['CRIM', 'TAX', 'B', 'MEDV'], n=subset_size)
    dfX = pd.DataFrame([df['CRIM'], df['TAX']]).transpose()
    print len(dfX)
    print dfX
    #raw_input()
    fit = gd.gradient(dfX, df['MEDV'].head(subset_size), .5, max_iterations=300)
    print 'read v fit'
    print len(dfX)
    print df['MEDV'].head(10)
    print fit
    # bias column of ones appended before applying the learned weights
    data = gd.add_col(gd.pandas_to_data(dfX), 1)
    print np.dot(data, fit)
def testGradSynth():
    """Gradient descent on synthetic linear data (y = .5*x0 + 2*x1 + 3).

    Fixes: removed the unreachable trailing ``pass`` and used the
    single-argument print() call form, which behaves identically under
    Python 2 and 3.
    """
    data, y = get_test_data()
    df = pd.DataFrame(data, columns=["x0", "x1"])
    print(gd.gradient(df, y, .5, max_iterations=30))
def testGradientByColumn():
    """Interactively run gradient descent on growing column subsets.

    NOTE(review): raw_input() blocks on each iteration -- this is meant
    for manual console runs only.
    """
    test, train = utils.load_and_normalize_housing_set()
    blacklist = ['NOX', 'RM']  # columns excluded from every subset
    df_full = pd.DataFrame(train)
    for i in range(2, len(df_full.columns) - 1):
        cols = []
        for j in range(1, i):
            if df_full.columns[j] not in blacklist:
                cols.append(df_full.columns[j])
        cols.append('MEDV')
        print cols
        raw_input()
        testGradient_by_columns(df_full, cols)
def testGradient_by_columns(df, cols): # fail
    """Fit gradient descent on the given columns and print predictions.

    NOTE(review): the target column MEDV is left inside the design
    matrix, which likely explains the '# fail' marker above.
    """
    df = utils.train_subset(df, cols, n=len(df))
    #dfX = pd.DataFrame([df['CRIM'], df['TAX']]).transpose()
    print len(df)
    print df
    #raw_input()
    fit = gd.gradient(df, df['MEDV'].head(len(df)), .00001, max_iterations=5000)
    print 'read v fit'
    print len(df)
    print df['MEDV'].head(10)
    print fit
    print np.dot(df, fit)
def testGradient2():
    """Gradient descent on random synthetic data; the result is discarded."""
    X = np.random.random(size=[10, 2])
    y = .5 * X[:, 0] + 2 * X[:, 1] + 3
    df = pd.DataFrame(data=X)
    w = gd.gradient(df, y, .05)
def testHW2_subset(): # Success
    """HW2 linear GD on a 10-row, 3-column housing subset."""
    test, train = utils.load_and_normalize_housing_set()
    df_full = pd.DataFrame(train)
    df_test = utils.train_subset(df_full, ['CRIM', 'TAX', 'B', 'MEDV'], n=10)
    df_train = utils.train_subset(df_full, ['CRIM', 'TAX', 'B', 'MEDV'], n=10)
    dfX_test = pd.DataFrame([df_test['CRIM'], df_test['TAX'], df_test['MEDV']]).transpose()
    dfX_train = pd.DataFrame([df_train['CRIM'], df_train['TAX'], df_train['MEDV']]).transpose()
    print hw2.linear_gd(dfX_train, dfX_test, 'MEDV')
def testHW2_allcols(): # Fail
    """HW2 linear GD using every housing column except the target."""
    test, train = utils.load_and_normalize_housing_set()
    df_full = pd.DataFrame(train)
    cols = [col for col in df_full.columns if col != 'MEDV']
    df_test = utils.train_subset(df_full, cols, n=10)
    df_train = utils.train_subset(df_full, cols, n=10)
    #dfX_test = pd.DataFrame([df_test['CRIM'], df_test['TAX'], df_test['MEDV']]).transpose()
    #dfX_train = pd.DataFrame([df_train['CRIM'], df_train['TAX'], df_train['MEDV']]).transpose()
    print hw2.linear_gd(df_train, df_test, 'MEDV')
def testHW2(): # Success
    """HW2 linear GD on the full housing train/test split."""
    test, train = utils.load_and_normalize_housing_set()
    df_train = pd.DataFrame(train)
    df_test = pd.DataFrame(test)
    print df_train.head(10)
    #raw_input()
    print hw2.linear_gd(df_train, df_test, 'MEDV')
def testLogisticGradient():
    """Logistic gradient descent on spambase; prints and returns
    [train_error, test_error]."""
    df_test, df_train = utils.split_test_and_train(utils.load_and_normalize_spam_data())
    Y = 'is_spam'
    binary = utils.check_binary(df_train[Y])
    model = gd.logistic_gradient(df_train, df_train[Y], .1, max_iterations=5)
    #print model
    #raw_input()
    predict = gd.predict(df_train, model, binary, True)
    print predict
    error_train = mystats.get_error(predict, df_train[Y], binary)
    #raw_input()
    predict = gd.predict(df_test, model, binary, True)
    print predict
    error_test = mystats.get_error(predict, df_test[Y], binary)
    print 'error train {} error_test {}'.format(error_train, error_test)
    return [error_train, error_test]
def testScale():
    """Scale random weights to the TAX column's range and plot vs MEDV."""
    test, train = utils.load_and_normalize_housing_set()
    df_full = pd.DataFrame(train)
    df = utils.train_subset(df_full, ['CRIM', 'TAX', 'B', 'MEDV'], n=10)
    w = []
    for i in range(0,len(df['TAX'])):
        w.append(random.random())
    scaled = utils.scale(w, min(df['TAX']), max(df['TAX']))
    plot.fit_v_point([w, df['MEDV'], scaled])
def testLinRidge_test_data():
    """Linear ridge regression on the synthetic linear data set."""
    dX, y = get_test_data()
    X = pd.DataFrame(data=dX, columns=["x0", "x1"])
    X['y'] = y
    #print hw2.linear_reg_errors(h_train, h_test, 'MEDV', True)
    print hw2.linear_reg(X, 'y', False, True)
def testLinRidge():
    """Linear ridge regression on the housing training set."""
    h_test, h_train = utils.load_and_normalize_housing_set()
    #print hw2.linear_reg_errors(h_train, h_test, 'MEDV', True)
    print hw2.linear_reg(h_train, 'MEDV', False, False)
def testBinary():
    """check_binary should be False for [5,6,7] and True for [1,0,1]."""
    not_binary = [5,6,7]
    binary = [1,0,1]
    df = pd.DataFrame({'not_binary': not_binary, 'binary': binary})
    print df['not_binary']
    nb_result = utils.check_binary(df['not_binary'])
    b_result = utils.check_binary(df['binary'])
    print 'not binary: {} binary: {}'.format(nb_result, b_result)
def testLogGradient2():
    """Logistic gradient descent on synthetic sigmoid-labeled data."""
    X = np.random.random(size=[10, 2])
    y = utils.sigmoid(X[:, 0]* .5 + 2 * X[:, 1] + 3)
    df = pd.DataFrame(data=X)
    w = gd.logistic_gradient(df, y, .05)
    print w
def UnitTest():
    """Run the local sanity checks for the stats helpers."""
    test_vector_equality()
    test_column_product()
    test_dot_prod()
def test_dot_prod():
    """dot_product_sanity: 4 columns of 1..5 times a vector of 2s -> 30 each."""
    data = {1:[1, 2, 3, 4, 5],
            2:[1, 2, 3, 4, 5],
            3:[1, 2, 3, 4, 5],
            4:[1, 2, 3, 4, 5]}
    multiplier = [2, 2, 2, 2, 2]
    truth = [30, 30, 30, 30]
    assert_true(mystats.check_vector_equality(truth, mystats.dot_product_sanity(data, multiplier)))
def test_vector_equality():
    """check_vector_equality: equal vectors pass; differing value or length fail."""
    v1 = [1, 2, 3, 4, 5]
    v2 = [1, 2, 3, 4, 5]
    v3 = [1, 2, 3, 4, 6]
    v4 = [1, 2, 3, 4]
    assert_true(mystats.check_vector_equality(v1, v2))
    assert_false(mystats.check_vector_equality(v2, v3))
    assert_false(mystats.check_vector_equality(v3, v4))
def test_column_product():
    """column_product: dot product of [1..5] with a vector of 2s is 30."""
    v1 = [1, 2, 3, 4, 5]
    v2 = [2, 2, 2, 2, 2]
    truth = 30
    prod = mystats.column_product(v1, v2)
    assert_equal(truth, prod)
def test_add_col():
    """add_col: append a column of 1s to a 5x3 zero matrix (visual check)."""
    array = []
    for i in range(5):
        row = []
        for j in range(3):
            row.append(0)
        array.append(row)
    print array
    array = utils.add_col(array, 1)
    print array
def get_test_data():
    """Random 10x2 feature matrix with targets y = .5*x0 + 2*x1 + 3."""
    features = np.random.random(size=(10, 2))
    targets = features[:, 0] * .5 + features[:, 1] * 2 + 3
    return features, targets
if __name__ == '__main__':
    # Manual test driver: uncomment exactly the scenarios to exercise.
    print 'Test main for HW2'
    #testTree()
    #testScale()
    #test_add_col()
    #testGradient()
    testGradSynth()
    #testGradientByColumn()
    #testGradient2()
    #testLinRidge()
    #testLinRidge_test_data()
    #testLogisticGradient()
    #testLogGradient2()
    #testHW2_allcols()
    #testHW2()
    #testBinary()
    #UnitTest()
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "Tests/testTree.py",
"copies": "1",
"size": "7595",
"license": "mit",
"hash": 6298505101809626000,
"line_mean": 32.0217391304,
"line_max": 99,
"alpha_frac": 0.6069782752,
"autogenerated": false,
"ratio": 2.781032588795313,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8826325656623716,
"avg_score": 0.012337041474319453,
"num_lines": 230
} |
__author__ = 'Allison MacLeay'
import numpy as np
import CS6140_A_MacLeay.utils.Stats as mystats
from CS6140_A_MacLeay.utils import check_binary
from CS6140_A_MacLeay.utils.Stats import get_error
import pandas as pd
class Perceptron:
    """Perceptron wrapper: trains eagerly on construction.

    ``model`` is the weight vector returned by train_perceptron; scoring
    uses accuracy when the target column is binary and MSE otherwise.
    """
    def __init__(self, data, predict_col, learning_rate, max_iterations=1000):
        # Training runs first, before the scoring attributes are available.
        self.model = train_perceptron(data, predict_col, learning_rate, max_iterations)
        self.result_is_binary = check_binary(data[predict_col])
        self.training_score = self._get_score_from_data(data, data[predict_col])
        self.predict_column = predict_col
    def get_predicted(self, data):
        """Linear scores data . model.

        NOTE(review): assumes ``data`` carries the same columns used in
        training (including the ones column) -- confirm at call sites.
        """
        #print len(data.columns)
        #print len(self.model)
        #if self.predict_column not in data.columns:
        # data[self.predict_column] = np.ones(len(data))
        predicted = np.dot(data, self.model)
        return predicted
    def get_score(self, predict, truth_set):
        # Delegate to the shared error helper; binary flag picks the metric.
        return get_error(predict, truth_set, self.result_is_binary)
    def _get_score_from_data(self, data, truth_set):
        # Convenience: predict then score in one step.
        predicted = self.get_predicted(data)
        return self.get_score(predicted, truth_set)
    def print_score(self, score=None):
        """Print the training score (or a supplied one) with a fitting caption."""
        caption = 'MSE: '
        if self.result_is_binary:
            caption = 'Accuracy: '
        if score is None:
            score = str(self.training_score)
        print '{} {}'.format(caption, score)
def train_perceptron(data, predict, learning_rate, max_iterations=1000):
    """Train perceptron weights on a DataFrame.

    :param data: DataFrame including the target column ``predict``
    :param predict: name of the target column
    :param learning_rate: step size added on every mistaken row
    :param max_iterations: cap on passes over the data
    :return: weight vector w

    Rows with a negative target have all their feature values negated up
    front (the classic perceptron trick), then w is bumped whenever a
    row's score is negative.
    NOTE(review): w comes from mystats.init_w(5), i.e. it has a fixed
    length of 5 regardless of the column count -- confirm for wider data.
    """
    ct_i = 0
    size = len(data)
    cols = []
    for col in data.columns:
        if col != predict:
            cols.append(col)
    X = data[cols]
    # Add column of ones
    X['ones'] = np.ones(size)
    X = X.reindex()
    p = data[predict]
    # keep track of the mistakes
    last_m = 10000000000000
    #TODO Do we flip our predict column? I didn't
    #TODO Do we flip our ones column? I did
    # Switch x values from positive to negative if y < 0
    ct_neg_1 = 0
    print p[:5]
    for i, row in enumerate(X.iterrows()):
        if list(p)[i] < 0:
            ct_neg_1 += 1
            for cn, col in enumerate(X.columns):
                X.iloc[i, cn] *= -1
    #print 'ct neg is {} '.format(ct_neg_1)
    #print size
    # Get random array of w values
    w = mystats.init_w(5)[0]
    # --sample init w--
    #0 0.761070
    #1 0.238147
    #2 0.928009
    #3 0.487875
    #4 0.541245
    #print 'w array'
    #print w.head(5)
    #print X.head(5)
    while ct_i < max_iterations: # for each iteration
        J = []
        n_row = 0
        mistakes_x_sum = 0
        num_of_mistakes = 0
        #print 'w'
        #print w
        for r_ct, row in X.iterrows(): # for each row
            x_sum = 0
            #print 'ct_i {} j {} w {} x {} x_sum {}'.format(ct_i, n_row, wj, X.iloc[n_row][ct_i], x_sum)
            for c_ct, col in enumerate(X.columns):
                #print 'col: {} d(col): {}'.format(col, row[col])
                x_sum += w[c_ct] * row[col]
            J.append(x_sum)
            if x_sum < 0:
                mistakes_x_sum += x_sum
                num_of_mistakes += 1
                for w_ct in range(len(w)):
                    w[w_ct] += learning_rate * row[w_ct]
        print 'Number of mistakes {}'.format(num_of_mistakes)
        # check objective
        #print 'sum of J is {}'.format(sum(J))
        #print 'iteration: {} length of mistakes: {} sum: {}'.format(ct_i, num_of_mistakes, -1 * mistakes_x_sum)
        #print '{} mis*lr={}'.format(mistakes_x_sum, mistakes_x_sum * learning_rate)
        # update w
        #for wi, wcol in enumerate(mistakes.columns):
        # # Add the sum of mistakes for each column to w for that column
        # w[wi] += learning_rate * sum(mistakes[wcol])
        # print 'wcol: {} {}'.format(wcol, sum(mistakes[wcol]))
        #w += sum(mistakes) * learning_rate
        #if last_m < num_of_mistakes:
        # print 'last_m is {} and size of mistakes is {}'.format(last_m, num_of_mistakes)
        # break
        last_m = num_of_mistakes
        ct_i += 1
        if num_of_mistakes == 0:
            break
    #print pd.DataFrame(J).head(5)
    return w
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "utils/Perceptron.py",
"copies": "1",
"size": "4177",
"license": "mit",
"hash": -7878808161132714000,
"line_mean": 28.6241134752,
"line_max": 112,
"alpha_frac": 0.5566195834,
"autogenerated": false,
"ratio": 3.213076923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42696965064769227,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Allison MacLeay'
import numpy as np
import pandas as pd
from CS6140_A_MacLeay.utils import average, sigmoid, add_col, get_hw
import CS6140_A_MacLeay.utils.Stats as mystats
import sys
#import CS6140_A_MacLeay.Homeworks.HW3 as hw3u
def to_col_vec(x):
    """Reshape a 1-D array into an (n, 1) column vector."""
    n = len(x)
    return x.reshape((n, 1))
def gradient(X, Y, gd_lambda, descent=True, max_iterations=1000, binary=False):
    """Stochastic gradient descent for a linear model with a bias column.

    :param X: DataFrame or list of rows; a column of ones is appended.
    :param Y: targets, accessed positionally via list(Y).
    :param gd_lambda: learning rate.
    :param descent: unused; kept for interface compatibility.
    :param max_iterations: cap on full passes over the data.
    :param binary: forwarded to get_hw (thresholds the hypothesis).
    :return: learned weight vector (last entry is the bias weight).
    NOTE(review): convergence requires every residual to be exactly 0, so
    real-valued targets usually run to max_iterations; list(Y) is also
    re-materialized inside the inner loops (O(n) per access).
    """
    accepted = False
    iterations = 0
    if type(X) is pd.DataFrame:
        X = pandas_to_data(X)
    # add column of ones
    X = add_col(X, 1)
    w = [0 for _ in range(len(X[0]))]
    hrows = [0 for _ in range(len(X))]
    print type(list(Y))
    not_converged = len(X)
    while iterations < max_iterations and not_converged > 0:
        not_converged = len(X)
        iterations += 1
        for r in range(len(X)): # r = i, c = j
            row = X[r]
            h = get_hw(row, w, binary)
            hrows[r] = h
            #TODO this doesn't seem right!
            #print 'values: {} {} '.format(list(Y)[r], h)
            if h-list(Y)[r] == 0:
                not_converged -= 1
            for c in range(len(row)):
                w[c] = w[c] - (gd_lambda * (h - list(Y)[r]) * row[c])
        debug_print(iterations, not_converged, hrows, Y)
    return w
def logistic_gradient(X, Y, gd_lambda, descent=True, epsilon_accepted=1e-6, max_iterations=10000000):
    """Batch gradient ascent for logistic regression.

    :param X: DataFrame of features; a bias column 'b' is added IN PLACE.
    :param Y: target series (accessed via Y.values).
    :param gd_lambda: learning rate.
    :param descent: unused; kept for interface compatibility.
    :param epsilon_accepted: stop when mean |w_new - w_old| drops below this.
    :param max_iterations: hard cap on iterations.
    :return: final weight vector (numpy array; last entry is the bias).
    :raises ValueError: if the weights diverge to NaN.
    """
    accepted = False
    iterations = 0
    epsilon = 1
    X['b'] = np.ones(len(X))
    m = X.shape[1] # number of cols
    print 'sh0: {} len(X): {}'.format(m, len(X))
    w_old = np.zeros(m)
    while not accepted:
        w_new = np.zeros(w_old.shape)
        for j in range(len(w_old)): # by col
            delta = 0.0
            for i in range(len(X)): # by row
                delta += (Y.values[i] - sigmoid(np.dot(w_old, X.values[i]))) * X.values[i, j]
            w_new[j] = w_old[j] + gd_lambda * delta
        if np.any(np.isnan(w_new)):
            raise ValueError('NAN is found on iteration {}'.format(iterations))
        epsilon = sum(np.abs(w_new - w_old))/len(w_new)
        print 'epsilon: {}'.format(epsilon)
        print 'w:'
        print '{} iterations, w: {}'.format(iterations, w_new[:])
        w_old = w_new
        if epsilon < epsilon_accepted:
            accepted = True
        if iterations >= max_iterations:
            accepted = True
        iterations += 1
    return w_new
def predict_data(data, model, binary=True, logistic=True, theta=.5):
    """Linear scores for each row, thresholded at ``theta`` when binary.

    ``logistic`` is accepted for interface parity but not used here.
    """
    predictions = []
    for row in data:
        score = sum(row[j] * model[j] for j in range(len(row)))
        if binary:
            predictions.append(1 if score > theta else 0)
        else:
            predictions.append(score)
    return predictions
def predict(df, model, binary=False, logistic=False):
    """Score a DataFrame against a weight vector.

    A bias column 'b' of ones is added IN PLACE if missing. With
    binary+logistic the raw scores pass through a sigmoid first; with
    binary the scores are then thresholded at 0.5 into {0, 1}.
    """
    if 'b' not in df.columns:
        df['b'] = 1
    if binary:
        cutoff = .5
    if binary and logistic:
        predictions = [sigmoid(x) for x in np.dot(df, model)]
    else:
        predictions = np.dot(df, model)
    if binary:
        for i in range(len(predictions)):
            predictions[i] = 0 if predictions[i] < cutoff else 1
    return predictions
def pandas_to_data(df):
    """Convert a DataFrame to a plain list of per-row value lists."""
    rows = []
    for i in range(len(df)):
        series = df.iloc[i]
        rows.append([series[j] for j in range(len(series))])
    return rows
def debug_print(iters, nc, h, y):
    """Print convergence diagnostics for one gradient-descent pass.

    :param iters: iteration count
    :param nc: number of rows not yet converged
    :param h: predicted values
    :param y: actual values
    """
    diffs = 0
    error = mystats.get_error(h, y, 0)
    for i, pred in enumerate(y):
        diffs += abs(pred - h[i])
    # mean absolute deviation between predictions and truth
    distance = float(diffs)/len(h)
    print "actual"
    print y[:5]
    print "predicted"
    print h[:5]
    print 'loop: {} num not converged: {} distance: {} MSE: {}'.format(iters, nc, distance, error)
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "utils/GradientDescent.py",
"copies": "1",
"size": "3957",
"license": "mit",
"hash": -7000099899518504000,
"line_mean": 29.2061068702,
"line_max": 101,
"alpha_frac": 0.539802881,
"autogenerated": false,
"ratio": 3.283817427385892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9316388923743896,
"avg_score": 0.0014462769283992637,
"num_lines": 131
} |
__author__ = 'Allison MacLeay'
import numpy as np
import pandas as pd
import CS6140_A_MacLeay.Homeworks.hw2_new as hw2
import CS6140_A_MacLeay.utils.Stats as mystats
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def count_black(arr):
    """Build a summed-area-style table of black-pixel (==1) fractions.

    black[i][j] holds (approximately) the fraction of black pixels in the
    rectangle from (0, 0) to (i, j) inclusive.
    NOTE(review): the recurrence mixes the previous row's *fraction* with
    this row's running count; verify against a reference integral image
    before relying on exact values.
    """
    #print len(arr)
    black = np.zeros(shape=(len(arr), len(arr[0])))
    sum = 0  # NOTE(review): unused, and shadows the builtin
    for i in range(len(arr)):
        sum_black = 0  # running black count in row i, columns 0..j
        for j in range(len(arr[i])):
            if arr[i][j] == 1:
                sum_black += 1
            tot = (i + 1) * (j + 1)  # pixel count of the (0,0)-(i,j) rectangle
            prev = black[i - 1][j] if i > 0 else 0
            black[i][j] = float(prev * (j + 1) * i + sum_black)/ tot
    return black
def get_rect_coords(number, size=28):
    """Generate ``number`` random rectangles inside a size x size image.

    Each entry holds the four integral-image corner coordinates (one past
    the rectangle's edges) needed for area sums, NOT the rectangle's own
    corners. The width lower bound keeps the area from getting tiny.
    """
    # min area is 170
    coords = []
    for _ in range(number):
        height = np.random.randint(5, size)
        width = np.random.randint(130/height, size)
        top = np.random.randint(0, size - height)
        left = np.random.randint(0, size - width)
        # A, B, C, D corners for the summed-area lookup
        corner_set = [[left - 1, top - 1],
                      [left + width, top - 1],
                      [left - 1, top + height],
                      [left + width, top + height]]
        coords.append(corner_set)
    return coords
def get_features(barray, coords):
    """Two Haar-like features for a rectangle: (bottom minus top half,
    right minus left half) black amounts."""
    whole = get_black_amt(barray, coords)
    mid_x = (coords[1][0] - coords[0][0])/2 + coords[0][0]
    mid_y = (coords[2][1] - coords[0][1])/2 + coords[0][1]
    left_half = [coords[0], [mid_x, coords[1][1]], coords[2], [mid_x, coords[3][1]]]
    top_half = [coords[0], coords[1], [coords[2][0], mid_y], [coords[3][0], mid_y]]
    amt_left = get_black_amt(barray, left_half)
    amt_top = get_black_amt(barray, top_half)
    return whole - amt_top, whole - amt_left
def get_black_amt(barray, coords):
    """Black-pixel fraction of a rectangle from a summed-area table.

    ``coords`` are the four integral-image corners [A, B, C, D] (each an
    [x, y] pair); the rectangle's sum is D + A - B - C, normalized by the
    rectangle's pixel count. Negative sums clamp to 0.
    """
    width = coords[1][0] - coords[0][0]
    height = coords[3][1] - coords[0][1]
    size = width * height
    corner_sums = [barray[y][x] * (y + 1) * (x + 1) for x, y in coords]
    black = corner_sums[3] + corner_sums[0] - corner_sums[1] - corner_sums[2]
    return float(black) / size if black > 0 else 0
def remove_col(data, c):
    """Return a copy of row-major ``data`` with column ``c`` removed.

    Column positions are taken from the first row's length, as in the
    original.
    """
    return [[row[j] for j in range(len(data[0])) if j != c] for row in data]
def group_fold(data, skip):
    """Flatten every fold except ``data[skip]`` into a single row list."""
    merged = []
    for i, fold in enumerate(data):
        if i != skip:
            merged.extend(fold)
    return merged
class Ridge(object):
    """Scikit-style wrapper (fit/predict) around the HW2 linear ridge
    regression helpers in mystats."""
    def __init__(self):
        self.w = []  # learned weight vector
    def fit(self, X, y):
        """Learn ridge weights from features X and targets y."""
        frame = pd.DataFrame(X)
        self.w = mystats.get_linridge_w(frame, y, True)
    def predict(self, X):
        """Predict targets for X with the learned weights."""
        frame = pd.DataFrame(X)
        return mystats.predict(frame, self.w, True, [])
def lin_ridge_wrap(X, y):
    """Run the HW2 linear regression on X labeled by y; returns accuracy."""
    frame = pd.DataFrame(X)
    frame['is_spam'] = y
    return hw2.linear_reg(frame, 'is_spam', True, True)  # returns accuracy
def split_test_and_train(data, percentage):
    """Shuffle ``data`` and split it into two lists at ``percentage``.

    Returns [test, train]; the first list receives the first
    int(len * percentage) + 1 shuffled rows.
    """
    cut = int(len(data) * percentage)
    buckets = [[], []]
    order = np.arange(len(data))
    np.random.shuffle(order)
    for i in range(len(data)):
        which = 0 if i <= cut else 1
        buckets[which].append(data[order[i]])
    return buckets  # [ test_array, train_array ]
def show_rectangles(rects, fname='hw5_q5_rect.png'):
    """Draw rectangles on a 28x28 canvas and save the figure to ``fname``.

    ``rects`` uses the 4-corner integral-image format from
    get_rect_coords, so width/height are reconstructed with +1 offsets.
    """
    fig2 = plt.figure()
    ax2 = fig2.add_subplot(111, aspect='equal')
    ax2.set_ylim([0,28])
    ax2.set_xlim([0,28])
    for r in rects:
        corner = r[2]
        height = r[0][1] - r[2][1] + 1
        width = r[1][0] - r[0][0] + 1
        ax2.add_patch(
            patches.Rectangle(
                (corner[0], corner[1]),
                width,
                height,
                fill=False,       # remove background
                edgecolor=random_color()
            )
        )
    fig2.savefig(fname, dpi=90, bbox_inches='tight')
def random_color():
    """Random RGB tuple, each channel drawn uniformly from [0, 1)."""
    channels = (np.random.random(), np.random.random(), np.random.random())
    return channels
def get_margin_fractions_validate(ada, X, y):
    """Per-feature fraction of the total AdaBoost margin.

    :param ada: fitted AdaBoost instance
    :param X: data matrix
    :param y: ground-truth labels (any sign convention; normalized to +-1)
    :return: numpy array of length n_features with each feature's margin
        share
    NOTE(review): assumes ``ada`` exposes .alpha, .hypotheses, and .stump
    entries with a .feature attribute -- confirm against the AdaBoost
    implementation in use.
    """
    def normalize_labels(y):
        """Normalizes positive labels to +1 and negative labels to -1"""
        return np.asarray([1.0 if yi > 0 else -1.0 for yi in y])
    def single_sample_margin(ada, H, X, y, i, round_idx):
        """\
        Computes margin for a single sample using a given subset of AdaBoost rounds.
        :param ada: fitted AdaBoost instance
        :param H: matrix of predictions where rows are stumps from each AdaBoost round, and columns are
        the stumps' prediction for a sample
        :param X: data matrix
        :param y: ground truth
        :param i: index of sample for which to compute margin
        :param round_idx: which AdaBoost rounds to include in the sum
        :return: margin due to the sample, a float
        """
        if len(round_idx) == 0:
            return 0.0
        return np.sum([y[i] * ada.alpha[j] * H[j][i] for j in round_idx])
    def feature_margin(ada, H, X, y, feat_idx):
        """\
        Computes margin over all samples for a single feature.
        :param ada: fitted AdaBoost instance
        :param H: matrix of predictions where rows are stumps from each AdaBoost round, and columns are
        the stumps' prediction for a sample
        :param X: data matrix
        :param y: ground truth
        :param feat_idx: index of the feature for which to compute margin
        :return: margin for the feature, a float
        """
        all_rounds = np.arange(len(ada.stump))
        rounds_with_feat = np.asarray([round_idx
                                       for round_idx, stump in zip(all_rounds, ada.stump)
                                       if stump.feature == feat_idx])
        margin_f = np.sum([single_sample_margin(ada, H, X, y, sample_idx, rounds_with_feat)
                           for sample_idx in range(X.shape[0])])
        margin_total = np.sum([single_sample_margin(ada, H, X, y, sample_idx, all_rounds)
                               for sample_idx in range(X.shape[0])])
        return margin_f / margin_total
    X = np.asarray(X)
    y = normalize_labels(y)
    H = np.asarray([normalize_labels(h.predict(X)) for h in ada.hypotheses])
    return np.asarray([feature_margin(ada, H, X, y, feat_idx) for feat_idx in xrange(X.shape[1])])
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "Homeworks/HW5/__init__.py",
"copies": "1",
"size": "6755",
"license": "mit",
"hash": -3105942176935273000,
"line_mean": 31.6328502415,
"line_max": 142,
"alpha_frac": 0.5576609919,
"autogenerated": false,
"ratio": 3.225883476599809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9241069450610699,
"avg_score": 0.008495003577821875,
"num_lines": 207
} |
__author__ = 'Allison MacLeay'
import sys
import os
import argparse
import time
from glob import glob
from tempfile import mkdtemp
"""
Run a command on every file in a directory
"""
def get_cmd(fname, cmd, params):
    """Substitute the $INPUT/$OUT placeholders in *cmd* for one input file.

    $INPUT becomes the input file name; $OUT becomes
    <params['out']>/<params['name'] + fname>.
    """
    destination = os.path.join(params['out'], params['name'] + fname)
    return cmd.replace('$INPUT', fname).replace('$OUT', destination)
def bsub_cmd(user, log, flags):
    """Build the bsub prefix to prepend to a job command.

    :param user: user name passed to ``bsub -u``
    :param log: directory that receives lsf_out.log / lsf_err.log
    :param flags: extra bsub flags (e.g. ``-n 4``); may be empty
    :return: command prefix string ending in a single space

    Bug fix: the original concatenated *flags* between ``-o`` and its
    log-file argument, so bsub would have parsed the first token of the
    flags as the output file.  Flags are now emitted as their own
    argument group before ``-o``.
    """
    parts = ['bsub', '-q', 'medium', '-u', user]
    flags = flags.strip()
    if flags:  # keep user-supplied flags as a separate space-delimited group
        parts.append(flags)
    parts.append('-o')
    parts.append(os.path.join(log, 'lsf_out.log'))
    parts.append('-e')
    parts.append(os.path.join(log, 'lsf_err.log'))
    return ' '.join(parts) + ' '
def get_names(path, pattern):
    """Return all paths under *path* matching the glob *pattern*."""
    search = os.path.join(path, pattern)
    return glob(search)
def check_done(file_num, path):
    """
    Delay completion of script until all
    files are written.

    Polls *path* every 5 seconds until exactly *file_num* files exist and
    none of them looks temporary, or until 24 hours have elapsed.
    """
    start = time.time()
    timeout = (24 * 60 * 60)  # 24 hours
    done = 0
    while done == 0:
        # next(os.walk(path))[2] is the list of plain files directly in path.
        files = next(os.walk(path))[2]
        all_closed = 0
        if len(files) == file_num:
            all_closed = 1
        for f in files:
            # NOTE(review): find('tmp') > 0 misses names that *start* with
            # 'tmp' (index 0) — presumably aimed at '*.tmp'-style names; confirm.
            if f.find('tmp') > 0:
                all_closed = 0
                print 'found tmp file ' + f + '. Waiting...'
                continue
        if all_closed == 1:
            done = 1
        elif (time.time() - start) > timeout:
            done = 1
            print 'Job timed out'
        else:
            time.sleep(5)
            print 'checking for job completion after waiting %d seconds' % (time.time() - start)
            print 'searching for ' + str(file_num) + ', found ' + str(len(files)) + ' files in ' + path
if __name__ == '__main__':
    # Parse CLI options, then run the given command (optionally via bsub)
    # once per file matching --pattern in --dir.
    parser = argparse.ArgumentParser(prog="Batch command helper",
                                     description="Run umitag utility in batches of similarly prefixed names. "
                                                 "Pass in a command in the format ls $INPUT >> $OUT")
    parser.add_argument('--dir', default='.', help='directory containing input files')
    parser.add_argument('--cmd', default='ls $INPUT',
                        help='command to run on each file in the specified directory')
    parser.add_argument('--out', default='tmp', help='directory to deposit output files')
    parser.add_argument('--batch_name', default='batch', help='name to prepend to processed files')
    parser.add_argument('--pattern', default='*.*', help='match pattern for input files. default=*.*')
    parser.add_argument('--verbose', action='store_true', help='verbosity. default=False')
    # optional bsub options
    parser.add_argument('--bsub', action='store_true', help='use bsub')
    parser.add_argument('--log', help='directory to deposit bsub log files')
    parser.add_argument('--bsub_user', default='am282', help='user name for bsub command. default=am282')
    parser.add_argument('--bsub_mod', default='', help='extra parameters for bsub command')
    parser.add_argument('--output_count', help='number of expected output files per input. Process will wait to '
                                               'complete until all files are created. Leave this flag out to avoid '
                                               'the process waiting')
    args = parser.parse_args()
    if args.out == 'tmp':
        print 'WARNING: Output directory was not supplied. Temporary directory will be used instead.'
    p = {}
    p['path'] = args.dir
    p['name'] = args.batch_name
    if args.out != 'tmp':
        # NOTE(review): p['out'] has not been assigned at this point, so this
        # branch raises KeyError whenever --out is supplied; it likely needs
        # p['out'] = args.out before the mkdir.
        os.system('mkdir -p {}'.format(p['out']))
    else:
        p['out'] = mkdtemp()
    # bsub options
    if args.bsub:
        # NOTE(review): argparse always defines args.log (default None), so
        # hasattr() never fails here — the intended check is probably
        # `args.log is None`.
        if not hasattr(args, 'log'):
            raise Exception("ERROR: If using bsub you must specify a directory for the log files.")
        os.system('mkdir -p {}'.format(args.log))
        os.system('ls {} >> {}'.format(p['path'], os.path.join(args.log, 'ls_inputdir.txt')))
    # NOTE(review): same hasattr() caveat — when --output_count is omitted the
    # attribute is None and int(None) raises TypeError.
    expected_output = int(args.output_count) if hasattr(args, 'output_count') else 0
    files = get_names(args.dir, args.pattern)
    if len(files) < 1:
        print "Error: No file prefixes were found in {}.".format(args.dir)
    count_lsf = 0
    for fname in files:
        if (fname.find('undetermined') > -1):
            # skip undetermined reads for now
            cmd = 'echo skipping undetermined files'
        elif not args.bsub:
            cmd = get_cmd(fname, args.cmd, p)
        else:
            cmd = bsub_cmd(args.bsub_user, args.log, args.bsub_mod) + get_cmd(fname, args.cmd, p)
        # Keep track of lsf job for listener
        count_lsf += expected_output  # stays 0 if the process will not wait until completion
        if args.verbose:
            print 'batch command helper running command:\n' + cmd
        os.system(cmd)
    if count_lsf > 0:
        check_done(count_lsf, p['out'])
    if args.verbose:
        print 'batch_process done'
| {
"repo_name": "alliemacleay/misc",
"path": "batch_command.py",
"copies": "1",
"size": "5201",
"license": "mit",
"hash": 3344739154065286000,
"line_mean": 38.4015151515,
"line_max": 135,
"alpha_frac": 0.5566237262,
"autogenerated": false,
"ratio": 3.907588279489106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9954791705028064,
"avg_score": 0.0018840601322082755,
"num_lines": 132
} |
__author__ = 'Allison MacLeay'
from sklearn.tree import DecisionTreeClassifier
import CS6140_A_MacLeay.Homeworks.HW4 as decTree
import CS6140_A_MacLeay.Homeworks.HW4 as hw4
import numpy as np
class BoostRound():
    """One AdaBoost round: fit a weak learner, compute its (weighted) error,
    alpha_t, and the updated sample-weight distribution Dt(x).

    Subclasses must implement fit(); see BoostRoundRandom / BoostRoundOptimal.
    """
    def __init__(self, adaboost, round_number):
        # `round_number` is accepted but not stored — kept for caller symmetry.
        self.learner = adaboost.learner
        self.error = 1 # unweighted error
        self.errors_weighted = [] # weighted errors before normalization
        self.weight_distribution = [] # Dt(x)
        self.err_matrix = [] # 0 if incorrect, 1 if correct
        self.alpha = 0 # alpha t
        self.converged = False
        self.stump = None # Decision stump (feature, threshold pair)
    def run(self, f_data, truth, weights):
        """Execute one full boosting round, populating error/alpha/stump state."""
        model = self.fit(f_data, truth, weights)
        predicted = self.predict(model, f_data) # {-1, 1}
        #self.stump = self.get_decision_stump(predicted, truth)
        # Error matrix for round computed from test data
        self.err_matrix = self.compute_error_matrix(truth, predicted)
        self.error = self.get_error(self.err_matrix) # 1 if correct, else 0
        self.set_alpha(weights)
        self.errors_weighted = self.weight_errors(self.err_matrix, weights)
        self.set_weight_distribution_and_total(weights) # Dt(x) and epsilon
        # A negative feature index means sklearn produced a leaf-only tree (no split).
        if model.tree_.feature[0] < 0:
            raise ValueError('oops')
        self.stump = hw4.DecisionStump(model.tree_.feature[0], model.tree_.threshold[0])
    def fit(self, data, truth, weights):
        """Abstract: subclasses return a fitted depth-1 model."""
        raise NotImplementedError
    def predict(self, model, data):
        """Predict with `model` and clamp labels to {-1, 1}."""
        # {-1, 1}
        predicted = model.predict(data)
        for i in range(len(predicted)):
            if predicted[i] > 0:
                predicted[i] = 1
            else:
                predicted[i] = -1
        return predicted
        #return self.test_predict(model, data)
    def test_predict(self, model, data):
        """Debug helper: predict all ones, ignoring the model."""
        predicted = np.ones(len(data))
        for i in range(len(predicted)):
            predicted[i] = 1
        return predicted
    def compute_error_matrix(self, truth, predicted):
        """ returns {0, 1}
        (1 where the prediction matches truth, 0 where it does not)
        """
        err_matrix = np.ones(len(truth))
        for i in range(len(truth)):
            if truth[i] != predicted[i]:
                err_matrix[i] = 0
        return err_matrix
    def get_error(self, err_matrix):
        """Unweighted error rate: fraction of samples marked 0 (wrong)."""
        return 1 - float(sum(err_matrix))/len(err_matrix)
    def weight_errors(self, err_matrix, weights):
        """Scale each sample's weight up if misclassified, down if correct."""
        weighted = []
        # Error matrix is inverted # 0 if error, 1 if correct
        # NOTE(review): uses exp(+/-1) rather than the textbook exp(+/-alpha)
        # — confirm this simplification is intended (see 'TODO fix alphas').
        for i in range(len(err_matrix)):
            weighted.append(weights[i] * np.exp(1 if err_matrix[i]==0 else -1))
        return weighted
    def set_weight_distribution_and_total(self, last_weights):
        """Normalize the weighted errors into the distribution Dt(x)."""
        # NOTE(review): sum_weights is computed but never used.
        sum_weights = sum(self.errors_weighted)
        # Compute Error function - wikipedia version - not correct
        #wd = [self.errors_weighted[i] * np.exp(-self.alpha if self.err_matrix[i]==1 else self.alpha)
        #     for i in range(len(self.errors_weighted))]
        #sum_wd = sum(wd)
        #self.weight_distribution = [float(w)/sum_wd for w in wd]
        sum_wd = sum(self.errors_weighted)
        self.weight_distribution = [float(w)/sum_wd for w in self.errors_weighted]
        if np.any(np.isnan(self.weight_distribution)):
            raise ValueError('nans in weights')
    def set_alpha(self, weights):
        """alpha_t = 0.5 * ln((1 - eps) / eps); raises on degenerate epsilon."""
        #TODO fix alphas
        epsilon = self.get_epsilon(weights)
        if epsilon == 0 or epsilon >= 1:
            raise ValueError('oops')
        else:
            self.alpha = .5 * np.log( (1 - epsilon) / epsilon)
    def get_epsilon(self, weights):
        """Weighted error: sum of weights on misclassified samples / sample count."""
        # NOTE(review): dividing by len(weights) assumes the weights are scaled
        # by n rather than summing to 1 — verify against the caller.
        epsilon = 0
        for i, is_correct in enumerate(self.err_matrix):
            if is_correct==0:
                epsilon += weights[i]
        return float(epsilon)/len(weights)
class BoostRoundRandom(BoostRound):
    """BoostRound that fits a randomly chosen depth-1 decision stump."""
    def fit(self, data, truth, weights):
        """Fit a random depth-1 tree (decision stump) to the weighted data."""
        model = ''
        # create decision stumps
        print 'BoostRoundRandom'
        #model = DecisionTreeRegressor(max_depth=3)
        model = decTree.TreeRandom(max_depth=1)
        model.fit(data, truth, weights)
        return model
class BoostRoundOptimal(BoostRound):
    """BoostRound that fits the optimal depth-1 stump via sklearn."""
    def fit(self, data, truth, weights):
        """Fit a depth-1 sklearn DecisionTreeClassifier with sample weights."""
        model = ''
        # create decision stumps
        print 'BoostRoundOptimal'
        model = DecisionTreeClassifier(max_depth=1)
        #model = decTree.TreeOptimal(max_depth=1)
        model.fit(data, truth, sample_weight=weights)
        return model
| {
"repo_name": "alliemacleay/MachineLearning_CS6140",
"path": "utils/AdaboostRound.py",
"copies": "1",
"size": "4485",
"license": "mit",
"hash": -434915011835039000,
"line_mean": 32.7218045113,
"line_max": 101,
"alpha_frac": 0.6028985507,
"autogenerated": false,
"ratio": 3.685291700903862,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4788190251603862,
"avg_score": null,
"num_lines": null
} |
__author__ = 'allyjweir'
import os
import errno
import pdb
import textract
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.conf import settings
import magic
def make_sure_path_exists(path):
    """Create *path* (and any missing parents); ignore it already existing."""
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            pass  # already present (possibly created by a concurrent process)
        else:
            raise
def get_text(InMemoryUploadedFile):
    '''Extract text from an uploaded file using textract.

    Textract requires a path to a real file, so the upload is written to a
    temporary location under MEDIA_ROOT, processed, and the temporary copy
    removed again whether or not extraction succeeds.

    Returns the extracted text, or None if textract could not read the file.

    Bug fix: the original only deleted the temporary copy on failure, so
    every successful call leaked a file under tmp/.  It also deleted by the
    requested name rather than the name default_storage actually saved
    under (which may differ on collision), and re-bound `path` to the
    (useless) return value of delete().
    '''
    path = default_storage.save('tmp/' + InMemoryUploadedFile.name, ContentFile(InMemoryUploadedFile.read()))
    text = None
    try:
        text = textract.process(os.path.join(settings.MEDIA_ROOT, path))
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        print ("textract failed to read the file")
    finally:
        # Always remove the temporary copy, by the name it was saved under.
        default_storage.delete(path)
    return text
'''
Django docs suggest trusting the user uploads a valid file but verify this to make sure that the file is safe to store.
This is done using the python-magic library.
'''
def is_file_valid(InMemoryUploadedFile):
    """Verify the browser-reported MIME type against libmagic's verdict.

    Returns True when the content type claimed by the upload matches the
    type detected from the file's actual bytes, False otherwise.
    """
    claimed = InMemoryUploadedFile.content_type
    detector = magic.Magic(mime=True)
    detected = detector.from_buffer(InMemoryUploadedFile.read())
    return claimed == detected
'''
This matches the Mime type to one of the options in the database model for Datapoint.
Will extract the first section from the Mime Type (for example 'image' from 'image/png') and use this to define the file's type.
Using this as a reference: http://en.wikipedia.org/wiki/Internet_media_type#List_of_common_media_types
'''
def get_filetype(InMemoryUploadedFile):
    """Map the upload's MIME type onto the Datapoint model's file types.

    'application/pdf' maps to 'pdf' (its structure differs from the other
    MIME types we care about); everything else maps to its major type,
    e.g. 'image/png' -> 'image', 'text/plain' -> 'text'.
    """
    pieces = InMemoryUploadedFile.content_type.split("/")
    return pieces[1] if pieces[1] == "pdf" else pieces[0]
'''
Made under the assumption that the thing after the last "." will be the filetype.
This works even if it tries something silly like "file.jpg.pdf". It will still extract the "pdf" bit.
'''
def get_file_extension(InMemoryUploadedFile):
    """Return the text after the final '.' in the upload's file name.

    Works even for names like 'file.jpg.pdf' (yields 'pdf'); a name with
    no dot is returned unchanged.
    """
    return InMemoryUploadedFile.name.rsplit(".", 1)[-1]
| {
"repo_name": "allyjweir/lackawanna",
"path": "lackawanna/datapoint/file_import.py",
"copies": "1",
"size": "2600",
"license": "bsd-3-clause",
"hash": -1077739569369585800,
"line_mean": 32.3333333333,
"line_max": 242,
"alpha_frac": 0.7169230769,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.52169230769,
"avg_score": null,
"num_lines": null
} |
"""
This file can be used to denoise single channel images using approximate
MAX-SUM inference on a Markov Random Field.
"""
import numpy as np
from scipy import sparse
from scipy.misc.pilutil import imread, imsave
import pylab
import models
import cliques
import inference
import potentials
from general import subv2ind
import time
import psyco
psyco.full()
def prompt(prompt):
    """
    Print *prompt* to the console and return the user's reply.

    Input is terminated by the 'Enter' key; surrounding whitespace is
    stripped from the captured string before it is returned.

    Parameters
    ----------
    prompt: String
        A message to prompt the user what to enter. (String)
    """
    reply = raw_input(prompt)
    return reply.strip()
def set_adj_mat_entry(adj_mat, ind):
    """Set edge ind[0] -> ind[1] unless the reverse edge already exists.

    Keeps the adjacency matrix free of doubled (symmetric) entries; the
    (possibly updated) matrix is returned.
    """
    src, dst = ind[0], ind[1]
    if adj_mat[dst, src] != 1:
        adj_mat[src, dst] = 1
    return adj_mat
def create_lattice_sparse_adj_matrix(rows, cols, layers=1):
    """
    Creates an adjacency matrix for a lattice shaped graph with an
    arbitrary number of rows and columns.

    Parameters
    ----------
    rows, cols: Int
        Lattice dimensions (per layer).
    layers: Int
        Number of node layers allocated (hidden + observed).

    NOTE:
    The subv2ind routine used in the function works in column-major order,
    and this function assumes row-major order, therefore row and column
    indices have been switched round when using the subv2ind function.
    """
    # NOTE(review): `layers` only scales the allocation; the observed-node
    # wiring at the bottom pairs the two halves of the matrix and therefore
    # assumes layers == 2 — confirm before calling with other values.
    """Create adjacency matrix"""
    adj_mat = sparse.lil_matrix((rows*cols*layers, rows*cols*layers), dtype=int)
    """Assign the 2 edges for top-left node"""
    adj_mat[0, 1] = 1
    adj_mat[0, cols] = 1
    """Assign the 2 edges for top-right node"""
    ind = subv2ind(np.array([cols, rows]), np.array([cols-1, 0]))[0, 0]
    temp_ind = subv2ind(np.array([cols, rows]), np.array([cols-2, 0]))[0, 0]
    adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
    temp_ind = subv2ind(np.array([cols, rows]), np.array([cols-1, 1]))[0, 0]
    adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
    """Assign the 2 edges for bottom-left node"""
    ind = subv2ind(np.array([cols, rows]), np.array([0, rows-1]))[0, 0]
    temp_ind = subv2ind(np.array([cols, rows]), np.array([0, rows-2]))[0, 0]
    adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
    temp_ind = subv2ind(np.array([cols, rows]), np.array([1, rows-1]))[0, 0]
    adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
    """Assign the 2 edges for bottom_right node"""
    ind = subv2ind(np.array([cols, rows]), np.array([cols-1, rows-1]))[0, 0]
    temp_ind = subv2ind(np.array([cols, rows]), np.array([cols-2, rows-1]))[0, 0]
    adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
    temp_ind = subv2ind(np.array([cols, rows]), np.array([cols-1, rows-2]))[0, 0]
    adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
    """Assign the 3 edges for each left border nodes"""
    for i in range(1, rows-1):
        ind = subv2ind(np.array([cols, rows]), np.array([0, i]))[0, 0]
        temp_ind = subv2ind(np.array([cols, rows]), np.array([0, i-1]))[0, 0]
        adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
        temp_ind = subv2ind(np.array([cols, rows]), np.array([0, i+1]))[0, 0]
        adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
        adj_mat[ind, ind+1] = 1
    """Assign the 3 edges for each right border nodes"""
    for i in range(1, rows-1):
        ind = subv2ind(np.array([cols, rows]), np.array([cols-1, i]))[0, 0]
        temp_ind = subv2ind(np.array([cols, rows]), np.array([cols-1, \
                            i-1]))[0, 0]
        adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
        temp_ind = subv2ind(np.array([cols, rows]), np.array([cols-1, \
                            i+1]))[0, 0]
        adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
        adj_mat = set_adj_mat_entry(adj_mat, [ind, ind-1])
    """Assign the 3 edges for each top border nodes"""
    for i in range(1, cols-1):
        ind = subv2ind(np.array([cols, rows]), np.array([i, 0]))[0, 0]
        adj_mat = set_adj_mat_entry(adj_mat, [ind, ind-1])
        adj_mat = set_adj_mat_entry(adj_mat, [ind, ind+1])
        temp_ind = subv2ind(np.array([cols, rows]), np.array([i, 1]))[0, 0]
        adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
    """Assign the 3 edges for each bottom border nodes"""
    for i in range(1, cols-1):
        ind = subv2ind(np.array([cols, rows]), np.array([i, rows-1]))[0, 0]
        adj_mat = set_adj_mat_entry(adj_mat, [ind, ind-1])
        adj_mat = set_adj_mat_entry(adj_mat, [ind, ind+1])
        temp_ind = subv2ind(np.array([cols, rows]), np.array([i, rows-2]))[0, 0]
        adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
    """Assign edges for inner, fully-connected nodes"""
    for i in range(1, rows-1):
        for j in range(1, cols-1):
            ind = subv2ind(np.array([cols, rows]), np.array([j, i]))[0, 0]
            temp_ind = subv2ind(np.array([cols, rows]), np.array([j, i-1]))[0, 0]
            adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
            adj_mat = set_adj_mat_entry(adj_mat, [ind, ind-1])
            adj_mat = set_adj_mat_entry(adj_mat, [ind, ind+1])
            temp_ind = subv2ind(np.array([cols, rows]), np.array([j, i+1]))[0, 0]
            adj_mat = set_adj_mat_entry(adj_mat, [ind, temp_ind])
    """Assign the edges to the observed nodes"""
    # Pair hidden node i with its observed counterpart i + n/2.
    # NOTE(review): Python 2 integer division; under Python 3 shape[0]/2 is a
    # float and both range() and the index arithmetic would fail (use //).
    for i in range(0, adj_mat.shape[0]/2):
        adj_mat[i, i + adj_mat.shape[0]/2] = 1
    return adj_mat
def create_img_prob_table(depth, sigma=0.5):
    """
    Creates a lookup table of probabilities for a 2 node clique in the
    ising model used for graphical model image denoising.

    Entries are log-potential style values: highest (0) on the diagonal
    where both pixel values agree, decreasing as the values diverge.

    Parameters
    ----------
    depth: Int
        The number of different values a pixel can take.
    sigma: Float
        Co-efficient used in creating the table.
    """
    """Initialize the table to the right size"""
    prob_tbl = np.zeros((depth, depth))
    """
    Fill the table with values, where the probability of the two nodes in
    the clique having the same value is the highest, and the probability
    of the nodes having completley opposite values is the lowest.
    """
    for i in range(0, depth):
        for j in range(0, depth):
            # NOTE(review): operator precedence makes this (-(i-j)**2 / 2) * sigma,
            # i.e. it MULTIPLIES by sigma; a Gaussian log-potential would be
            # -(i-j)**2 / (2*sigma**2).  Also, under Python 2 the division of
            # the two ints is floor division.  Confirm intent before changing.
            prob_tbl[i, j] = (-1*(i - j)**2)/2*sigma
    return prob_tbl
def denoise_image(img, depth=255, max_iter=10):
    """
    Denoises a single channel image.

    Builds a two-layer MRF (hidden denoised pixels + observed noisy pixels),
    runs loopy max-sum belief propagation, and returns the most likely
    configuration of the hidden layer as the denoised image.

    Parameters
    ----------
    img: Numpy array
        The image to denoise.
    depth: Int
        The number of different values a pixel can take.
    max_iter: Int
        The maximum number of times the inference algorithm can iterate.
    """
    """
    Create adjacency matrix representation of the hidden lattice graph that
    represents the denoised image.
    """
    print "Creating initial adjacency matrix..."
    adj_mat = create_lattice_sparse_adj_matrix(img.shape[0], img.shape[1], 2)
    print "Determine the cliques from the adjacency matrix..."
    """Get the cliques as a list"""
    # Walk the LIL rows; each stored column `col` in row `i` yields edge [i, col].
    clq_doms = []
    i = 0
    for cols in adj_mat.rows:
        if len(cols) > 0:
            for col in cols:
                new_clique = [i, col]
                new_clique.sort()
                clq_doms.append(new_clique)
        i = i + 1
    """Create list of node sizes"""
    print "Creating list of node sizes..."
    ns = depth * np.ones((1, img.shape[0]*img.shape[1]*2))
    """Create list of cliques and assign potentials to them"""
    print "Creating the list of cliques and their potentials..."
    """Image model"""
    T_img = create_img_prob_table(depth, 1)
    """Noise model"""
    T_noise = create_img_prob_table(depth, 5)
    clqs = []
    # Indices of the observed (noisy-pixel) layer: the second half of the nodes.
    outer_layer = range(img.shape[0]*img.shape[1], img.shape[0]*img.shape[1]*2)
    for i in range(0, len(clq_doms)):
        # Hidden-observed cliques use the image model; hidden-hidden use the
        # noise (smoothness) model.
        if clq_doms[i][1] in outer_layer:
            clqs.append(cliques.discrete_clique(i, clq_doms[i], \
                                                np.array([depth, depth]),\
                                                T_img))
        else:
            clqs.append(cliques.discrete_clique(i, clq_doms[i], \
                                                np.array([depth, depth]), \
                                                T_noise))
    """Create the MRF object and set the lattice flag to TRUE"""
    print "Creating MRF..."
    net = models.mrf(adj_mat, ns, clqs, lattice=True)
    """Initialize the inference engine to be approximate"""
    net.init_inference_engine(exact=False, max_iter=max_iter)
    """Create the evidence, with the noisy nodes being observed"""
    evidence = img.flatten().tolist()
    N = len(evidence)
    # Prepend one empty (unobserved) entry per hidden node; the observed
    # noisy-pixel values occupy the second half of the node list.
    for i in range(0, N):
        evidence.insert(0, [])
    """Run loopy-belief propagation"""
    print "Running loopy belief propagation..."
    mlc = net.max_sum(evidence)
    """
    Extract denoised image from most likely configuaration of the hidden
    nodes.
    """
    print "Extracting denoised image..."
    new_img = np.array(mlc[0:img.shape[0]*img.shape[1]])
    new_img = np.array(new_img.reshape(img.shape[0], img.shape[1]))
    """Delete objects"""
    del img
    del adj_mat
    del net
    return new_img
if __name__ == '__main__':
    # Denoise 'noisy.png' either whole (small images) or via overlapping
    # sliding windows, saving intermediate results after each window.
    """Define the noisy image location"""
    in_fname = 'noisy.png'
    """Define the output path to save intermediate denoised images to"""
    # NOTE(review): Windows-style path separators — non-portable.
    out_fname = '.\\output\\'
    """Load the image"""
    img = np.array(imread(in_fname, 1)/255, dtype=int)
    """Determine the images depth"""
    depth = np.max(img)+1
    """Define the sliding window size"""
    seg_size = 100
    """
    If the image is smaller that the sliding window, then just denoise the
    whole image
    """
    if img.shape[0]<seg_size:
        new_img = denoise_image(img)
        imsave(out_fname + str(1) + '.png', new_img)
    else:
        """Denoise the image in overlapping segments"""
        count = 0
        cut = int(float(seg_size)/2)
        for i in range(cut, img.shape[0], cut):
            for j in range(cut, img.shape[1], cut):
                """Extract the window to denoise"""
                sub_img = img[i-cut:i+cut, j-cut:j+cut]
                """Denoise the window"""
                new_img = denoise_image(sub_img, depth, 6)
                """
                Place the denoised window back into the noisy image, except
                for the leading edge pixels of the window.
                """
                img[(i-cut):(i+cut-1), (j-cut):(j+cut-1)] = \
                    new_img[0:seg_size-1, 0:seg_size-1]
                """Compensate for edge cases"""
                if (i + cut) == img.shape[0]:
                    img[i+cut-1, j-cut:j+cut] = \
                        new_img[seg_size-1, 0:seg_size]
                if (j + cut) == img.shape[1]:
                    img[i-cut:i+cut, j+cut-1] = \
                        new_img[0:seg_size, seg_size-1]
                print "Saving partially denoised image..."
                imsave(out_fname + str(count+1) + '.png', img)
                count = count + 1
    prompt("Press enter to exit...")
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Non-trivial/batch_mrf_image_denoising.py",
"copies": "1",
"size": "11529",
"license": "mit",
"hash": -4410897609154473000,
"line_mean": 35.5537459283,
"line_max": 81,
"alpha_frac": 0.5510451904,
"autogenerated": false,
"ratio": 3.345618107951248,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4396663298351248,
"avg_score": null,
"num_lines": null
} |
"""
This is a tutorial on how to create a Bayesian network, and perform
approximate MAX-SUM inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cpds
if __name__ == '__main__':
    """
    This example is based on the lawn sprinkler example, and the Bayesian
    network has the following structure, with all edges directed downwards:
                      Cloudy - 0
                     /         \
                    /           \
                   /             \
          Sprinkler - 1        Rainy - 2
                   \             /
                    \           /
                     \         /
                     Wet Grass -3
    """
    """Assign a unique numerical identifier to each node"""
    C = 0
    S = 1
    R = 2
    W = 3
    """Assign the number of nodes in the graph"""
    nodes = 4
    """
    The graph structure is represented as a adjacency matrix, dag.
    If dag[i, j] = 1, then there exists a directed edge from node
    i and node j.
    """
    dag = np.zeros((nodes, nodes))
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    """
    Define the size of each node, which is the number of different values a
    node could observed at. For example, if a node is either True of False,
    it has only 2 possible values it could be, therefore its size is 2. All
    the nodes in this graph has a size 2.
    """
    ns = 2 * np.ones(nodes)
    """
    We now need to assign a conditional probability distribution to each
    node.
    """
    node_cpds = [[], [], [], []]
    """Define the CPD for node 0"""
    CPT = np.array([0.5, 0.5])
    node_cpds[C] = cpds.TabularCPD(CPT)
    """Define the CPD for node 1"""
    # NOTE(review): this CPT is assigned to R (node 2), not node 1; the
    # numbered labels here and below are out of step with S=1/R=2 as coded.
    CPT = np.array([[0.8, 0.2], [0.2, 0.8]])
    node_cpds[R] = cpds.TabularCPD(CPT)
    """Define the CPD for node 2"""
    CPT = np.array([[0.5, 0.5], [0.9, 0.1]])
    node_cpds[S] = cpds.TabularCPD(CPT)
    """Define the CPD for node 3"""
    CPT = np.array([[[1, 0], [0.1, 0.9]], [[0.1, 0.9], [0.01, 0.99]]])
    node_cpds[W] = cpds.TabularCPD(CPT)
    """Create the Bayesian network"""
    net = models.bnet(dag, ns, node_cpds = node_cpds)
    """
    Intialize the BNET's inference engine to use APPROXIMATE inference
    by setting exact=false.
    """
    net.init_inference_engine(exact=False)
    """Define observed evidence ([] means that node is unobserved)"""
    evidence = [None, 0, None, None]
    """Execute the sum-product algorithm"""
    net.sum_product(evidence)
    """
    Print out the marginal probability of each node.
    """
    marginal = net.marginal_nodes([C])
    print 'Probability it is cloudy: ', marginal.T[1]*100, '%'
    marginal = net.marginal_nodes([S])
    # Hard-coded 0 because S (index 1) is the node observed in `evidence`.
    print 'Probability the sprinkler is on: ', 0, '%' #Observed node
    marginal = net.marginal_nodes([R])
    print 'Probability it is raining: ',marginal.T[1]*100, '%'
    marginal = net.marginal_nodes([W])
    print 'Probability the grass is wet: ', marginal.T[1]*100, '%'
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/BNET/Inference/Approximate/Tut_BNET_sumproduct.py",
"copies": "1",
"size": "3253",
"license": "mit",
"hash": 5684060546605169000,
"line_mean": 29.5825242718,
"line_max": 75,
"alpha_frac": 0.5351982785,
"autogenerated": false,
"ratio": 3.479144385026738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9490168069076704,
"avg_score": 0.004834918890006794,
"num_lines": 103
} |
"""
This is a tutorial on how to create a Bayesian network, and perform
exact MAX-SUM inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
from sprinkler_data import sprinkler_evidence, sprinkler_mpe
"""Import the GrMPy modules"""
import models
import inference
import cpds
def test_bnet_maxsum():
    """
    Testing: MAX-SUM on BNET

    Builds the lawn-sprinkler Bayesian network, runs exact max-sum for every
    evidence case from sprinkler_data, and checks each most-likely
    configuration against the expected MPE.

    This example is based on the lawn sprinkler example, and the Bayesian
    network has the following structure, with all edges directed downwards:
                      Cloudy - 0
                     /         \
                    /           \
                   /             \
          Sprinkler - 1        Rainy - 2
                   \             /
                    \           /
                     \         /
                     Wet Grass -3
    """
    """Assign a unique numerical identifier to each node"""
    C = 0
    S = 1
    R = 2
    W = 3
    """Assign the number of nodes in the graph"""
    nodes = 4
    """
    The graph structure is represented as a adjacency matrix, dag.
    If dag[i, j] = 1, then there exists a directed edge from node
    i and node j.
    """
    dag = np.zeros((nodes, nodes))
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    """
    Define the size of each node, which is the number of different values a
    node could observed at. All the nodes in this graph have size 2.
    """
    node_sizes = 2 * np.ones(nodes)
    """
    We now need to assign a conditional probability distribution to each
    node.
    """
    node_cpds = [[], [], [], []]
    """Define the CPD for node C"""
    CPT = np.array([0.5, 0.5])
    node_cpds[C] = cpds.TabularCPD(CPT)
    """Define the CPD for node R"""
    CPT = np.array([[0.8, 0.2], [0.2, 0.8]])
    node_cpds[R] = cpds.TabularCPD(CPT)
    """Define the CPD for node S"""
    CPT = np.array([[0.5, 0.5], [0.9, 0.1]])
    node_cpds[S] = cpds.TabularCPD(CPT)
    """Define the CPD for node W"""
    CPT = np.array([[[1, 0], [0.1, 0.9]], [[0.1, 0.9], [0.01, 0.99]]])
    node_cpds[W] = cpds.TabularCPD(CPT)
    """Create the Bayesian network"""
    net = models.bnet(dag, node_sizes, node_cpds=node_cpds)
    """
    Intialize the BNET's inference engine to use EXACT inference
    by setting exact=True.
    """
    net.init_inference_engine(exact=True)
    """Create and enter evidence ([] means that node is unobserved)"""
    all_ev = sprinkler_evidence()
    all_mpe = sprinkler_mpe()
    errors = 0
    for count, evidence in enumerate(all_ev):
        """Execute the max-sum algorithm"""
        mlc = net.max_sum(evidence)
        mpe = all_mpe[count]
        # Bug fix: the original accumulated SIGNED differences, which can
        # cancel out (one node off by +1 and another by -1 sums to 0 and the
        # test would wrongly pass).  Sum absolute differences instead so any
        # mismatch is detected.
        errors = errors + np.sum(np.abs(np.array(mpe) - np.array(mlc)))
    assert errors == 0
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Tests/test_BNET_maxsum.py",
"copies": "1",
"size": "3128",
"license": "mit",
"hash": -5845971085484897000,
"line_mean": 27.7904761905,
"line_max": 75,
"alpha_frac": 0.5246163683,
"autogenerated": false,
"ratio": 3.467849223946785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4492465592246785,
"avg_score": null,
"num_lines": null
} |
"""
This is a tutorial on how to create a Bayesian network, and perform
exact MAX-SUM inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cpds
if __name__ == '__main__':
    """
    This example is based on the lawn sprinkler example, and the Bayesian
    network has the following structure, with all edges directed downwards:
                      Cloudy - 0
                     /         \
                    /           \
                   /             \
          Sprinkler - 1        Rainy - 2
                   \             /
                    \           /
                     \         /
                     Wet Grass -3
    """
    """Assign a unique numerical identifier to each node"""
    C = 0
    S = 1
    R = 2
    W = 3
    """Assign the number of nodes in the graph"""
    nodes = 4
    """
    The graph structure is represented as a adjacency matrix, dag.
    If dag[i, j] = 1, then there exists a directed edge from node
    i and node j.
    """
    dag = np.zeros((nodes, nodes))
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    """
    Define the size of each node, which is the number of different values a
    node could observed at. For example, if a node is either True of False,
    it has only 2 possible values it could be, therefore its size is 2. All
    the nodes in this graph has a size 2.
    """
    node_sizes = 2 * np.ones(nodes)
    """
    We now need to assign a conditional probability distribution to each
    node.
    """
    node_cpds = [[], [], [], []]
    """Define the CPD for node 0"""
    CPT = np.array([0.5, 0.5])
    node_cpds[C] = cpds.TabularCPD(CPT)
    """Define the CPD for node 1"""
    # NOTE(review): this CPT is assigned to R (node 2), not node 1; the
    # numbered labels here and below are out of step with S=1/R=2 as coded.
    CPT = np.array([[0.8, 0.2], [0.2, 0.8]])
    node_cpds[R] = cpds.TabularCPD(CPT)
    """Define the CPD for node 2"""
    CPT = np.array([[0.5, 0.5], [0.9, 0.1]])
    node_cpds[S] = cpds.TabularCPD(CPT)
    """Define the CPD for node 3"""
    CPT = np.array([[[1, 0], [0.1, 0.9]], [[0.1, 0.9], [0.01, 0.99]]])
    node_cpds[W] = cpds.TabularCPD(CPT)
    """Create the Bayesian network"""
    net = models.bnet(dag, node_sizes, node_cpds=node_cpds)
    """
    Intialize the BNET's inference engine to use EXACT inference
    by setting exact=True.
    """
    net.init_inference_engine(exact=True)
    """Define observed evidence ([] means that node is unobserved)"""
    evidence = [None, 0, None, None]
    """Execute the sum-product algorithm"""
    net.enter_evidence(evidence)
    net.sum_product()
    """
    Print out the marginal probability of each node.
    """
    marginal = net.marginal_nodes([C])
    print 'Probability it is cloudy: ', marginal.T[1]*100, '%'
    marginal = net.marginal_nodes([S])
    # Hard-coded 0 because S (index 1) is the node observed in `evidence`.
    print 'Probability the sprinkler is on: ', 0, '%' #Observed node
    marginal = net.marginal_nodes([R])
    print 'Probability it is raining: ',marginal.T[1]*100, '%'
    marginal = net.marginal_nodes([W])
    print 'Probability the grass is wet: ', marginal.T[1]*100, '%'
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/BNET/Inference/Exact/Tut_BNET_sumproduct.py",
"copies": "1",
"size": "3279",
"license": "mit",
"hash": -3842371830754611700,
"line_mean": 29.5288461538,
"line_max": 75,
"alpha_frac": 0.5358340958,
"autogenerated": false,
"ratio": 3.4735169491525424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45093510449525426,
"avg_score": null,
"num_lines": null
} |
"""
This is a tutorial on how to create a Bayesian network, learn its parameters
from fully observed data via MLE, and perform exact MAX-SUM inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cpds
if __name__ == '__main__':
    """
    This example is based on the lawn sprinkler example, and the Bayesian
    network has the following structure, with all edges directed downwards:
                      Cloudy - 0
                     /         \
                    /           \
                   /             \
          Sprinkler - 1        Rainy - 2
                   \             /
                    \           /
                     \         /
                     Wet Grass -3
    """
    """Assign a unique numerical identifier to each node"""
    C = 0
    S = 1
    R = 2
    W = 3
    """Assign the number of nodes in the graph"""
    nodes = 4
    """
    The graph structure is represented as a adjacency matrix, dag.
    If dag[i, j] = 1, then there exists a directed edge from node
    i and node j.
    """
    dag = np.zeros((nodes, nodes))
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    """
    Define the size of each node, which is the number of different values a
    node could observed at. For example, if a node is either True of False,
    it has only 2 possible values it could be, therefore its size is 2. All
    the nodes in this graph has a size 2.
    """
    ns = 2 * np.ones(nodes)
    """Create the BNET"""
    # Note: no CPDs are supplied here; they are learned from data below.
    net = models.bnet(dag, ns)
    """Define the samples to train the models parameters with"""
    # Fully observed samples, one row per case, columns in node order [C, S, R, W].
    samples = \
        [[0, 1, 0, 0],
         [0, 1, 0, 1],
         [1, 0, 1, 1],
         [1, 0, 1, 0],
         [0, 1, 0, 1],
         [1, 0, 1, 1],
         [0, 1, 1, 1],
         [0, 1, 0, 1],
         [1, 0, 1, 1],
         [0, 1, 0, 1],
         [0, 1, 0, 1],
         [1, 0, 1, 1],
         [0, 1, 0, 1],
         [1, 0, 1, 1],
         [0, 1, 0, 1],
         [1, 1, 1, 1],
         [0, 0, 0, 0],
         [0, 1, 0, 1],
         [1, 0, 1, 1],
         [1, 1, 0, 1],
         [1, 0, 1, 1],
         [1, 1, 1, 1],
         [1, 0, 1, 1],
         [1, 0, 0, 0],
         [0, 1, 0, 1],
         [1, 0, 1, 1],
         [1, 0, 1, 1],
         [1, 0, 0, 0],
         [0, 1, 0, 1],
         [0, 1, 0, 1],
         [1, 0, 1, 0],
         [1, 0, 1, 0],
         [0, 0, 0, 0],
         [1, 1, 1, 1],
         [1, 0, 1, 1],
         [1, 0, 1, 1],
         [0, 0, 0, 0],
         [1, 0, 0, 0],
         [0, 0, 0, 0],
         [1, 0, 1, 1],
         [0, 0, 0, 0],
         [0, 1, 0, 1],
         [0, 0, 0, 0],
         [0, 1, 0, 1],
         [0, 1, 0, 1],
         [0, 0, 1, 1],
         [1, 0, 0, 0],
         [1, 0, 1, 1],
         [0, 0, 0, 0],
         [1, 1, 1, 1]]
    """Train the models parameters using the defined samples"""
    net.learn_params_mle(samples[:])
    """Intialize the BNET's inference engine to use EXACT inference"""
    net.init_inference_engine(exact=True)
    """Create and enter evidence ([] means that node is unobserved)"""
    evidence = [None, 0, None, None]
    net.enter_evidence(evidence)
    mlc = net.max_sum()
    """
    mlc contains the most likely configuaration for all the nodes in the BNET
    based in the input evidence.
    """
    print 'Cloudy node: ', bool(mlc[C])
    print 'Sprinkler node: ', bool(mlc[S])
    print 'Rainy node: ', bool(mlc[R])
    print 'Wet grass node: ', bool(mlc[W])
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/BNET/Learning/MLE/Tut_BNET_MLE.py",
"copies": "1",
"size": "3886",
"license": "mit",
"hash": -2616775477888170000,
"line_mean": 27.8923076923,
"line_max": 77,
"alpha_frac": 0.4065877509,
"autogenerated": false,
"ratio": 3.367417677642981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42740054285429807,
"avg_score": null,
"num_lines": null
} |
"""
This is a tutorial on how to create a Bayesian network, learn its parameters
from partially observed data via EM, and perform exact MAX-SUM inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cpds
if __name__ == '__main__':
"""
This example is based on the lawn sprinkler example, and the Bayesian
network has the following structure, with all edges directed downwards:
Cloudy - 0
/ \
/ \
/ \
Sprinkler - 1 Rainy - 2
\ /
\ /
\ /
Wet Grass -3
"""
"""Assign a unique numerical identifier to each node"""
C = 0
S = 1
R = 2
W = 3
"""Assign the number of nodes in the graph"""
nodes = 4
"""
The graph structure is represented as a adjacency matrix, dag.
If dag[i, j] = 1, then there exists a directed edge from node
i and node j.
"""
dag = np.zeros((nodes, nodes))
dag[C, [R, S]] = 1
dag[R, W] = 1
dag[S, W] = 1
"""
Define the size of each node, which is the number of different values a
node could observed at. For example, if a node is either True of False,
it has only 2 possible values it could be, therefore its size is 2. All
the nodes in this graph has a size 2.
"""
ns = 2 * np.ones(nodes)
"""Create the BNET"""
net = models.bnet(dag, ns)
"""Define the samples to train the models parameters with"""
samples = \
[[0, 1, 0, 0],
[0, 1, 0, 1],
[1, 0, 1, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 1, 1],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 0, 1],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 0, 1],
[1, 1, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 1],
[1, 0, 1, 1],
[1, 1, 0, 1],
[1, 0, 1, 1],
[1, 1, 1, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 0, 1],
[1, 0, 1, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 0, 1],
[0, 1, 0, 1],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 0, 1, 1],
[1, 0, 1, 1],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 0, 1, 1],
[1, 0, 0, 0],
[1, 0, 1, 1],
[0, 0, 0, 0],
[1, 1, 1, 1]]
"""Train the models parameters using the defined samples"""
inference_engine = inference.JTreeInferenceEngine(net)
net.inference_engine = inference_engine
net.learn_params_EM(samples[:])
"""Intialize the BNET's inference engine to use EXACT inference"""
net.init_inference_engine(exact=True)
"""Create and enter evidence ([] means that node is unobserved)"""
evidence = [None, 0, None, None]
net.enter_evidence(evidence)
mlc = net.max_sum()
"""
mlc contains the most likely configuaration for all the nodes in the BNET
based in the input evidence.
"""
print 'Cloudy node: ', bool(mlc[C])
print 'Sprinkler node: ', bool(mlc[S])
print 'Rainy node: ', bool(mlc[R])
print 'Wet grass node: ', bool(mlc[W])
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/BNET/Learning/EM/Tut_BNET_EM.py",
"copies": "1",
"size": "3997",
"license": "mit",
"hash": -2153821050079191300,
"line_mean": 28.2803030303,
"line_max": 79,
"alpha_frac": 0.4158118589,
"autogenerated": false,
"ratio": 3.3959218351741716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9287599401299528,
"avg_score": 0.00482685855492873,
"num_lines": 132
} |
"""
This is a tutorial on how to create a Markov random field, and perform
approximate MAX-SUM inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cliques
if __name__ == '__main__':
"""
This example is based on the lawn sprinkler example, and the Markov
random field has the following structure.
Cloudy - 0
/ \
/ \
/ \
Sprinkler - 1 Rainy - 2
\ /
\ /
\ /
Wet Grass -3
"""
"""Assign a unique numerical identifier to each node"""
C = 0
S = 1
R = 2
W = 3
"""Assign the number of nodes in the graph"""
nodes = 4
"""
The graph structure is represented as a adjacency matrix, dag.
If adj_mat[i, j] = 1, then there exists a undirected edge from node
i and node j.
"""
adj_mat = np.matrix(np.zeros((nodes, nodes)))
adj_mat[C, [R, S]] = 1
adj_mat[R, W] = 1
adj_mat[S, W] = 1
"""
Define the size of each node, which is the number of different values a
node could observed at. For example, if a node is either True of False,
it has only 2 possible values it could be, therefore its size is 2. All
the nodes in this graph has a size 2.
"""
ns = 2 * np.ones(nodes)
"""
Define the clique domains. The domain of a clique, is the indices of the
nodes in the clique. A clique is a fully connected set of nodes.
Therefore, for a set of node to be a clique, every node in the set must
be connected to every other node in the set.
"""
clq_doms = [[0, 1, 2], [1, 2, 3]]
"""Define potentials for the cliques"""
clqs = []
T = np.zeros((2, 2, 2))
T[:, :, 0] = np.array([[0.2, 0.2], [0.09, 0.01]])
T[:, :, 1] = np.array([[0.05, 0.05], [0.36, 0.04]])
clqs.append(cliques.clique(0, clq_doms[0], np.array([2, 2, 2]), T))
T[:, :, 0] = np.array([[1, 0.1], [0.1, 0.01]])
T[:, :, 1] = np.array([[0, 0.9], [0.9, 0.99]])
clqs.append(cliques.clique(1, clq_doms[1], np.array([2, 2, 2]), T))
"""Create the MRF"""
net = models.mrf(adj_mat, ns, clqs, lattice=False)
"""
Intialize the MRF's inference engine to use APPROXIMATE inference, by
setting exact=False.
"""
net.init_inference_engine(exact=False)
"""Create and enter evidence ([] means that node is unobserved)"""
evidence = [None, 0, None, None]
"""Execute the max-sum algorithm"""
mlc = net.max_sum(evidence)
"""
mlc contains the most likely configuaration for all the nodes in the MRF
based in the input evidence.
"""
print 'Cloudy node: ', bool(mlc[C])
print 'Sprinkler node: ', bool(mlc[S])
print 'Rainy node: ', bool(mlc[R])
print 'Wet grass node: ', bool(mlc[W])
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/MRF/Inference/Approximate/Tut_MRF_maxsum.py",
"copies": "1",
"size": "3178",
"license": "mit",
"hash": 8291124756713509000,
"line_mean": 31.1041666667,
"line_max": 76,
"alpha_frac": 0.5298930145,
"autogenerated": false,
"ratio": 3.4098712446351933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9417940376394369,
"avg_score": 0.004364776548164706,
"num_lines": 96
} |
"""
This is a tutorial on how to create a Markov random field, and perform
approximate SUM-PRODUCT inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cliques
if __name__ == '__main__':
"""
This example is based on the lawn sprinkler example, and the Markov
random field has the following structure.
Cloudy - 0
/ \
/ \
/ \
Sprinkler - 1 Rainy - 2
\ /
\ /
\ /
Wet Grass -3
"""
"""Assign a unique numerical identifier to each node"""
C = 0
S = 1
R = 2
W = 3
"""Assign the number of nodes in the graph"""
nodes = 4
"""
The graph structure is represented as a adjacency matrix, dag.
If adj_mat[i, j] = 1, then there exists a undirected edge from node
i and node j.
"""
adj_mat = np.matrix(np.zeros((nodes, nodes)))
adj_mat[C, [R, S]] = 1
adj_mat[R, W] = 1
adj_mat[S, W] = 1
"""
Define the size of each node, which is the number of different values a
node could observed at. For example, if a node is either True of False,
it has only 2 possible values it could be, therefore its size is 2. All
the nodes in this graph has a size 2.
"""
ns = 2 * np.ones(nodes)
"""
Define the clique domains. The domain of a clique, is the indices of the
nodes in the clique. A clique is a fully connected set of nodes.
Therefore, for a set of node to be a clique, every node in the set must
be connected to every other node in the set.
"""
clq_doms = [[0, 1, 2], [1, 2, 3]]
"""Define potentials for the cliques"""
clqs = []
T = np.zeros((2, 2, 2))
T[:, :, 0] = np.array([[0.2, 0.2], [0.09, 0.01]])
T[:, :, 1] = np.array([[0.05, 0.05], [0.36, 0.04]])
clqs.append(cliques.clique(0, clq_doms[0], np.array([2, 2, 2]), T))
T[:, :, 0] = np.array([[1, 0.1], [0.1, 0.01]])
T[:, :, 1] = np.array([[0, 0.9], [0.9, 0.99]])
clqs.append(cliques.clique(1, clq_doms[1], np.array([2, 2, 2]), T))
"""Create the MRF"""
net = models.mrf(adj_mat, ns, clqs, lattice=False)
"""
Intialize the MRF's inference engine to use Approximate inference, by
setting exact=False.
"""
net.init_inference_engine(exact=False)
"""Create and enter evidence ([] means that node is unobserved)"""
evidence = [None, 0, None, None]
"""Execute max-sum algorithm"""
net.sum_product(evidence)
"""
Print out the marginal probability of each node.
"""
marginal = net.marginal_nodes([C])
print 'Probability it is cloudy: ', marginal.T[1]*100, '%'
marginal = net.marginal_nodes([S])
print 'Probability the sprinkler is on: ', 0, '%' #Observed node
marginal = net.marginal_nodes([R])
print 'Probability it is raining: ',marginal.T[1]*100, '%'
marginal = net.marginal_nodes([W])
print 'Probability the grass is wet: ', marginal.T[1]*100, '%'
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/MRF/Inference/Approximate/Tut_MRF_sumproduct.py",
"copies": "1",
"size": "3377",
"license": "mit",
"hash": 2474598772053122000,
"line_mean": 32.1111111111,
"line_max": 76,
"alpha_frac": 0.5368670418,
"autogenerated": false,
"ratio": 3.4319105691056913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9446191250684277,
"avg_score": 0.004517272044282691,
"num_lines": 99
} |
"""
This is a tutorial on how to create a Markov random field, and perform
exact MAX-SUM inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
from sprinkler_data import sprinkler_evidence, sprinkler_mpe
"""Import the GrMPy modules"""
import models
import inference
import cliques
def test_mrf_maxsum():
    """
    Testing: MAX-SUM of MRF

    This example is based on the lawn sprinkler example, and the Markov
    random field has the following structure.

                    Cloudy - 0
                     /     \
                    /       \
                   /         \
       Sprinkler - 1         Rainy - 2
                   \         /
                    \       /
                     \     /
                   Wet Grass - 3
    """
    """Assign a unique numerical identifier to each node"""
    C = 0
    S = 1
    R = 2
    W = 3
    """Assign the number of nodes in the graph"""
    nodes = 4
    """
    The graph structure is represented as an adjacency matrix, adj_mat.
    If adj_mat[i, j] = 1, then there exists an undirected edge between
    node i and node j.
    """
    adj_mat = np.matrix(np.zeros((nodes, nodes)))
    adj_mat[C, [R, S]] = 1
    adj_mat[R, W] = 1
    adj_mat[S, W] = 1
    """
    Define the size of each node, which is the number of different values
    the node can take. All the nodes in this graph have size 2.
    """
    ns = 2 * np.ones(nodes)
    """
    Define the clique domains. The domain of a clique, is the indices of
    the nodes in the clique.
    """
    clq_doms = [[0, 1, 2], [1, 2, 3]]
    """Define potentials for the cliques"""
    clqs = []
    T = np.zeros((2, 2, 2))
    T[:, :, 0] = np.array([[0.2, 0.2], [0.09, 0.01]])
    T[:, :, 1] = np.array([[0.05, 0.05], [0.36, 0.04]])
    clqs.append(cliques.clique(0, clq_doms[0], np.array([2, 2, 2]), T))
    # NOTE(review): T is mutated after being handed to clique 0 — this
    # assumes cliques.clique copies the potential table; verify.
    T[:, :, 0] = np.array([[1, 0.1], [0.1, 0.01]])
    T[:, :, 1] = np.array([[0, 0.9], [0.9, 0.99]])
    clqs.append(cliques.clique(1, clq_doms[1], np.array([2, 2, 2]), T))
    """Create the MRF"""
    net = models.mrf(adj_mat, ns, clqs, lattice=False)
    """
    Initialize the MRF's inference engine to use EXACT inference, by
    setting exact=True.
    """
    net.init_inference_engine(exact=True)
    """Run max-sum on every evidence case and compare with the expected
    most-probable explanations."""
    all_ev = sprinkler_evidence()
    all_mpe = sprinkler_mpe()
    errors = 0
    for count, evidence in enumerate(all_ev):
        """Execute the max-sum algorithm"""
        mlc = net.max_sum(evidence)
        mpe = all_mpe[count]
        # Accumulate ABSOLUTE differences: a plain signed sum would let a
        # +1 mismatch cancel a -1 mismatch and falsely pass the test.
        errors = errors + np.sum(np.abs(np.array(mpe) - np.array(mlc)))
    assert errors == 0
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Tests/test_MRF_maxsum.py",
"copies": "1",
"size": "3193",
"license": "mit",
"hash": 6573256029279029000,
"line_mean": 29.6138613861,
"line_max": 76,
"alpha_frac": 0.5299091763,
"autogenerated": false,
"ratio": 3.396808510638298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4426717686938298,
"avg_score": null,
"num_lines": null
} |
"""
This is a tutorial on how to create a Markov random field, and perform
exact MAX-SUM inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cliques
if __name__ == '__main__':
"""
This example is based on the lawn sprinkler example, and the Markov
random field has the following structure.
Cloudy - 0
/ \
/ \
/ \
Sprinkler - 1 Rainy - 2
\ /
\ /
\ /
Wet Grass -3
"""
"""Assign a unique numerical identifier to each node"""
C = 0
S = 1
R = 2
W = 3
"""Assign the number of nodes in the graph"""
nodes = 4
"""
The graph structure is represented as a adjacency matrix, dag.
If adj_mat[i, j] = 1, then there exists a undirected edge from node
i and node j.
"""
adj_mat = np.matrix(np.zeros((nodes, nodes)))
adj_mat[C, [R, S]] = 1
adj_mat[R, W] = 1
adj_mat[S, W] = 1
"""
Define the size of each node, which is the number of different values a
node could observed at. For example, if a node is either True of False,
it has only 2 possible values it could be, therefore its size is 2. All
the nodes in this graph has a size 2.
"""
ns = 2 * np.ones(nodes)
"""
Define the clique domains. The domain of a clique, is the indices of the
nodes in the clique. A clique is a fully connected set of nodes.
Therefore, for a set of node to be a clique, every node in the set must
be connected to every other node in the set.
"""
clq_doms = [[0, 1, 2], [1, 2, 3]]
"""Define potentials for the cliques"""
clqs = []
T = np.zeros((2, 2, 2))
T[:, :, 0] = np.array([[0.2, 0.2], [0.09, 0.01]])
T[:, :, 1] = np.array([[0.05, 0.05], [0.36, 0.04]])
clqs.append(cliques.clique(0, clq_doms[0], np.array([2, 2, 2]), T))
T[:, :, 0] = np.array([[1, 0.1], [0.1, 0.01]])
T[:, :, 1] = np.array([[0, 0.9], [0.9, 0.99]])
clqs.append(cliques.clique(1, clq_doms[1], np.array([2, 2, 2]), T))
"""Create the MRF"""
net = models.mrf(adj_mat, ns, clqs, lattice=False)
"""
Intialize the MRF's inference engine to use EXACT inference, by
setting exact=True.
"""
net.init_inference_engine(exact=True)
"""Create and enter evidence ([] means that node is unobserved)"""
evidence = [None, 0, None, None]
"""Execute the max-sum algorithm"""
mlc = net.max_sum(evidence)
"""
mlc contains the most likely configuaration for all the nodes in the MRF
based in the input evidence.
"""
print 'Cloudy node: ', bool(mlc[C])
print 'Sprinkler node: ', bool(mlc[S])
print 'Rainy node: ', bool(mlc[R])
print 'Wet grass node: ', bool(mlc[W])
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/MRF/Inference/Exact/Tut_MRF_maxsum.py",
"copies": "1",
"size": "3180",
"license": "mit",
"hash": -691828130919156900,
"line_mean": 30.4489795918,
"line_max": 76,
"alpha_frac": 0.5251572327,
"autogenerated": false,
"ratio": 3.4193548387096775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44445120714096775,
"avg_score": null,
"num_lines": null
} |
"""
This is a tutorial on how to create a Markov random field, and perform
exact SUM-PRODUCT inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
from sprinkler_data import sprinkler_evidence, sprinkler_probs
"""Import the GrMPy modules"""
import models
import inference
import cliques
def test_mrf_sumproduct():
    """
    Testing: SUM-PRODUCT on MRF

    This example is based on the lawn sprinkler example, and the Markov
    random field has the following structure.

                    Cloudy - 0
                     /     \
                    /       \
                   /         \
       Sprinkler - 1         Rainy - 2
                   \         /
                    \       /
                     \     /
                   Wet Grass - 3
    """
    """Assign a unique numerical identifier to each node"""
    C = 0
    S = 1
    R = 2
    W = 3
    """Assign the number of nodes in the graph"""
    nodes = 4
    """
    The graph structure is represented as an adjacency matrix, adj_mat.
    If adj_mat[i, j] = 1, then there exists an undirected edge between
    node i and node j.
    """
    adj_mat = np.matrix(np.zeros((nodes, nodes)))
    adj_mat[C, [R, S]] = 1
    adj_mat[R, W] = 1
    adj_mat[S, W] = 1
    """
    Define the size of each node, which is the number of different values
    the node can take. All the nodes in this graph have size 2.
    """
    ns = 2 * np.ones(nodes)
    """
    Define the clique domains. The domain of a clique, is the indices of
    the nodes in the clique.
    """
    clq_doms = [[0, 1, 2], [1, 2, 3]]
    """Define potentials for the cliques"""
    clqs = []
    T = np.zeros((2, 2, 2))
    T[:, :, 0] = np.array([[0.2, 0.2], [0.09, 0.01]])
    T[:, :, 1] = np.array([[0.05, 0.05], [0.36, 0.04]])
    clqs.append(cliques.clique(0, clq_doms[0], np.array([2, 2, 2]), T))
    # NOTE(review): T is mutated after being handed to clique 0 — this
    # assumes cliques.clique copies the potential table; verify.
    T[:, :, 0] = np.array([[1, 0.1], [0.1, 0.01]])
    T[:, :, 1] = np.array([[0, 0.9], [0.9, 0.99]])
    clqs.append(cliques.clique(1, clq_doms[1], np.array([2, 2, 2]), T))
    """Create the MRF"""
    net = models.mrf(adj_mat, ns, clqs, lattice=False)
    """
    Initialize the MRF's inference engine to use EXACT inference, by
    setting exact=True.
    """
    net.init_inference_engine(exact=True)
    """Run sum-product on every evidence case and compare the marginals
    of the unobserved nodes with the expected probabilities."""
    all_ev = sprinkler_evidence()
    all_prob = sprinkler_probs()
    errors = 0
    for count, evidence in enumerate(all_ev):
        """Execute the sum-product algorithm"""
        net.sum_product(evidence)
        # Observed nodes keep the placeholder value 1; unobserved nodes
        # get their marginal probability of being True.
        ans = [1, 1, 1, 1]
        for node in (C, S, R, W):
            marginal = net.marginal_nodes([node])
            if evidence[node] is None:
                ans[node] = marginal.T[1]
        # Accumulate ABSOLUTE deviations: signed differences across nodes
        # could cancel out and mask real errors.
        errors = errors + \
            np.round(np.sum(np.abs(np.array(ans) -
                                   np.array(all_prob[count]))), 3)
    assert errors == 0
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Tests/test_MRF_sumproduct.py",
"copies": "1",
"size": "3744",
"license": "mit",
"hash": -6938001118818341000,
"line_mean": 30.275862069,
"line_max": 79,
"alpha_frac": 0.5192307692,
"autogenerated": false,
"ratio": 3.419178082191781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9400707904693628,
"avg_score": 0.007540189339630419,
"num_lines": 116
} |
"""
This is a tutorial on how to create a Markov random field, and perform
exact SUM-PRODUCT inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cliques
if __name__ == '__main__':
"""
This example is based on the lawn sprinkler example, and the Markov
random field has the following structure.
Cloudy - 0
/ \
/ \
/ \
Sprinkler - 1 Rainy - 2
\ /
\ /
\ /
Wet Grass -3
"""
"""Assign a unique numerical identifier to each node"""
C = 0
S = 1
R = 2
W = 3
"""Assign the number of nodes in the graph"""
nodes = 4
"""
The graph structure is represented as a adjacency matrix, dag.
If adj_mat[i, j] = 1, then there exists a undirected edge from node
i and node j.
"""
adj_mat = np.matrix(np.zeros((nodes, nodes)))
adj_mat[C, [R, S]] = 1
adj_mat[R, W] = 1
adj_mat[S, W] = 1
"""
Define the size of each node, which is the number of different values a
node could observed at. For example, if a node is either True of False,
it has only 2 possible values it could be, therefore its size is 2. All
the nodes in this graph has a size 2.
"""
ns = 2 * np.ones(nodes)
"""
Define the clique domains. The domain of a clique, is the indices of the
nodes in the clique. A clique is a fully connected set of nodes.
Therefore, for a set of node to be a clique, every node in the set must
be connected to every other node in the set.
"""
clq_doms = [[0, 1, 2], [1, 2, 3]]
"""Define potentials for the cliques"""
clqs = []
T = np.zeros((2, 2, 2))
T[:, :, 0] = np.array([[0.2, 0.2], [0.09, 0.01]])
T[:, :, 1] = np.array([[0.05, 0.05], [0.36, 0.04]])
clqs.append(cliques.clique(0, clq_doms[0], np.array([2, 2, 2]), T))
T[:, :, 0] = np.array([[1, 0.1], [0.1, 0.01]])
T[:, :, 1] = np.array([[0, 0.9], [0.9, 0.99]])
clqs.append(cliques.clique(1, clq_doms[1], np.array([2, 2, 2]), T))
"""Create the MRF"""
net = models.mrf(adj_mat, ns, clqs, lattice=False)
"""
Intialize the MRF's inference engine to use EXACT inference, by
setting exact=True.
"""
net.init_inference_engine(exact=True)
"""Create and enter evidence ([] means that node is unobserved)"""
evidence = [None, 0, None, None]
"""Execute max-sum algorithm"""
net.sum_product(evidence)
"""
Print out the marginal probability of each node.
"""
marginal = net.marginal_nodes([C])
print 'Probability it is cloudy: ', marginal.T[1]*100, '%'
marginal = net.marginal_nodes([S])
print 'Probability the sprinkler is on: ', 0, '%' #Observed node
marginal = net.marginal_nodes([R])
print 'Probability it is raining: ',marginal.T[1]*100, '%'
marginal = net.marginal_nodes([W])
print 'Probability the grass is wet: ', marginal.T[1]*100, '%'
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/MRF/Inference/Exact/Tut_MRF_sumproduct.py",
"copies": "1",
"size": "3363",
"license": "mit",
"hash": 4591044741365252600,
"line_mean": 31.9696969697,
"line_max": 76,
"alpha_frac": 0.5349390425,
"autogenerated": false,
"ratio": 3.4246435845213847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44595826270213845,
"avg_score": null,
"num_lines": null
} |
"""
This is a tutorial on how to create a Markov random field, learn its
parameters from fully observed data via MLE, and perform exact MAX-SUM
inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cliques
if __name__ == '__main__':
"""
This example is based on the lawn sprinkler example, and the Markov
random field has the following structure.
Cloudy - 0
/ \
/ \
/ \
1 - Sprinkler----Rainy - 2
\ /
\ /
\ /
Wet Grass -3
"""
"""Assign a unique numerical identifier to each node"""
C = 0
S = 1
R = 2
W = 3
"""Assign the number of nodes in the graph"""
nodes = 4
"""
The graph structure is represented as a adjacency matrix, dag.
If adj_mat[i, j] = 1, then there exists a undirected edge from node
i and node j.
"""
adj_mat = np.matrix(np.zeros((nodes, nodes)))
adj_mat[C, [R, S]] = 1
adj_mat[R, W] = 1
adj_mat[S, W] = 1
"""
Define the size of each node, which is the number of different values a
node could observed at. For example, if a node is either True of False,
it has only 2 possible values it could be, therefore its size is 2. All
the nodes in this graph has a size 2.
"""
ns = 2 * np.ones(nodes)
"""
Define the clique domains. The domain of a clique, is the indices of the
nodes in the clique. A clique is a fully connected set of nodes.
Therefore, for a set of node to be a clique, every node in the set must
be connected to every other node in the set.
"""
clq_doms = [[0], [0, 1], [0, 2], [1, 2, 3]]
"""Create blank cliques with the required domains and sizes"""
clqs = []
clqs.append(cliques.clique(0, clq_doms[0], np.array([2])))
clqs.append(cliques.clique(1, clq_doms[1], np.array([2, 2])))
clqs.append(cliques.clique(2, clq_doms[2], np.array([2, 2])))
clqs.append(cliques.clique(3, clq_doms[3], np.array([2, 2, 2])))
"""Create the MRF"""
net = models.mrf(adj_mat, ns, clqs, lattice=False)
"""Define the samples that will be used to train the models parameters"""
samples = \
[[0, 1, 0, 0],
[0, 1, 0, 1],
[1, 0, 1, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 1, 1],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 0, 1],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 0, 1],
[1, 1, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 1],
[1, 0, 1, 1],
[1, 1, 0, 1],
[1, 0, 1, 1],
[1, 1, 1, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 0, 1],
[1, 0, 1, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 0, 1],
[0, 1, 0, 1],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 0, 1, 1],
[1, 0, 1, 1],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 0, 1, 1],
[1, 0, 0, 0],
[1, 0, 1, 1],
[0, 0, 0, 0],
[1, 1, 1, 1]]
"""Train the models parameters using the defined samples"""
net.learn_params_mle(samples[:])
"""Intialize the MRF's inference engine to use EXACT inference"""
net.init_inference_engine(exact=True)
"""Create and enter evidence ([] means that node is unobserved)"""
evidence = [None, 0, None, None]
mlc = net.max_sum(evidence)
"""
mlc contains the most likely configuaration for all the nodes in the MRF
based in the input evidence.
"""
print 'Cloudy node: ', bool(mlc[C])
print 'Sprinkler node: ', bool(mlc[S])
print 'Rainy node: ', bool(mlc[R])
print 'Wet grass node: ', bool(mlc[W])
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/MRF/Learning/EM/Tut_MRF_MLE.py",
"copies": "1",
"size": "4602",
"license": "mit",
"hash": 3292670312574869000,
"line_mean": 29.7379310345,
"line_max": 77,
"alpha_frac": 0.4387222947,
"autogenerated": false,
"ratio": 3.282453637660485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9199702751245744,
"avg_score": 0.004294636222948382,
"num_lines": 145
} |
"""
This is a tutorial on how to create a Markov random field, learn its
parameters from partially observed data via EM, and perform exact MAX-SUM
inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cliques
if __name__ == '__main__':
"""
This example is based on the lawn sprinkler example, and the Markov
random field has the following structure.
Cloudy - 0
/ \
/ \
/ \
1 - Sprinkler----Rainy - 2
\ /
\ /
\ /
Wet Grass -3
"""
"""Assign a unique numerical identifier to each node"""
C = 0
S = 1
R = 2
W = 3
"""Assign the number of nodes in the graph"""
nodes = 4
"""
The graph structure is represented as a adjacency matrix, dag.
If adj_mat[i, j] = 1, then there exists a undirected edge from node
i and node j.
"""
adj_mat = np.matrix(np.zeros((nodes, nodes)))
adj_mat[C, [R, S]] = 1
adj_mat[R, W] = 1
adj_mat[S, W] = 1
"""
Define the size of each node, which is the number of different values a
node could observed at. For example, if a node is either True of False,
it has only 2 possible values it could be, therefore its size is 2. All
the nodes in this graph has a size 2.
"""
ns = 2 * np.ones(nodes)
"""
Define the clique domains. The domain of a clique, is the indices of the
nodes in the clique. A clique is a fully connected set of nodes.
Therefore, for a set of node to be a clique, every node in the set must
be connected to every other node in the set.
"""
clq_doms = [[0], [0, 1], [0, 2], [1, 2, 3]]
"""Create blank cliques with the required domains and sizes"""
clqs = []
clqs.append(cliques.clique(0, clq_doms[0], np.array([2])))
clqs.append(cliques.clique(1, clq_doms[1], np.array([2, 2])))
clqs.append(cliques.clique(2, clq_doms[2], np.array([2, 2])))
clqs.append(cliques.clique(3, clq_doms[3], np.array([2, 2, 2])))
"""Create the MRF"""
net = models.mrf(adj_mat, ns, clqs, lattice=False)
"""Define the samples that will be used to train the models parameters"""
samples = \
[[0, 1, 0, None],
[0, 1, 0, 1],
[1, 0, 1, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 1, 1],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 0, 1],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 0, 1],
[1, 0, 1, 1],
[0, 1, 0, 1],
[1, 1, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 1],
[1, 0, 1, 1],
[1, 1, 0, 1],
[1, 0, 1, 1],
[1, 1, 1, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 0, 1],
[1, 0, 1, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 0, 1],
[0, 1, 0, 1],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 0, 1, 1],
[1, 0, 1, 1],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 0, 1, 1],
[1, 0, 0, 0],
[1, 0, 1, 1],
[0, 0, 0, 0],
[1, 1, 1, 1]]
"""Train the models parameters using the defined samples"""
net.learn_params_EM(samples[:])
"""Intialize the MRF's inference engine to use EXACT inference"""
net.init_inference_engine(exact=True)
"""Create and enter evidence ([] means that node is unobserved)"""
evidence = [None, 0, None, None]
mlc = net.max_sum(evidence)
"""
mlc contains the most likely configuaration for all the nodes in the MRF
based in the input evidence.
"""
print 'Cloudy node: ', bool(mlc[C])
print 'Sprinkler node: ', bool(mlc[S])
print 'Rainy node: ', bool(mlc[R])
print 'Wet grass node: ', bool(mlc[W])
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/MRF/Learning/EM/Tut_MRF_EM.py",
"copies": "1",
"size": "4609",
"license": "mit",
"hash": 8728251330566328000,
"line_mean": 29.5684931507,
"line_max": 77,
"alpha_frac": 0.4391408115,
"autogenerated": false,
"ratio": 3.2897930049964312,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42289338164964313,
"avg_score": null,
"num_lines": null
} |
"""
This module contains functions that are used in testing the PyBNT toolbox.
Functions contained in this module are:
'are_equal': Tests whether two Numpy ndarray's are equivalent.
'read_example': Reads test data from a text file.
"""
import numpy as np
from os import path
def read_samples(fname, nsamples=0):
    """
    Read a whitespace-delimited table of numeric values from a text file
    in the './Data/' directory into a numpy array, shifting every value
    down by one (the data files are 1-based, the toolbox uses 0-based
    values).

    Parameters
    ----------
    fname: String
        The name of the text file (relative to './Data/') containing the
        values.
    nsamples: int
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    ans: numpy.ndarray
        One row per line of the file, one column per whitespace-separated
        value.
    """
    # 'with' guarantees the file is closed even if parsing raises.
    with open('./Data/' + fname, 'r') as f:
        ans = [[float(val) - 1 for val in line.split()] for line in f]
    return np.array(ans)
def create_all_evidence(nodes, sizes):
    """
    Enumerate every possible evidence assignment for a graph of `nodes`
    nodes that each take `sizes` possible values.

    Each node's entry ranges over None (unobserved) followed by the
    observed values 0 .. sizes-1, with the last node varying fastest, so
    (sizes + 1) ** nodes evidence lists are produced in total.

    Parameters
    ----------
    nodes: int
        Number of nodes in the graph.
    sizes: int
        Number of values each node can take (all nodes share this size).

    Returns
    -------
    samples: list of lists
        All evidence combinations, each inner list of length `nodes`.
    """
    from itertools import product
    # product() with the first element varying slowest reproduces the
    # original hand-rolled mixed-radix counter exactly.
    values = [None] + list(range(sizes))
    return [list(combo) for combo in product(values, repeat=nodes)]
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Tests/Old unit tests/utilities.py",
"copies": "1",
"size": "1797",
"license": "mit",
"hash": 3425775255974178000,
"line_mean": 25.2272727273,
"line_max": 76,
"alpha_frac": 0.510851419,
"autogenerated": false,
"ratio": 3.7911392405063293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48019906595063294,
"avg_score": null,
"num_lines": null
} |
"""
This module contains the classes used to perform inference on various
graphical models.
"""
__docformat__ = 'restructuredtext'
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import sparse
import general
import graph
import cliques
import potentials
import models
import cpds
import pylab
from utilities import create_all_evidence
def test_bnet_mle():
    """EXAMPLE: MLE learning on a BNET"""
    """Create all data required to instantiate the bnet object"""
    nodes = 4
    dag = np.zeros((nodes, nodes))
    C = 0
    S = 1
    R = 2
    W = 3
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    ns = 2 * np.ones((1, nodes))
    """Instantiate the model"""
    net = models.bnet(dag, ns, [])
    """Learn the parameters"""
    # pylab.load was removed from matplotlib; np.loadtxt is its direct
    # replacement for whitespace-delimited numeric text files.
    samples = np.loadtxt('./Data/lawn_samples.txt') - 1
    net.learn_params_mle(samples.copy())
    """Initialize the inference engine"""
    net.init_inference_engine(exact=True)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    # Collect rows in a list and stack once, instead of re-stacking the
    # whole array on every iteration.
    rows = [np.array([0, 0, 0, 0])]
    for evidence in evidences:
        rows.append(net.max_sum(evidence))
    mlcs = np.vstack(rows)
    """Read in expected values"""
    exp_mlcs = np.loadtxt('./Data/bnet_mle_exact_max_sum_res.txt')
    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
def test_bnet_EM():
    """EXAMPLE: EM learning on a BNET"""
    """Create all data required to instantiate the bnet object"""
    nodes = 4
    dag = np.zeros((nodes, nodes))
    C = 0
    S = 1
    R = 2
    W = 3
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    ns = 2 * np.ones((1, nodes))  # every node is binary
    """Instantiate the model"""
    net = models.bnet(dag, ns, [])
    """
    Load the samples, and set one sample of one node to be unobserved, this
    should not effect the learnt parameter much, and will demonstrate that
    the algorithm can handle unobserved samples.
    """
    # pylab.load() was removed from matplotlib; np.loadtxt() is its
    # documented replacement. Samples on disk are 1-based; shift to 0-based.
    samples = (np.loadtxt('./Data/lawn_samples.txt') - 1).tolist()
    samples[0][0] = []
    """Learn the parameters"""
    net.learn_params_EM(samples[:])
    """Initialize the inference engine"""
    net.init_inference_engine(exact=True)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    mlcs = np.array([[0, 0, 0, 0]])
    for evidence in evidences:
        mlc = net.max_sum(evidence)
        mlcs = np.vstack((mlcs, mlc))
    """Read in expected values"""
    # NOTE(review): EM output is compared against the MLE expected values;
    # per the docstring above one unobserved value should barely change the
    # fit, so this appears intentional -- confirm.
    exp_mlcs = np.loadtxt('./Data/bnet_mle_exact_max_sum_res.txt')
    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
def test_mrf_mle():
    """EXAMPLE: MLE learning on a MRF"""
    """Define MRF graph structure"""
    C = 0
    S = 1
    R = 2
    W = 3
    nodes = 4
    adj_mat = sparse.lil_matrix((nodes, nodes), dtype=int)
    adj_mat[C, [R, S]] = 1
    adj_mat[R, W] = 1
    adj_mat[S, W] = 1
    adj_mat[R, S] = 1  # moralizing edge between Rain and Sprinkler
    """Define clique domains and node sizes"""
    ns = 2 * np.ones((1, nodes))  # every node is binary
    clq_doms = [[0], [0, 1], [0, 2], [1, 2, 3]]
    """Define cliques and potentials"""
    clqs = []
    clqs.append(cliques.discrete_clique(0, clq_doms[0], np.array([2])))
    clqs.append(cliques.discrete_clique(1, clq_doms[1], np.array([2, 2])))
    clqs.append(cliques.discrete_clique(2, clq_doms[2], np.array([2, 2])))
    clqs.append(cliques.discrete_clique(3, clq_doms[3], np.array([2, 2, 2])))
    """Create the MRF"""
    net = models.mrf(adj_mat, ns, clqs)
    """Learn the parameters"""
    # pylab.load() was removed from matplotlib; np.loadtxt() is its
    # documented replacement. Samples on disk are 1-based; shift to 0-based.
    samples = np.loadtxt('./Data/lawn_samples.txt') - 1
    net.learn_params_mle(samples[:])
    """Initialize the inference engine"""
    net.init_inference_engine(exact=True)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    mlcs = np.array([[0, 0, 0, 0]])
    for evidence in evidences:
        mlc = net.max_sum(evidence)
        mlcs = np.vstack((mlcs, mlc))
    """Read in expected values"""
    exp_mlcs = np.loadtxt('./Data/mrf_mle_exact_max_sum_res.txt')
    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
def test_mrf_EM():
    """EXAMPLE: EM learning on a MRF"""
    """Define MRF graph structure"""
    C = 0
    S = 1
    R = 2
    W = 3
    nodes = 4
    adj_mat = sparse.lil_matrix((nodes, nodes), dtype=int)
    adj_mat[C, [R, S]] = 1
    adj_mat[R, W] = 1
    adj_mat[S, W] = 1
    adj_mat[R, S] = 1  # moralizing edge between Rain and Sprinkler
    """Define clique domains and node sizes"""
    ns = 2 * np.ones((1, nodes))  # every node is binary
    clq_doms = [[0], [0, 1], [0, 2], [1, 2, 3]]
    """Define cliques and potentials"""
    clqs = []
    clqs.append(cliques.discrete_clique(0, clq_doms[0], np.array([2])))
    clqs.append(cliques.discrete_clique(1, clq_doms[1], np.array([2, 2])))
    clqs.append(cliques.discrete_clique(2, clq_doms[2], np.array([2, 2])))
    clqs.append(cliques.discrete_clique(3, clq_doms[3], np.array([2, 2, 2])))
    """Create the MRF"""
    net = models.mrf(adj_mat, ns, clqs)
    """
    Load the samples, and set one sample of one node to be unobserved, this
    should not effect the learnt parameter much, and will demonstrate that
    the algorithm can handle unobserved samples.
    """
    # pylab.load() was removed from matplotlib; np.loadtxt() is its
    # documented replacement. Samples on disk are 1-based; shift to 0-based.
    samples = (np.loadtxt('./Data/lawn_samples.txt') - 1).tolist()
    samples[0][0] = []
    """Learn the parameters"""
    net.learn_params_EM(samples[:])
    """Initialize the inference engine"""
    net.init_inference_engine(exact=True)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    mlcs = np.array([[0, 0, 0, 0]])
    for evidence in evidences:
        mlc = net.max_sum(evidence)
        mlcs = np.vstack((mlcs, mlc))
    """Read in expected values"""
    exp_mlcs = np.loadtxt('./Data/mrf_em_exact_max_sum_res.txt')
    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
| {
"repo_name": "bhrzslm/uncertainty-reasoning",
"path": "my_engine/others/GrMPy/lib/GrMPy/Tests/Old unit tests/test_learning.py",
"copies": "1",
"size": "6257",
"license": "mit",
"hash": -7903659463712798000,
"line_mean": 28.8226600985,
"line_max": 77,
"alpha_frac": 0.5830270097,
"autogenerated": false,
"ratio": 3.142641888498242,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4225668898198242,
"avg_score": null,
"num_lines": null
} |
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.contrib.operators import SSHExecuteOperator
from airflow.contrib.hooks import SSHHook
from datetime import datetime, timedelta
default_args = {
    'owner': 'alo-alt',
    'depends_on_past': False,
    'start_date': datetime(2016, 7, 9),
    # NOTE(review): this is ONE string containing a comma; Airflow expects
    # one address per list element -- split when real addresses are set.
    'email': ['instert@here, 2nd@works.too'],
    'email_on_failure': True,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
}
# Runs every Saturday at 12:00.
dag = DAG('restart_process', default_args=default_args, schedule_interval='0 12 * * 6')
listener01 = SSHHook(conn_id='server01')
listener02 = SSHHook(conn_id='server02')
# SSH connect
# BUG FIX: the operators referenced the undefined names `server01` /
# `server02` (a NameError at DAG-parse time); they must use the SSHHook
# instances bound above as listener01 / listener02.
t1 = SSHExecuteOperator(
    task_id='restart_nginx',
    ssh_hook=listener01,
    bash_command="sudo service nginx restart",
    dag=dag)
t2 = SSHExecuteOperator(
    task_id='restart_postfix',
    ssh_hook=listener02,
    bash_command="sudo service postfix restart",
    dag=dag)
t3 = SSHExecuteOperator(
    task_id='check_nginx',
    ssh_hook=listener01,
    bash_command="sudo service nginx status",
    dag=dag)
t4 = SSHExecuteOperator(
    task_id='check_postfix',
    ssh_hook=listener02,
    bash_command="sudo service postfix status",
    dag=dag)
# Order: restart nginx -> check nginx -> restart postfix -> check postfix.
t3.set_upstream(t1)
t2.set_upstream(t3)
t4.set_upstream(t2)
| {
"repo_name": "alo-alt/airflow",
"path": "DAG/process_restart.py",
"copies": "1",
"size": "1384",
"license": "apache-2.0",
"hash": -1602024860584322300,
"line_mean": 25.6153846154,
"line_max": 87,
"alpha_frac": 0.6625722543,
"autogenerated": false,
"ratio": 3.351089588377724,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.45136618426777236,
"avg_score": null,
"num_lines": null
} |
__author__ = "Alok Kumar"
import click
import datetime
import dateutil.tz
import dateutil.parser
import colorama
import humanize
import requests
import json
# Match status labels used throughout this module.
FUTURE = "future"
NOW = "now"
PAST = "past"
# Width of the rendered progress bar, in characters.
SCREEN_WIDTH = 68
def prettify(match):
    """Render one match dict as a colored multi-line terminal string.

    Classifies the match as FUTURE / NOW / PAST from the time elapsed since
    its kickoff, then formats teams, score, a progress bar and a status line.
    # assumes match has keys 'datetime', 'home_team', 'away_team', 'winner',
    # and teams have 'country' and 'goals' -- per the worldcup.sfg.io API;
    # TODO confirm against the API schema.
    """
    # Positive seconds => kickoff is in the past; a match longer than 90
    # minutes ago is considered finished.
    diff = (datetime.datetime.now(tz=dateutil.tz.tzlocal()) - dateutil.parser.parse(match['datetime']))
    seconds = diff.total_seconds()
    if seconds > 0:
        if seconds > 60 * 90:
            status = PAST
        else:
            status = NOW
    else:
        status = FUTURE
    # Finished/live matches are highlighted; future ones are dimmed.
    if status in [PAST, NOW]:
        color = colorama.Style.BRIGHT + colorama.Fore.GREEN
    else:
        color = colorama.Style.NORMAL + colorama.Fore.WHITE
    home = match['home_team']
    away = match['away_team']
    if status == NOW:
        minute = int(seconds / 60)
        match_status = "Being played now: %s minutes gone" % minute
    elif status == PAST:
        if match['winner'] == 'Draw':
            result = 'Draw'
        else:
            result = "%s won" % (match['winner'])
        match_status = "Played %s. %s" % (humanize.naturaltime(diff), result)
    else:
        match_status = "Will be played %s at %s" % (
            humanize.naturaltime(diff), dateutil.parser.parse(match['datetime']).astimezone(dateutil.tz.tzlocal()).strftime("%H:%M %p")
        )
    # Progress: fraction of the regulation 90 minutes already played.
    if status == NOW:
        match_percentage = int(seconds / 60 / 90 * 100)
    elif status == FUTURE:
        match_percentage = 0
    else:
        match_percentage = 100
    # The leading bytes on the last template line are the UTF-8 encoding of
    # the soccer-ball glyph (Python 2 byte string).
    return """
    {} {:<30} {} - {} {:>30}
    {}
    \xE2\x9A\xBD {}
    """.format(
        color,
        home['country'],
        home['goals'],
        away['goals'],
        away['country'],
        progress_bar(match_percentage),
        colorama.Fore.WHITE + match_status
    )
def progress_bar(percentage, separator="o", character="-"):
    """
    Creates a progress bar by given percentage value.

    Returns a SCREEN_WIDTH-character bar: the completed part bright green,
    the remainder bright white, with `separator` marking the boundary.
    """
    filled = colorama.Fore.GREEN + colorama.Style.BRIGHT
    empty = colorama.Fore.WHITE + colorama.Style.BRIGHT
    if percentage == 100:
        return filled + character * SCREEN_WIDTH
    if percentage == 0:
        return empty + character * SCREEN_WIDTH
    # BUG FIX: clamp to at least one completed cell. For very small
    # percentages int() truncates to 0, and the old expression then emitted
    # SCREEN_WIDTH + 1 characters (empty filled part + separator + full
    # remainder), pushing the bar one column past the screen width.
    completed = max(int((SCREEN_WIDTH / 100.0) * percentage), 1)
    return (filled + (character * (completed - 1)) +
            separator +
            empty + (character * (SCREEN_WIDTH - completed)))
@click.command()
def today():
url = "http://worldcup.sfg.io/matches/today"
response = requests.get(url)
matches = json.loads(response.text)
for match in matches:
print prettify(match)
@click.command()
def tomorrow():
url = "http://worldcup.sfg.io/matches/tomorrow/"
response = requests.get(url)
matches = json.loads(response.text)
for match in matches:
print prettify(match) | {
"repo_name": "rajalokan/wc14",
"path": "wc14/today.py",
"copies": "1",
"size": "2836",
"license": "mit",
"hash": -2804748512971033000,
"line_mean": 25.7641509434,
"line_max": 135,
"alpha_frac": 0.5849788434,
"autogenerated": false,
"ratio": 3.645244215938303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4730223059338303,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alphabuddha'
from dss.forms import *
from cropper.models import *
from django.shortcuts import render_to_response, HttpResponseRedirect, render
from django.template.context import RequestContext
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from reportlab.pdfgen import canvas
from django.http import HttpResponse
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.conf import settings
from django.contrib.gis.geos import Point
from vectorformats.Formats import Django, GeoJSON
from django.core.context_processors import csrf
import uuid
from django.db.models import Q
from django.views import generic
from django.shortcuts import render_to_response, get_object_or_404
from django.utils import timezone
import hashlib, datetime, random
from rest_framework import status
from django.http import HttpResponse
from rest_framework.decorators import api_view
from django.contrib.auth import authenticate, login
from rest_framework.response import Response
from django.core import serializers
from chartit import DataPool, Chart
from django.views.generic.edit import UpdateView
from django.views.generic.base import TemplateView
from django.views.generic import ListView
from django.contrib.auth import logout
from django.contrib.auth import views
from django.core.paginator import Paginator, InvalidPage, EmptyPage
# Create your views here.
def planner(request):
    # Render the crop-planning page. NOTE: locals() is passed as the
    # template context, so every local name in this function becomes a
    # context variable -- do not add locals without meaning to expose them.
    return render_to_response("temp/planner.html", locals(), context_instance=RequestContext(request))
def analyzer(request):
    # Render the analyzer page. NOTE: locals() is the template context --
    # any local added here leaks into the template.
    return render_to_response("temp/analyzer.html", locals(), context_instance=RequestContext(request))
class cropIndex(generic.ListView):
    """Searchable, paginated listing of crops.

    get() bypasses ListView's normal queryset/pagination machinery and
    paginates manually, so `queryset`/`paginate_by` below are effectively
    unused by it -- kept for compatibility with the ListView contract.
    """
    queryset = cropping.objects.approved()
    #properties = crop.objects.all()
    template_name = "temp/crop.html"
    paginate_by = 10

    def get(self, request, *args, **kwargs):
        crops_list = cropping.objects.all()
        # Optional free-text filter over name, category and soil.
        var_get_search = request.GET.get('search_box')
        if var_get_search is not None:
            crops_list = crops_list.filter(Q(name__icontains=var_get_search)| Q(category__icontains = var_get_search)| Q(soil__icontains = var_get_search))
        paginator = Paginator(crops_list, 10) # Show 10 crops per page
        # Make sure page request is an int. If not, deliver first page.
        try:
            page = int(request.GET.get('page', '1'))
        except ValueError:
            page = 1
        # If page request (9999) is out of range, deliver last page of results.
        try:
            crops = paginator.page(page)
        except (EmptyPage, InvalidPage):
            crops = paginator.page(paginator.num_pages)
        return render(request, self.template_name, {'crops': crops,'crops_list':crops_list})
class cropDetail(generic.DetailView):
    # Detail page for a single cropping record (pk/slug from the URLconf).
    model = cropping
    template_name = "temp/crops.html"
| {
"repo_name": "wanjohikibui/agrismart",
"path": "cropper/views.py",
"copies": "1",
"size": "2810",
"license": "mit",
"hash": 9181279310221629000,
"line_mean": 37.4931506849,
"line_max": 146,
"alpha_frac": 0.7832740214,
"autogenerated": false,
"ratio": 3.6876640419947506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.98150383841554,
"avg_score": 0.031179935847869995,
"num_lines": 73
} |
__author__ = 'alphabuddha'
from dss.forms import *
from dss.models import *
from django.shortcuts import render_to_response, HttpResponseRedirect, render
from django.template.context import RequestContext
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from reportlab.pdfgen import canvas
from django.http import HttpResponse
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.conf import settings
from django.contrib.gis.geos import Point
from vectorformats.Formats import Django, GeoJSON
from django.core.context_processors import csrf
import uuid
from django.views import generic
from django.shortcuts import render_to_response, get_object_or_404
from django.utils import timezone
import hashlib, datetime, random
from rest_framework import status
from django.http import HttpResponse
from rest_framework.decorators import api_view
from django.contrib.auth import authenticate, login
from rest_framework.response import Response
from django.core import serializers
from chartit import DataPool, Chart
from django.views.generic.edit import UpdateView
from django.views.generic.base import TemplateView
from django.views.generic import ListView
from django.contrib.auth import logout
from django.contrib.auth import views
from icalendar import Calendar, Event
from django.db.models import get_model
from django.contrib.sites.models import Site
def contact_us(request):
    """Render the contact-us page.

    BUG FIX: the template path was "/temp/contact-us.html"; Django template
    names are resolved relative to the configured template directories, and
    every other view in this module uses the relative "temp/..." form.
    NOTE: locals() is the template context -- do not add locals casually.
    """
    return render_to_response("temp/contact-us.html", locals(), context_instance=RequestContext(request))
def form_error(request):
    """Static page shown when a submitted form fails."""
    template_name = 'temp/error.html'
    return render_to_response(template_name)
def form_success(request):
    """Static page shown after a successful form submission."""
    template_name = 'temp/success.html'
    return render_to_response(template_name)
def register_success(request):
    """Confirmation page shown once registration is complete."""
    template_name = 'registration/registration_complete.html'
    return render(request, template_name)
def about(request):
    # Render the about-us page. NOTE: locals() is the template context.
    return render_to_response("temp/about-us.html", locals(), context_instance=RequestContext(request))
def calendar(request):
    # Render the calendar page. NOTE: the view name shadows the stdlib
    # `calendar` module but the name is referenced by the URLconf, so it
    # must stay. locals() is the template context.
    return render_to_response("temp/calendar.html", locals(), context_instance=RequestContext(request))
def export(request, event_id):
    """Export a single event as a downloadable iCalendar (.ics) attachment.

    # NOTE(review): `cal.as_string()` and HttpResponse's `mimetype` kwarg
    # are old icalendar/Django APIs (now `to_ical()` / `content_type`) --
    # fine for the pinned versions this project targets; confirm on upgrade.
    """
    event = get_model('events', 'event').objects.get(id = event_id)
    cal = Calendar()
    site = Site.objects.get_current()
    cal.add('prodid', '-//%s Events Calendar//%s//' % (site.name, site.domain))
    cal.add('version', '2.0')
    # Build a reversed-domain token (e.g. "com.example") for the event UID.
    site_token = site.domain.split('.')
    site_token.reverse()
    site_token = '.'.join(site_token)
    ical_event = Event()
    ical_event.add('summary', event.description)
    ical_event.add('dtstart', event.start)
    # Events without an explicit end are exported as zero-length.
    ical_event.add('dtend', event.end and event.end or event.start)
    ical_event.add('dtstamp', event.end and event.end or event.start)
    ical_event['uid'] = '%d.event.events.%s' % (event.id, site_token)
    cal.add_component(ical_event)
    response = HttpResponse(cal.as_string(), mimetype="text/calendar")
    response['Content-Disposition'] = 'attachment; filename=%s.ics' % event.slug
    return response
class BlogIndex(generic.ListView):
    # Paginated list of published blog entries, two per page.
    queryset = Entry.objects.published()
    template_name = "temp/feeds.html"
    paginate_by = 2
class BlogDetail(generic.DetailView):
    # Single blog entry page (pk/slug resolved from the URLconf).
    model = Entry
    template_name = "temp/post.html"
@login_required
def status(request):
    """Show the logged-in user's submitted applications."""
    # Dropped a dead `user = request.user.get_full_name()` local that was
    # never referenced.
    applications = application.objects.filter(user=request.user)
    return render_to_response("temp/status.html", {"applications": applications}, context_instance=RequestContext(request))
def map(request):
    # Render the map portal. NOTE: shadows the builtin `map`, but the name
    # is bound in the URLconf so it must stay. locals() is the context.
    return render_to_response("temp/portal.html", locals(), context_instance=RequestContext(request))
def points(request):
    # Render the points page. NOTE: locals() is the template context.
    return render_to_response("temp/points.html", locals(), context_instance=RequestContext(request))
def incident_portal(request):
form = incidentForm
if request.method == 'POST':
form = incidentForm(request.POST)
if form.is_valid():
form.save(commit=True)
#return index(request)
return HttpResponseRedirect(reverse("incident"))
else:
print form.errors
else:
form = incidentForm()
return render(request, 'temp/incident.html', {'form': form})
def weather_chart_view(request):
    """Build a chartit line chart of monthly male/female counts per city.

    # NOTE(review): the final render_to_response() call is missing its
    # template-name argument, so this view cannot render as written --
    # supply the correct template (unknown from this file) to fix.
    """
    #Step 1: Create a DataPool with the data we want to retrieve.
    weatherdata = \
        DataPool(
            series=
            [{'options': {
                'source': MonthlyWeatherByCity.objects.all()},
                'terms': [
                    'number',
                    'males',
                    'females']}
             ])
    #Step 2: Create the Chart object
    cht = Chart(
        datasource = weatherdata,
        series_options =
        [{'options':{
            'type': 'line',
            'stacking': False},
            'terms':{
                'number': [
                    'females',
                    'males']
            }}],
        chart_options =
        {'title': {
            'text': 'Weather Data of Boston and Houston'},
            'xAxis': {
                'title': {
                    'text': 'Month number'}}})
    #Step 3: Send the chart object to the template.
    return render_to_response({'weatherchart': cht})
@login_required
def application_portal(request):
form = applicationForm
if request.method == 'POST':
form = applicationForm(request.POST, request.FILES)
if form.is_valid():
new_form = form.save(commit=False)
new_form.user = request.user
cd = form.cleaned_data
email_to = cd['email']
subject = "{0} Update".format(cd['first_name'])
message = "Applicant: {0}\n\n Your application has been received".format(
cd['last_name'])
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL,[email_to,])
new_form.save()
messages.success(request, 'Application Sent successfully')
return HttpResponseRedirect(reverse("apply"))
else:
print form.errors
else:
form = applicationForm()
images=application.objects.all()
return render(request, 'temp/apply.html', {'form': form,'images':images})
def add_point(request):
    """Save a geolocated incidence report and acknowledge it by e-mail.

    POST: validate, copy the cleaned fields onto a new `incidence`, parse
    the comma-separated coordinate string into a Point geometry, e-mail the
    reporter and redirect. Any non-POST request renders an empty form.
    """
    if request.method == 'POST':
        form = incidenceForm(request.POST)
        if form.is_valid():
            new_point = incidence()
            cd = form.cleaned_data
            new_point.first_name = cd['first_name']
            new_point.last_name = cd['last_name']
            new_point.email = cd['email']
            new_point.telephone = cd['telephone']
            new_point.incidence_title = cd['incidence_title']
            new_point.category = cd['category']
            new_point.county = cd['county']
            new_point.closest_town = cd['closest_town']
            new_point.status = cd['status']
            #new_point.photo = cd['photo']
            # assumes coordinates arrive as "x,y" in the order Point()
            # expects (GeoDjango x=lon, y=lat) -- TODO confirm with the
            # form/JS that produces them.
            coordinates = cd['coordinates'].split(',')
            new_point.geom = Point(float(coordinates[0]), float(coordinates[1]))
            new_point.save()
            email_to = cd['email']
            subject = "{0} Update".format(cd['incidence_title'])
            message = "Applicant: {0}\n\n Your incidence has been received.Thank you for the report".format(
                cd['first_name'])
            send_mail(subject, message, settings.DEFAULT_FROM_EMAIL,[email_to,])
            return HttpResponseRedirect('/incidences/')
        else:
            return HttpResponseRedirect('/incidences/')
    # GET path only: every POST branch above returns. Dropped the dead
    # `form = incidenceForm()` that used to sit in an else branch -- the
    # context below constructs its own fresh form instance.
    args = {}
    args.update(csrf(request))
    args['form'] = incidenceForm()
    return render_to_response('temp/incidence.html', args)
def register_user(request):
    """Register a new account and e-mail a time-limited activation link.

    Creates the User via RegistrationForm, stores a UserProfile holding an
    activation key valid for 48 hours, and mails the confirmation URL.
    """
    args = {}
    args.update(csrf(request))
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        args['form'] = form
        if form.is_valid():
            form.save() # save user to database if form is valid
            username = form.cleaned_data['username']
            email = form.cleaned_data['email']
            # NOTE(review): sha1 over random.random() is not a
            # cryptographically secure token source -- consider the
            # `secrets`/`os.urandom` equivalents for activation keys.
            salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
            activation_key = hashlib.sha1(salt+email).hexdigest()
            # Key is valid for two days.
            key_expires = datetime.datetime.today() + datetime.timedelta(2)
            #Get user by username
            user=User.objects.get(username=username)
            # Create and save user profile
            new_profile = UserProfile(user=user, activation_key=activation_key,
                                      key_expires=key_expires)
            new_profile.save()
            # Send email with activation key
            # NOTE(review): the confirmation URL is hardcoded to
            # localhost:8000 -- build it from Site/request for production.
            email_subject = 'Account confirmation'
            email_body = "Hey %s, thanks for signing up. To activate your account, click this link within 48hours http://localhost:8000/confirm/%s" % (username, activation_key)
            send_mail(email_subject, email_body, 'myemail@example.com',[email], fail_silently=False)
            messages.success(request, 'Account created successfully.Check your mail to activate!')
            return HttpResponseRedirect('/register/')
    else:
        args['form'] = RegistrationForm()
    return render_to_response('temp/register.html', args, context_instance=RequestContext(request))
def register_confirm(request, activation_key):
    """Activate the account matching `activation_key` (48-hour validity).

    Renders an expiry page for stale keys, otherwise flips the user to
    active and renders the confirmation page. Removed an unreachable
    messages.success(...) call that followed the final return.
    """
    # NOTE(review): this redirect is constructed but never returned, so
    # already-authenticated users fall through to normal activation --
    # confirm whether an early `return` was intended here.
    if request.user.is_authenticated():
        HttpResponseRedirect('')
    user_profile = get_object_or_404(UserProfile, activation_key=activation_key)
    if user_profile.key_expires < timezone.now():
        return render_to_response('temp/confirm_expired.html')
    user = user_profile.user
    user.is_active = True
    user.save()
    return render_to_response('temp/confirm.html')
def user_login(request):
    """Authenticate a posted username/password and route by role.

    Staff users are sent to /admin/, other active users to /. Failed
    authentication re-renders the login page with an error message.
    """
    args = {}
    args.update(csrf(request))
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                if user.is_staff:
                    login(request, user)
                    return HttpResponseRedirect('/admin/')
                else:
                    login(request, user)
                    return HttpResponseRedirect('/')
            else:
                # NOTE(review): the inactive-user path sets the message but
                # falls through without returning a response (the view
                # returns None, a server error) -- a render/redirect is
                # missing here.
                #return HttpResponseRedirect(reverse("login"))
                messages.error(request, "Error")
        else:
            messages.error(request, "Invalid username and password.Try again!")
            return render_to_response('temp/login.html', args, context_instance=RequestContext(request))
    else:
        return render(request, 'temp/login.html', {})
@login_required
def user_logout(request):
    """End the current session and send the user to the login page."""
    destination = 'accounts/login/'
    logout(request)
    return HttpResponseRedirect(destination)
def change_password(request):
    """Delegate to Django's built-in password_change view.

    Removed an unreachable messages.success(...) call that followed the
    return statement; a success message for this flow belongs before the
    return (or in the password-change "done" view).
    """
    template_response = views.password_change(request)
    # Do something with `template_response`
    return template_response
def contact(request):
    """Validate a posted contact-form message and e-mail it to the admin.

    Collects human-readable validation errors; when there are none, sends
    the mail, flashes a success message and redirects. Otherwise (or on
    GET) re-renders the contact page with the error list.
    """
    errors = []
    if request.method == 'POST':
        post = request.POST
        if not post.get('subject', ''):
            errors.append('Enter a subject.')
        if not post.get('message', ''):
            errors.append('Enter a message.')
        if post.get('email') and '@' not in post['email']:
            errors.append('Enter a valid e-mail address.')
        if not errors:
            send_mail(
                post['subject'],
                post['message'],
                post.get('email', 'noreply@ke_ladm.com'),
                ['swanjohi9@gmail.com'], #email address where message is sent.
            )
            messages.success(request, 'Your message has been sent.Thank you for contacting us!')
            return HttpResponseRedirect('/contacts/')
    return render(request, 'temp/contact-us.html',
                  {'errors': errors})
def thanks(request):
    """Static thank-you page."""
    template_name = 'temp/thanks.html'
    return render_to_response(template_name)
| {
"repo_name": "wanjohikibui/agrismart",
"path": "dss/views.py",
"copies": "1",
"size": "12344",
"license": "mit",
"hash": 7835541510754744000,
"line_mean": 34.9883381924,
"line_max": 176,
"alpha_frac": 0.6193292288,
"autogenerated": false,
"ratio": 4.210095497953615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009650596270525221,
"num_lines": 343
} |
__author__ = 'Alpha'
from visual import *
# Simple program where projectile follows path determined by forces applied to it
# Velocity is vector(number, number, number)
# Force is vector(number, number, number)
# Momentum is vector(number, number, number)
# Mass is number
# Position is vector(number, number, number)
# Acceleration is vector(number, number, number)
# Time is number
# Simulation limits and integration settings.
T_MAX = 1000  # maximum simulated time before the animation stops
GRAV_ACCEL = vector(0, - 9.81, 0)  # gravitational acceleration; -y is down
STEP = .01  # integration time step
# Mass Acceleration -> Force
def get_force(m, a):
f = m * a
return f
class Projectile:
    """Point mass tracked by mass, position, momentum, time and net force,
    advanced with simple fixed-step Euler integration."""

    def __init__(self, m, s, p, t, f):
        self.m = m  # mass
        self.s = s  # position vector
        self.p = p  # momentum vector
        self.t = t  # elapsed time
        self.f = f  # net force vector
        self.grav_force()

    # Projectile Force -> Projectile
    def update_p(self):
        """Advance momentum one step: p += F * dt."""
        self.p = self.p + self.f * STEP

    # Projectile -> Projectile
    def update_s(self):
        """Advance position one step: s += (p / m) * dt."""
        self.s = self.s + STEP * self.p / self.m

    # Projectile -> Projectile
    def update_t(self):
        """Advance the simulation clock one step."""
        self.t = self.t + STEP

    # Projectile -> Force
    def grav_force(self):
        """Return (without storing) the gravitational force on this mass."""
        return get_force(self.m, GRAV_ACCEL)

    # Force (listof Force) -> Projectile
    def get_net_force(self, forces_on):
        """Store gravity plus the resultant of the extra applied forces."""
        self.f = self.grav_force() + net_force(forces_on)
# Initial conditions for the demo projectile.
m0 = 1  # mass
s0 = vector(0, 0, 0)  # start at the origin
p0 = vector(10, 20, 10)  # initial momentum
t0 = 0  # initial time
f0 = s0  # zero starting force (reuses the zero vector)
BALL0 = Projectile(m0, s0, p0, t0, f0)
NO_OTHER_FORCES = [f0]  # gravity only
f_wind = vector(-1, -11, 4)  # constant wind force
WIND = [f_wind]
# Display settings.
MARKER_SCALE = .05  # path-marker size relative to the ball radius
AXIS_SCALE = 70  # length of the drawn coordinate axes
SHOW_PATH = True
SHOW_POSITION_ARROW = True
SHOW_FORCE_ARROW = True
SHOW_MOMENTUM_ARROW = True
# (listof Force) -> Force
def net_force(forces):
    """Sum a list of force vectors into a single resultant force."""
    total = vector(0, 0, 0)
    for force in forces:
        total = total + force
    return total
# Projectile (listof Force) -> (side effect: opens a vpython scene)
def animate(projectile, forces):
    """Animate the projectile under gravity plus `forces` until it either
    falls back to its starting height or T_MAX is exceeded.

    Draws coordinate axes, the ball, and optional path markers and
    position/momentum/force arrows, updating them each integration step.
    """
    s_i = projectile.s  # remember launch position; flight ends below it
    projectile.get_net_force(forces)
    xscale = AXIS_SCALE
    yscale = AXIS_SCALE
    zscale = AXIS_SCALE
    width = .01 * AXIS_SCALE
    xaxis = arrow(axis=(xscale, 0, 0),
                  shaftwidth=width)
    yaxis = arrow(axis=(0, yscale, 0),
                  shaftwidth=width)
    zaxis = arrow(axis=(0, 0, zscale),
                  shaftwidth=width)
    unitx = (1, 0, 0)
    unity = (0, 1, 0)
    unitz = (0, 0, 1)
    # Ball radius doubles as a visual proxy for its mass.
    image = sphere(pos=projectile.s,
                   radius=projectile.m,
                   color=color.red)
    if SHOW_PATH:
        points(pos=[image.pos],
               size=MARKER_SCALE*image.radius,
               color=image.color)
    if SHOW_POSITION_ARROW:
        position_arrow = arrow(pos=vector(0, 0, 0),
                               axis=image.pos,
                               color=color.blue,
                               shaftwidth=width)
    if SHOW_MOMENTUM_ARROW:
        momentum_arrow = arrow(pos=image.pos,
                               axis=projectile.p,
                               color=color.green,
                               shaftwidth=width)
    if SHOW_FORCE_ARROW:
        net_force_arrow = arrow(pos=image.pos,
                                axis=projectile.f,
                                color=color.yellow,
                                shaftwidth=width)
    # Main loop: one Euler step per frame, capped at real-time rate.
    while True:
        rate(1/STEP)
        if projectile.t > T_MAX:
            break
        elif projectile.s.y < s_i.y:
            break  # fell back below the launch height
        else:
            projectile.update_s()
            projectile.update_p()
            projectile.update_t()
            image.pos = projectile.s
            if SHOW_PATH:
                points(pos=[image.pos],
                       size=MARKER_SCALE*image.radius,
                       color=image.color)
            if SHOW_POSITION_ARROW:
                position_arrow.axis = image.pos
            if SHOW_MOMENTUM_ARROW:
                momentum_arrow.pos = image.pos
                momentum_arrow.axis = projectile.p
            if SHOW_FORCE_ARROW:
                net_force_arrow.pos = image.pos
                net_force_arrow.axis = projectile.f
#animate(BALL0, NO_OTHER_FORCES)
animate(BALL0, WIND) | {
"repo_name": "dilynfullerton/vpython",
"path": "projectile.py",
"copies": "1",
"size": "4065",
"license": "cc0-1.0",
"hash": -4995567611350809000,
"line_mean": 22.1022727273,
"line_max": 81,
"alpha_frac": 0.5259532595,
"autogenerated": false,
"ratio": 3.1907378335949765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42166910930949764,
"avg_score": null,
"num_lines": null
} |
import math
from math import sin, cos, acos, tan, atan2, sqrt, radians, degrees, asin, floor, log, pi
R = 6371 #km -- mean Earth radius used by the spherical formulas below
def convertHMStoDecimal( hms):
    """Convert a (degrees, minutes, seconds) triple to decimal degrees.

    The sign of the degrees field gives the sign of the result; the
    minutes/seconds magnitudes always add to the overall magnitude.
    Result is rounded to 6 decimal places.
    """
    h = hms[0]
    m = hms[1]
    s = hms[2]
    if h < 0:
        sign = -1
    else:
        sign = 1
    # BUG FIX: the old expression (h + m/60 + s/3600) * sign kept the
    # negative h inside the sum, so for negative coordinates the minutes
    # and seconds were SUBTRACTED from the magnitude (e.g. (-39, 52, 4.35)
    # yielded ~38.13 instead of -39.867875). Work on |h| and re-apply the
    # sign at the end; positive inputs are unchanged.
    dec = (abs(h) + (m/60.0) + (s/3600.0)) * sign
    return round(dec, 6)
def convertDecimaltoHMS( dec):
    """Convert decimal degrees to an (h, m, s) triple.

    The sign is carried on the degrees field; minutes and seconds are
    magnitudes. Seconds keep the original truncation to 1/100000 of a
    minute before rounding to 2 decimals.
    """
    if dec < 0:
        sign = -1
    else:
        sign= 1
    # BUG FIX: the old code applied floor() directly to a negative value
    # (floor(-39.867875) == -40), producing h = 40 with garbage minutes
    # and seconds. Compute on the magnitude and re-apply the sign to the
    # degrees; positive inputs follow the exact original arithmetic.
    mag = abs(dec)
    h = floor(mag) * sign
    frac_minutes = (mag - floor(mag)) * 60
    m = floor(frac_minutes)
    s = round( floor((frac_minutes - m) * 100000) * 60/100000, 2)
    return (h, m, s)
class Gps(object):
    """a simple gps class for holding lat and long and calculating distances and locations

    Coordinates are decimal degrees rounded to 6 places; distances are in
    meters on a spherical Earth of radius R km.
    """
    def __init__(self, lat=0.0, lon=0.0, alt=0.0):
        super(Gps, self).__init__()
        self.lat = round(float(lat), 6)
        self.lon = round(float(lon), 6)
        self.alt = round(float(alt), 6)

    def get_lattitude(self):
        # (sic) spelling kept: callers use this name
        return self.lat

    def get_longtitude(self):
        # (sic) spelling kept: callers use this name
        return self.lon

    def get_altitude(self):
        return self.alt

    def distanceTo(self, otherGps):
        '''result in meters'''
        # Removed unused deltaLatRad/deltaLonRad locals that belonged to
        # the commented-out haversine variant below.
        lat1Rad = radians(self.lat)
        lat2Rad = radians(otherGps.lat)
        lon1Rad = radians(self.lon)
        lon2Rad = radians(otherGps.lon)
        # Spherical law of cosines
        d = acos( (sin(lat1Rad)*sin(lat2Rad)) + (cos(lat1Rad)*cos(lat2Rad)*cos(lon2Rad-lon1Rad))) * R;
        # Haversine formula
        # a = (sin(deltaLatRad/2) * sin(deltaLatRad/2)) + (sin(deltaLonRad/2) * sin(deltaLonRad/2) * cos(lat1Rad) * cos(lat2Rad))
        # c = 2 * atan2(sqrt(a), sqrt(1-a))
        # d = R * c
        return d*1000

    def bearingTo(self, otherGps):
        """Initial rhumb-style bearing in degrees clockwise from north."""
        deltaLon = radians(otherGps.lon) - radians(self.lon)
        lat1 = radians(self.lat)
        lat2 = radians(otherGps.lat)
        # http://gis.stackexchange.com/questions/29239/calculate-bearing-between-two-decimal-gps-coordinates
        dPhi = log(tan(lat2/2.0+pi/4.0)/tan(lat1/2.0+pi/4.0))
        # Take the shorter way around when crossing the antimeridian.
        if abs(deltaLon) > pi:
            if deltaLon > 0.0:
                deltaLon = -(2.0 * pi - deltaLon)
            else:
                deltaLon = (2.0 * pi + deltaLon)
        bearing = (degrees(atan2(deltaLon, dPhi)) + 360.0) % 360.0
        return round(bearing, 6)

    def locationOf(self, bearing, distance):
        '''bearing in degrees, distance in meters'''
        # Great-circle destination point from start, bearing and distance.
        distance_km = distance/1000.0
        bearing_r = radians(bearing)
        lat1 = radians(self.lat)
        lon1 = radians(self.lon)
        lat2 = asin( (sin(lat1)*cos(distance_km/R)) + (cos(lat1)*sin(distance_km/R)*cos(bearing_r)) )
        lon2 = lon1 + atan2( sin(bearing_r) * sin(distance_km/R) * cos(lat1), cos(distance_km/R) - (sin(lat1)*sin(lat2)) );
        return Gps(lat=degrees(lat2), lon=degrees(lon2))

    def isInBoundingCircle(self, otherGps, radius):
        '''radius in meters'''
        return self.distanceTo(otherGps) <= radius

    def generateWaypointFile(self, altitude, commit=False, filename='wp.txt'):
        """Render a QGroundControl waypoint file for this position; write
        it to `filename` only when commit is True. Returns the filename."""
        template = '''QGC WPL 110
0	0	3	16	0.000000	0.000000	0.000000	0.000000	lat	lon	0.000000	1
1	0	3	16	0.000000	0.000000	0.000000	0.000000	lat	lon	alt	1
2	1	3	17	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	1
3	0	3	20	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	0.000000	1'''
        template = template.replace('lat', str(self.get_lattitude()))
        template = template.replace('lon', str(self.get_longtitude()))
        template = template.replace('alt', str(float(altitude)))
        if commit:
            wpfile = open( filename, 'w')
            wpfile.write(template)
            wpfile.close()
        return filename

    def __unicode__(self):
        return self.__str__()

    def __str__(self):
        lathms = convertDecimaltoHMS(self.lat)
        lonhms = convertDecimaltoHMS(self.lon)
        return 'D('+str(self.lat)+', '+str(self.lon)+') : HMS'+str(lathms)+', '+ str(lonhms)
if __name__ == '__main__':
    # Round-trip demo: HMS <-> decimal conversion, then distance/bearing
    # between two nearby points and recovery of each from the other.
    coordHms1 = ( (39,52,4.35), (32,45,5.27))
    coordHms2 = ( (39,52,7.79), (32,45,11.68))
    print('HMS 1 -> ' + str(coordHms1))
    print('HMS 2 -> ' + str(coordHms2))
    coordDec1 = ( convertHMStoDecimal(coordHms1[0]), convertHMStoDecimal(coordHms1[1]))
    coordDec2 = ( convertHMStoDecimal(coordHms2[0]), convertHMStoDecimal(coordHms2[1]))
    print('Dec 1 ->' + str(coordDec1))
    print('Dec 2 ->' + str(coordDec2))
    backHms1 = ( convertDecimaltoHMS(coordDec1[0]), convertDecimaltoHMS(coordDec1[1]))
    backHms2 = ( convertDecimaltoHMS(coordDec2[0]), convertDecimaltoHMS(coordDec2[1]))
    print('HMS 1 -> ' + str(backHms1))
    print('HMS 2 -> ' + str(backHms2))
    loc1 = Gps( coordDec1[0], coordDec1[1] )
    loc2 = Gps( coordDec2[0], coordDec2[1] )
    distance = loc1.distanceTo(loc2)
    bearing1 = loc1.bearingTo(loc2)
    bearing2 = loc2.bearingTo(loc1)
    print('distance: ' + str(distance))
    print('bearing12: ' + str(bearing1))
    print('bearing12: ' + str(convertDecimaltoHMS(bearing1)))
    print('bearing21: ' + str(bearing2))
    print('bearing21: ' + str(convertDecimaltoHMS(bearing2)))
    loc2fromDistance = loc1.locationOf(bearing1, distance)
    loc1fromDistance = loc2.locationOf(bearing2, distance)
    print(loc1)
    print(loc1fromDistance)
    print(loc2 )
    print(loc2fromDistance)
    # Longer-range demo: Balci to Bodrum distance and bearing.
    balciLatHms = (39, 53, 23.23)
    balciLonHms = (32, 46, 30.70)
    balciLatDec = convertHMStoDecimal(balciLatHms)
    balciLonDec = convertHMStoDecimal(balciLonHms)
    balci = Gps(balciLatDec, balciLonDec)
    bodrumLatHms = (37, 1, 49.38)
    bodrumLonHms = (27, 25, 1.41)
    bodrumLatDec = convertHMStoDecimal(bodrumLatHms)
    bodrumLonDec = convertHMStoDecimal(bodrumLonHms)
    # Dropped an accidental duplicate of this construction.
    bodrum = Gps(bodrumLatDec, bodrumLonDec)
    print('balci : '+str(balci))
    print('bodrum : '+str(bodrum))
    distance = balci.distanceTo(bodrum)/1000
    bearing = balci.bearingTo(bodrum)
    print('distance: '+str(distance))
    print('bearing: '+str(bearing))
    balci.generateWaypointFile(100, True)
| {
"repo_name": "alpsayin/python-gps",
"path": "gps.py",
"copies": "1",
"size": "6607",
"license": "mit",
"hash": -4079641733830758400,
"line_mean": 37.6374269006,
"line_max": 129,
"alpha_frac": 0.6092023611,
"autogenerated": false,
"ratio": 2.893999123959702,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4003201485059702,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alvaro'
import sys
import os
import networkx as nx
import pylab as p
def main():
    """Load GML graph(s) from the path in argv[1] and draw them.

    With an optional label in argv[2] (argv[3], if given, is appended as a
    ':'-separated suffix), draw the BFS tree rooted at every node carrying
    that label; otherwise draw each whole graph, colouring zero-in-degree
    nodes blue.
    """
    path = sys.argv[1]
    graphs = {}
    if os.path.isdir(path):
        for fname in os.listdir(path):
            graphs[fname] = nx.read_gml(path + '/' + fname)
    elif os.path.isfile(path):
        graphs[path] = nx.read_gml(path)
    if len(sys.argv) > 2:
        label = sys.argv[2]
        if len(sys.argv) > 3:
            label = label + ":" + sys.argv[3]
        for name, graph in graphs.items():
            for i in range(len(graph.node)):
                if graph.node[i]['label'] != label:
                    continue
                tree = nx.bfs_tree(graph, i)
                colors = ['r'] * len(tree.node)
                colors[0] = 'b'  # root of the BFS tree drawn in blue
                for node in tree.node:
                    tree.add_edges_from(nx.bfs_edges(graph, node))
                p.figure(name + " - " + label)
                print(str(len(tree.node)) + '\n')
                nx.draw_spring(tree, with_labels=True, node_color=colors)
    else:
        for name, graph in graphs.items():
            colors = ['r'] * len(graph.node)
            in_degrees = graph.in_degree(graph)
            for n in graph.nodes():
                if in_degrees[n] == 0:
                    colors[n] = 'b'  # sources (no incoming edges) in blue
            p.figure(name)
            nx.draw_spring(graph, with_labels=True, node_color=colors)
    p.show()
if __name__ == '__main__':
main() | {
"repo_name": "servioticy/servioticy-vagrant",
"path": "puppet/files/other/topology_generator/topology_generator/benchmark/visualization.py",
"copies": "2",
"size": "1644",
"license": "apache-2.0",
"hash": -4770169965303069000,
"line_mean": 30.6346153846,
"line_max": 87,
"alpha_frac": 0.496350365,
"autogenerated": false,
"ratio": 3.3688524590163933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9852205404768974,
"avg_score": 0.0025994838494838492,
"num_lines": 52
} |
""" @author: Alvaro Velasco Date: 10 Mayo 2016 """
import gtk
from random import randint
# DEFINITIONS
def apretado_undo(boton):
    """While the undo button is held down, show how many undos remain."""
    # Singular "vez" for exactly one remaining undo, plural "veces" otherwise.
    plantilla = "Puedes deshacer %s vez mas" if undo == 1 else "Puedes deshacer %s veces mas"
    but_undo.set_label(plantilla % undo)
def change_color():
    """Repaint every play button from the current `tablero` matrix."""
    for x in range(0, TAM):
        for y in range(0, TAM):
            btn = botones[x + TAM * y]
            btn.posicion = (x, y)
            # Cell value 0 -> black (off); anything else -> blue (on).
            pintar = change_black if tablero[x][y] == 0 else change_blue
            pintar(btn)
def _set_button_image(boton, ruta):
    """Load the image file at `ruta` and install it on button `boton`.

    Shared helper: the nine colour setters below were nine copy-paste
    duplicates of this three-line body.
    """
    imagen = gtk.Image()
    imagen.set_from_file(ruta)
    boton.set_image(imagen)
def change_btr(boton):
    # Blue-to-red transition sprite.
    _set_button_image(boton, "images/btr.gif")
def change_rtb(boton):
    # Red-to-blue transition sprite.
    _set_button_image(boton, "images/rtb.gif")
def change_blue(boton):
    _set_button_image(boton, "images/blue.png")
def change_black(boton):
    _set_button_image(boton, "images/black.png")
def change_red(boton):
    _set_button_image(boton, "images/red.png")
def change_green(boton):
    _set_button_image(boton, "images/green.gif")
def change_gold(boton):
    _set_button_image(boton, "images/gold.png")
def change_silver(boton):
    _set_button_image(boton, "images/silver.png")
def change_white(boton):
    _set_button_image(boton, "images/white.png")
def change_level(boton):
    """Start the level typed into `tbox_level`: reset counters, build a board.

    On non-numeric input, shows an error prompt in the entry box instead.
    """
    global level, hits, tablero, undo, WarSecINFO
    try:
        level = int(tbox_level.get_text())
        hits = 0
        undo = 0
        recoverhighscore()
        # Pad the score table so high_score[level] always exists.
        while len(high_score) <= level:
            high_score.append('SinPuntuacion')
        golpes_txt.set_label("Llevas %s golpes" % (hits))
        record_txt.set_label("Nivel: %s Record: %s" % (level, high_score[level-1]))
        tablero_level(boton)
        but_reboot.set_sensitive(True)
        but_undo.set_sensitive(False)
        # Hide the high-score panel if it was showing.
        firma.set_visible(False)
        record_table.set_visible(False)
    except ValueError:
        # Non-numeric input: ask for a number instead of crashing.
        tbox_level.set_text("Nivel con NUMERO")
def change_board(boton):
    """Toggle the cells around the pressed button, then repaint the board.

    The affected cells are columns x-2/x+2 for rows y-1..y+1 plus rows
    x-1..x+1 for columns y-2..y+2.  Indices may go negative, which wraps
    around on the padded board via Python's negative indexing.
    """
    global tablero
    def _toggle(fila, col):
        # Flip a single cell, preserving the original semantics exactly:
        # 0 becomes 1, anything non-zero becomes 0.
        if tablero[fila][col] == 0:
            tablero[fila][col] = 1
        else:
            tablero[fila][col] = 0
    (x, y) = boton.posicion
    for i in range(-1, 2):
        _toggle(x - 2, y + i)
        _toggle(x + 2, y + i)
    for i in range(-2, 3):
        _toggle(x - 1, y + i)
        _toggle(x, y + i)
        _toggle(x + 1, y + i)
    change_color()
def changehighscore():
    """Record the finished level's score, update the UI and save to disk.

    Compares `hits` against the stored record for `level` and shows one of
    four end screens (first score / new record / tie / plain finish), then
    rewrites the whole high-score file and resets the game counters.
    """
    global level, high_score, hits, undo
    k = -1  # high_score is 0-based, `level` is 1-based
    # Normalise file-loaded scores: numeric strings -> int, placeholder kept.
    for l in range(0, len(high_score)):
        if high_score[l] != 'SinPuntuacion':
            high_score[l] = int(high_score[l])
        else:
            high_score[l] = 'SinPuntuacion'
    while len(high_score) < level: # Writing that it has levels without score
        high_score.append('SinPuntuacion')
    if high_score[(level + k)] == "SinPuntuacion":
        # First ever score for this level.
        high_score[(level + k)] = hits
        tbox_level.set_text('Siuuh!')
        record_txt.set_text('GOLPES: %s NIVEL: %s' % (hits, level))
        golpes_txt.set_text('Eres el primero en jugar!')
        index_interface(index_yes)
    elif high_score[(level + k)] > hits:
        # Fewer hits than the record: new record.
        tbox_level.set_text('Siuuh!')
        record_txt.set_text('GOLPES: %s NIVEL: %s' % (hits, level))
        golpes_txt.set_text('N U E V O R E C O R D')
        high_score[(level + k)] = hits
        index_interface(index_oro)
    elif high_score[(level + k)] == hits:
        # Tied the record.
        tbox_level.set_text('Siuuh!')
        record_txt.set_text('GOLPES: %s NIVEL: %s' % (hits, level))
        golpes_txt.set_text('Igualas el record !')
        index_interface(index_plata)
    else:
        # Finished, but slower than the record.
        tbox_level.set_text('Siuuh!')
        record_txt.set_text('GOLPES: %s RECORD: %s' % (hits, high_score[(level + k)]))
        golpes_txt.set_text('Nivel %s terminado' % level)
        index_interface(index_ok)
    but_reboot.set_sensitive(False)
    ch = False # Saving
    # NOTE(review): if the IOError persists, this loop retries forever; the
    # file handle is also never closed explicitly (CPython closes it on GC).
    while ch == False:
        try:
            a = open(file_name, 'w')
            for l in range(0, len(high_score)):
                a.writelines(str(high_score[l]) + "\n")
            ch = True
        except IOError:
            print 'Vaya, ha ocurrido un error mientras guardabamos las puntuaciones...'
    hits = 0
    level = 0
    undo = 0
def checkend():
    """After each move, detect a cleared board and finish the level."""
    global tablero, checkit, high_score
    if level == 0:
        # Sandbox mode: nothing to win.
        return
    # Count the cells still lit; the level is over when none remain.
    checkit = sum(1 for r in range(0, TAM) for c in range(0, TAM) if tablero[c][r] == 1)
    if checkit == 0:
        but_left.set_sensitive(False)
        but_undo.set_sensitive(False)
        but_reboot.set_sensitive(False)
        changehighscore()
        print_hs()
def close_all(ventana):
    """Window "destroy" handler: stop the GTK main loop so the app exits.

    Bug fix: the original referenced `gtk.main_quit` without calling it
    (a no-op expression), so closing the window left the process running.
    """
    gtk.main_quit()
def funct_left(boton):
    """Step the level selector down and sync the left arrow's sensitivity."""
    global level
    level -= 1
    tbox_level.set_text("%s" % level)
    # The left arrow stays enabled only while level can still be decreased.
    but_left.set_sensitive(level > 1)
def funct_right(boton):
    """Step the level selector up and sync the left arrow's sensitivity."""
    global level, botones
    level += 1
    tbox_level.set_text("%s" % level)
    # Mirror of funct_left: the left arrow is usable only above level 1.
    but_left.set_sensitive(level > 1)
def index_interface(index): # CHANGE THE BUTTONS COLORS DEPENDING THE GAME
    """Paint the whole button grid from a list of TAM2 colour codes (0-8)."""
    # Colour code -> painter.  Codes outside 0-8 are silently ignored,
    # exactly like the original if/elif chain.
    painters = {
        0: change_black, 1: change_red, 2: change_gold,
        3: change_white, 4: change_green, 5: change_silver,
        6: change_blue, 7: change_btr, 8: change_rtb,
    }
    for i in range(0, TAM2):
        pintar = painters.get(index[i])
        if pintar is not None:
            pintar(botones[i])
def less_undo(boton): # Go to the before step
    """Undo the most recent move by replaying its toggle.

    A press toggles cells, and toggling is its own inverse, so re-applying
    the saved move position reverts the board.
    """
    global undo
    undo -= 1
    # Recover the undone move's cell from the undo stacks.
    x = memraw[undo]
    y = memcol[undo]
    boton.posicion = (x, y)
    if undo <= 0:
        # Stack exhausted: lock the button.
        but_undo.set_sensitive(False)
        but_undo.set_label("No puedes deshacer")
    if undo >= 0:
        change_board(boton)
def more_undo(boton): # saving the hit to undo later
    """Record the move just played so it can be undone, and count the hit."""
    global undo, hits
    but_undo.set_sensitive(True)
    but_undo.set_label("DESHACER JUGADA")
    (x,y) = boton.posicion
    # Push the pressed cell onto the undo stacks and grow them by one slot.
    memraw[undo] = x
    memraw.append(0)
    memcol[undo] = y
    memcol.append(0)
    undo += 1
    hits += 1
    golpes_txt.set_label("Llevas %s golpes" % (hits))
    record_txt.set_label("Nivel: %s Record: %s" % (level, high_score[level-1]))
    if level == 0:
        # Level 0 is the sandbox board: no score tracking.
        golpes_txt.set_label("Seleccione un nivel")
        record_txt.set_label("Esta en modo prueba")
    checkend()
def new_board():
    """Return a fresh all-zero board of (TAM+4) rows by (TAM+5) columns.

    The extra rows/columns give change_board() slack for its out-of-range
    (negative-wrapping) toggle indices.
    """
    return [[0] * (TAM + 5) for _ in range(0, TAM + 4)]
def recoverhighscore():
    """Load the saved high scores from `file_name` into `high_score`.

    If the file does not exist yet, it is created with a placeholder entry
    and the open is retried.
    """
    global high_score
    ch = False
    while ch == False: # Open the score file so it can be copied into high_score
        try:
            # NOTE(review): `file` shadows the builtin of the same name.
            file = open(file_name, 'r')
            ch = True
        except IOError:
            # First run: create the file with a placeholder, then retry.
            filenew = open(file_name, 'w')
            filenew.write("SinPuntuacion")
            filenew.close()
    high_score = file.read()
    high_score = high_score.split()
    file.close() # Close the file
def copy_array(tab):
    """Return an element-wise (one-level deep) copy of the 2-D board `tab`.

    Bug fix: the original iterated `range(len(tab))` for BOTH axes, but
    boards built by new_board() are (TAM+4) x (TAM+5), so the last column
    was never copied and a restarted level could differ from the generated
    one.  Copying row by row preserves every column and no longer depends
    on the global board dimensions.
    """
    return [list(fila) for fila in tab]
def soltado_undo(boton):
    """When the undo button is released, restore its idle caption."""
    etiqueta = "CLICK PARA DESHACER"
    but_undo.set_label(etiqueta)
def tablero_level(boton):
    """Build a random board for the current level.

    Starting from an empty board, applies `level` simulated presses at
    random cells, so every generated board is solvable in at most `level`
    moves.  Also snapshots the layout for the restart button.
    """
    global tablero, undo, level, recomenzar
    tablero = new_board()
    for _ in range(0, level):
        # Simulate a press at a random cell through the normal toggle path.
        x = randint(0, TAM-1)
        y = randint(0, TAM-1)
        boton.posicion = (x, y)
        change_board(boton)
    recomenzar = copy_array(tablero)  # snapshot for recomienza()
    undo = 0
def recomienza(boton):
    """Restart the current level from its saved initial layout."""
    global tablero, recomenzar, undo, hits
    hits = 0
    undo = 0
    # Restore the snapshot taken when the level was generated.
    tablero = copy_array(recomenzar)
    golpes_txt.set_text("Vuelve a empezar!")
    but_undo.set_sensitive(False)
    change_color()
def print_hs(): # change te highscores showned
    """Rebuild the high-score text from disk and show it in `puntuaciones`."""
    global punt_text, high_score
    punt_text = "\n\n\n\n\n\n\n"
    recoverhighscore()
    mark = 0
    # NOTE(review): the `- 1` skips the last high_score entry; change_level
    # pads the list one past the current level, so this may be deliberate —
    # confirm before "fixing".
    for i in range (0, len(high_score) - 1):
        if str(high_score[i]) != "SinPuntuacion":
            mark = 1
            punt_text += "\tNivel "
            punt_text += str(i+1)
            punt_text += " - "
            punt_text += str(high_score[i])
            punt_text += " golpes\t\n"
    if mark == 0: punt_text += "\nNo hay records almacenados\n"
    puntuaciones.set_text(punt_text)
def hs_on(boton): # SHOW / QUIT HIGH SCORES
    """Toggle the high-score panel; first call also builds and attaches it."""
    global WarSecHS
    if WarSecHS == 0:
        # Lazy one-time construction: attach the score widgets on first use.
        supertable.attach(record_table, 1, 2, 0, 1)
        record_table.attach(puntuaciones, 0, 1, 0, 1)
        record_table.attach(records_image, 0, 1, 0, 1)
        supertable.attach(firma, 1, 2, 1, 2)
        firma.set_visible(True)
        records_image.set_visible(True)
        puntuaciones.set_visible(True)
        WarSecHS = 1
    # Flip visibility of the panel (and the signature label with it).
    if record_table.get_visible():
        record_table.set_visible(False)
        firma.set_visible(False)
    else: record_table.set_visible(True); firma.set_visible(True)
def info_on(boton): # SHOW / QUIT TUTORIAL
    """Toggle the tutorial image, hiding/restoring the play buttons."""
    global WarSecINFO, botones, image_tutorial
    if WarSecINFO == 0:
        # Lazy one-time attach of the tutorial image over the board.
        for b in botones:
            b.set_visible(False)
        tabla.attach(image_tutorial, 0, 10, 0, 10)
        WarSecINFO = 1
    # Flip between tutorial view and playable board.
    if image_tutorial.get_visible():
        image_tutorial.set_visible(False)
        for b in botones:
            b.set_visible(True)
    else:
        for b in botones:
            b.set_visible(False)
        image_tutorial.set_visible(True)
# CODE
# ---- Game constants and global state ----
TAM = 10
TAM2 = TAM*TAM
file_name = "HS.txt"
memraw = [0]
memcol = [0]
undo = 0
hits = 0
high_score = []
level = 0
recoverhighscore()
botones = []
tablero = new_board()
recomenzar = new_board()
WarSecHS = 0 # Avoid a warning
WarSecINFO = 0 # avoid a warning
# ---- Main window and control widgets ----
color_gris = gtk.gdk.color_parse('#151515') # Use to background
ventana = gtk.Window()
centro = gtk.WIN_POS_CENTER
tbox_level = gtk.Entry() # Textbox to set a level
tbox_level.set_text("?")
tbox_level.set_alignment(0.5)
tbox_level.set_width_chars(5)
but_level= gtk.Button() # Button to change te level
but_level.set_label("Cambiar nivel!")
but_left = gtk.Button() # button to decrease level
image_left = gtk.Image()
image_left.set_from_file("images/left.png")
but_left.set_image(image_left)
but_left.set_relief(gtk.RELIEF_NONE)
but_left.set_sensitive(False)
but_right = gtk.Button() # button to increase level
image_right = gtk.Image()
image_right.set_from_file("images/right.png")
but_right.set_image(image_right)
but_right.set_relief(gtk.RELIEF_NONE)
but_undo = gtk.Button() # button to undo the hit
but_undo.set_label("DESHACER!")
but_reboot = gtk.Button() # Button to restart game
image_reboot = gtk.Image()
image_reboot.set_from_file("images/reboot.png")
but_reboot.set_image(image_reboot)
but_reboot.set_sensitive(False)
but_info = gtk.Button() # Info button
image_info = gtk.Image()
image_info.set_from_file("images/info.png")
but_info.set_image(image_info)
but_hs = gtk.Button() # Show high scores
image_hs = gtk.Image()
image_hs.set_from_file("images/punt.png")
but_hs.set_image(image_hs)
image_tutorial = gtk.Image() # Gif tutorial image
image_tutorial.set_from_file("images/tutorial.gif")
records_image = gtk.Image() # Cup image
records_image.set_from_file("images/coup.png")
records_image.set_alignment(xalign=0.5, yalign=0)
golpes_txt = gtk.Label() # Label for the current hit count
golpes_txt.set_label("PRUEBE EL TABLERO")
record_txt = gtk.Label() # Label for level/record status
record_txt.set_label("O ELIJA UN NIVEL")
puntuaciones = gtk.Label() # Label to show the highscores
puntuaciones.set_text("MAXIMAS PUNTUACIONES!")
puntuaciones.set_alignment(xalign=0.5, yalign=0)
puntuaciones.set_visible(True)
print_hs()
firma = gtk.Label()
firma.set_text("Realizado por:\nAlvaro Velasco Gil\nMayo 2016")
# TABLES FOR SHOW
# Colour-code legend for the screens below (kept as the original's bare
# string statement): 0:black 1:red 2:gold 3:white 4:green 5:silver 6:blue
# 7:red-black 8:black-red.
' Codificacion = 0:negro 1:rojo 2:oro 3:blanco 4:verde 5:plata 6:azul 7:rojo-negro 8:negro-rojo'
index_flecha = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0,
                0, 1, 8, 8, 0, 0, 0, 0, 8, 0, 1, 1, 1, 0, 0, 0, 0, 7, 8, 1, 1, 1, 0, 0, 0, 0, 0, 7, 1, 1, 1, 0, 0, 0, 0,
                0, 0, 7, 1, 1, 8, 8, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
index_ok = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 0, 4, 0, 0, 0, 4, 0, 4, 0, 4, 0, 4,
            0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 4, 0, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 0, 0, 4, 4, 4, 0, 4, 0, 4, 0, 4, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
index_oro = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 3, 2, 2,
             2, 2, 0, 2, 2, 0, 0, 2, 2, 2, 2, 0, 0, 2, 0, 2, 0, 0, 2, 2, 0, 0, 2, 0, 0, 0, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0,
             0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
index_plata = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 0, 5, 5, 3, 5, 5, 5, 5, 5, 5, 5, 5, 0, 5, 3, 5,
               5, 5, 5, 0, 5, 5, 0, 0, 5, 5, 5, 5, 0, 0, 5, 0, 5, 0, 0, 5, 5, 0, 0, 5, 0, 0, 0, 5, 5, 5, 5, 5, 5, 0, 0,
               0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
index_yes = [0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 2, 0, 2, 2, 0, 2, 2, 2, 2, 0, 2, 0, 2, 0,
             0, 2, 0, 0, 2, 2, 2, 0, 2, 2, 0, 2, 2, 2, 0, 2, 0, 0, 2, 0, 0, 0, 0, 2, 0, 2, 0, 0, 2, 2, 0, 2, 2, 2, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0]
# PLAY-BUTTONS
for i in range(0, TAM2):
    imBlack = gtk.Image() # create the image
    imBlack.set_from_file("images/black.png")
    butBlack = gtk.Button() # create the button
    butBlack.set_image(imBlack)
    butBlack.set_relief(gtk.RELIEF_NONE)
    botones.append(butBlack)
# PRINCIPAL WINDOW COLORS
index_interface(index_flecha)
# NEW TABLES
tabla = gtk.Table(TAM, TAM, homogeneous=True)
menu_table = gtk.Table(2, 3, homogeneous=True)
left_table = gtk.Table(2, 3, homogeneous=False)
supertable = gtk.Table(2, 2, homogeneous=False)
record_table = gtk.Table(2, 1, homogeneous=False)
(x, y) = (0, 0) # ADD PLAY-BUTTONS TO Tabla
for b in botones:
    b.posicion = (x, y)
    tabla.attach(b, x, x + 1, y, y + 1)
    x += 1
    if x == TAM: x = 0; y += 1 # row change
(x, y) = (0, 0)
# Level selector row: [<] [entry] [>] over the "change level" button.
left_table.attach(but_left, 0, 1, 0, 1)
left_table.attach(tbox_level, 1, 2, 0, 1)
left_table.attach(but_right, 2, 3, 0, 1)
left_table.attach(but_level, 0, 3, 1, 2)
menu_table.attach(left_table, 0, 3, 0, 2)
menu_table.attach(golpes_txt, 3, 6, 0, 1)
menu_table.attach(but_undo, 6, 9, 0, 1)
menu_table.attach(record_txt, 3, 6, 1, 2)
menu_table.attach(but_reboot, 6, 7, 1, 2)
menu_table.attach(but_hs, 7, 8, 1, 2)
menu_table.attach(but_info, 8, 9, 1, 2)
(x, y) = (0, 0)
supertable.attach(tabla, 0, 1, 0, 1)
supertable.attach(menu_table, 0, 1, 1, 2)
# ---- Signal wiring ----
for b in botones:
    b.connect("clicked", change_board)
    b.connect("clicked", more_undo)
but_left.connect("clicked", funct_left)
but_right.connect("clicked", funct_right)
but_level.connect("clicked", change_level)
but_undo.set_sensitive(False)
but_undo.set_label("No puedes deshacer")
but_undo.connect("clicked", less_undo)
but_undo.connect("pressed", apretado_undo)
but_undo.connect("released", soltado_undo)
but_reboot.connect("clicked", recomienza)
but_hs.connect("clicked", hs_on)
but_info.connect("clicked", info_on)
# Grey foreground for the labels.  NOTE(review): `map` and `style` shadow
# builtins here; harmless in this script but worth renaming eventually.
map = golpes_txt.get_colormap()
color = map.alloc_color("#808080")
style = golpes_txt.get_style().copy()
style.fg[gtk.STATE_NORMAL] = color
golpes_txt.set_style(style)
puntuaciones.set_style(style)
record_txt.set_style(style)
# END
ventana.modify_bg(gtk.STATE_NORMAL, color_gris)
ventana.set_title("THE CLEANER: Deja todo negro!")
ventana.set_position(centro)
ventana.connect("destroy", close_all)
ventana.add(supertable)
ventana.set_icon_from_file("images/icon.png")
ventana.set_resizable(False)
ventana.show_all()
gtk.main()
| {
"repo_name": "velastroll/First-Python-Game",
"path": "game.py",
"copies": "1",
"size": "15286",
"license": "artistic-2.0",
"hash": -4470701385233971700,
"line_mean": 28.0608365019,
"line_max": 120,
"alpha_frac": 0.6407169959,
"autogenerated": false,
"ratio": 2.2542397876419407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8252020746954402,
"avg_score": 0.028587207317507773,
"num_lines": 526
} |
__author__ = 'alvertisjo'
import urllib2,urllib
import apiURLs
import json
import requests
from requests.exceptions import ConnectionError,Timeout
from random import randrange
import json
from django.utils.dateformat import format as timeformat
class RecommenderSECall(object):
def __init__(self,token, education=False, gender=False,age=False, interests=False, timestamp=None):
self.token=token
self.timestamp=timestamp
self.education=education
self.gender=gender
self.age=age
self.interests=interests
def setTimeStamp(self,timestamp):
self.timestamp=timestamp
def getPlaces(self,lat,lng,rad=3000):
query="places?long=%s&lat=%s&rad=%s"%(lng,lat,rad) ##Add location properties
full_url="%s%s"%(apiURLs.recommenderSE,query)#self.userID) ##Add user ID
##Add contextual properties
context=[]
if self.education:
context.append("education")
if self.gender:
context.append("gender")
if self.age:
context.append("age")
if self.interests:
context.append("interests")
if not context:
full_url="%s&context=all"%full_url
else:
full_url="%s&context="%full_url
for contextProperty in context:
full_url="%s%s,"%(full_url,contextProperty)
if full_url.endswith(','):
full_url = full_url[:-1]
##END: Add contextual properties
if self.timestamp is not None:
full_url="%s×tamp=%s"%(full_url,timeformat(self.timestamp,u'U'))
try:
header={"Authorization":self.token}
response = requests.get(full_url,headers=header)
print "Recommender URL: %s" %full_url
#print "got a respone with: %s" %response.text
return response.json()
except ConnectionError as e: # This is the correct syntax
print "error: %s" %e
response = "No response"
return json.dumps({"error":"connection error"})
except Timeout as t: # This is the correct syntax
print "Timeout error: %s" %t
response = "No response"
return json.dumps({"error":t.message})
except requests.exceptions.RequestException as e:
if self.request.retries >= self.max_retries:
print "max retries: %s" %e
return json.dumps({"error":"max retries"})
raise self.retry(exc=e)
except:
return json.dumps([])
def getProducts(self, category=None, method=None, shopId=None):
extraString=""
if str(category).lower()!='all':
extraString="&category=%s"%category
##Add contextual properties
full_url="%sproducts/?currency=euro&sortParam=%s%s"%(apiURLs.recommenderSE,method,extraString) ##Add user ID
context=[]
if self.education:
context.append("education")
if self.gender:
context.append("gender")
if self.age:
context.append("age")
if self.interests:
context.append("interests")
if not context:
full_url="%s&context=all"%full_url
else:
full_url="%s&context="%full_url
for contextProperty in context:
full_url="%s%s,"%(full_url,contextProperty)
if full_url.endswith(','):
full_url = full_url[:-1]
##END: Add contextual properties
if shopId=='Shop1' or shopId=='Shop2':
if shopId=='Shop1':
username = apiURLs.shop1_username
password = apiURLs.shop1_pass
else:
username = apiURLs.shop2_username
password = apiURLs.shop2_pass
oAuthCall=OPENiOAuth()
oAuthCall.getSession(username,password)
#print oAuthCall.getSessionToken()
#print oAuthCall.getAccessToken()
if oAuthCall.status_code ==200:
#request.session["openi-token"]=oAuthCall.getAccessToken()
full_url='%s&shop=%s'%(full_url,oAuthCall.getSessionToken())
else:
full_url='%s&shop=%s'%(full_url,'allStores')
if self.timestamp is not None:
full_url="%s×tamp=%s"%(full_url,self.timestamp)
try:
header={"Authorization":self.token}
response = requests.get(full_url,headers=header)
print "Recommender URL: %s" %full_url
#print "got a respone with: %s" %response.text
return response.json()
except ConnectionError as e: # This is the correct syntax
#print "error: %s" %e
response = "No response"
return json.dumps({"error":"connection error"})
except Timeout as t: # This is the correct syntax
#print "Timeout error: %s" %t
response = "No response"
return json.dumps({"error":t.message})
except:
return json.dumps([])
def getProductCategories(self):
full_url="%sproducts/categories/" %apiURLs.recommenderSE
try:
response = requests.get(full_url)
#print "Recommender URL: %s" %full_url
#print "got a respone with: %s" %response.text
return response.json()
except ConnectionError as e: # This is the correct syntax
#print "error: %s" %e
response = "No response"
return json.dumps({"error":"connection error"})
except Timeout as t: # This is the correct syntax
#print "Timeout error: %s" %t
response = "No response"
return json.dumps({"error":t.message})
except:
return json.dumps([])
def getApplications(self):
pass
class OpeniCall(object):
def __init__(self,token=None):
self.app_name="OPENi"
self.user="openihackathon"
self.tags=''
self.token=token
def getPhotos(self,lat, lng, cbs, tags=None):
self.objectName='photo'
self.cbs=cbs
apps=[]
app={}
app["cbs"]=self.cbs
app["app_name"]=self.app_name
apps.append(app)
self.data={}
self.data["lat"]=str(lat) # not sure if needed to be sent as string or long
self.data["lng"]=str(lng)
#print tags
if tags!=None:
searchtags=[]
for tag in tags.split(','):
searchtags.append(str(tag).replace(" ", ""))
#searchtags.append(str(tags))
self.method="filter_tags_photos"
#self.tags="&tags=[\"%s\"]"%tags
self.data["tags"]=searchtags
else:
self.method="search_media"
##example call: ### http://147.102.6.98t:1988/v.04/photo/?user=tsourom&apps=[{"cbs": "instagram", "app_name": "OPENi"}]&method=filter_tags_photos&data={"lat": "37.9667", "lng": "23.7167", "tags": ["athens"]}
##example call with tags: http://147.102.6.98t:1988/v.04/photo/?user=tsourom&apps=[{"cbs": "instagram", "app_name": "OPENi"}]&method=filter_tags_photos&data={"lat": "37.9667", "lng": "23.7167", "tags": ["athens"]}
query= "user=%s&apps=%s&method=%s&data=%s&format=json"%(self.user,str(apps),self.method, str(self.data))
url = "%s%s/"%(apiURLs.platformAPI,self.objectName)
full_url = url + '?' + query
try:
response = requests.get(full_url)
return response.json()
except ConnectionError as e: # This is the correct syntax
#print "error: %s" %e
return json.dumps({"error":"connection error"})
except Timeout as t: # This is the correct syntax
#print "Timeout error: %s" %t
response = "No response"
return json.dumps({"error":t.message})
except:
return json.dumps([])
def getPlaces(self,city, cbs, radius=3000, limit=20, user=None):
if user is not None:
self.user=user
self.objectName='photo'
self.cbs=cbs
apps=[]
app={}
app["cbs"]=self.cbs
app["app_name"]=self.app_name
apps.append(app)
self.data={}
self.data["near"]=str(city) # not sure if needed to be sent as string or long
self.data["radius"]=str(radius)
self.data["categoryId"]='4bf58dd8d48988d116941735'
self.data["limit"]='12'
self.method="search_venues"
##example call: http://147.102.6.98:1988/v.04/photo/?user=romanos&apps=[{"cbs": "foursquare", "app_name": "OPENi"}]&method=search_venues&data={"near": "Athens", "limit": "12", "radius": "800", "categoryId": "4bf58dd8d48988d116941735"}
query= "user=%s&apps=%s&method=%s&data=%s&format=json"%(self.user,str(apps),self.method, str(self.data))
url = "%s%s/"%(apiURLs.platformAPI,self.objectName)
full_url = url + '?' + query
#print full_url
try:
response = requests.get(full_url)
return response.json()
except ConnectionError as e: # This is the correct syntax
#print "error: %s" %e
return response
except Timeout as t: # This is the correct syntax
#print "Timeout error: %s" %t
return json.dumps({"error":t.message})
except:
return json.dumps([])
#http://localhost:1988/v.04/photo/?user=romanos.tsouroplis&apps=[{"cbs": "facebook", "app_name": "OPENi"}]&method=get_all_statuses_for_account&data={"account_id": "675350314"}
def getStatuses(self,account_id, cbs=None, tags=None):
self.objectName='photo'
self.cbs=cbs
#self.method='filter_tags_photos'
self.method="get_all_statuses_for_account"
#self.cbs='instagram'
apps=[]
app={}
app["cbs"]=self.cbs
app["app_name"]=self.app_name
apps.append(app)
tags=[]
tags.append(tags)
self.data={}
self.data["account_id"]=str(account_id) # not sure if needed to be sent as string or long
##if tags:
## self.data['tags']=tags
##example call: ### http://147.102.6.98t:1988/v.04/photo/?user=tsourom&apps=[{"cbs": "instagram", "app_name": "OPENi"}]&method=filter_tags_photos&data={"lat": "37.9667", "lng": "23.7167", "tags": ["athens"]}
query= "user=%s&apps=%s&method=%s&data=%s&format=json"%(self.username,str(apps),self.method, str(self.data))
url = "%s%s/"%(apiURLs.platformAPI,self.objectName)
full_url = url + '?' + query
try:
response = requests.get(full_url)
return response.json()
except ConnectionError as e: # This is the correct syntax
#print "error: %s" %e
return response
except Timeout as t: # This is the correct syntax
#print "Timeout error: %s" %t
return json.dumps({"error":t.message})
except:
return json.dumps([])
def getShops(self, cbs, user=None):
self.objectName='shop'
self.cbs=cbs
full_url= "%s%s/?api_key=special-key&format=json"%(apiURLs.swaggerAPI,self.objectName)
try:
response = requests.get(full_url)
#print response.text
return response.json()
except ConnectionError as e: # This is the correct syntax
#print "error: %s" %e
return response
except Timeout as t: # This is the correct syntax
#print "Timeout error: %s" %t
return json.dumps({"error":t.message})
except:
return json.dumps([])
def getOrders(self, cbs, user=None):
self.objectName='order'
self.cbs=cbs
full_url= "%s%s/?api_key=special-key&format=json"%(apiURLs.swaggerAPI,self.objectName)
try:
response = requests.get(full_url)
#print response.text
return response.json()
except ConnectionError as e: # This is the correct syntax
print "error: %s" %e
return response
except Timeout as t: # This is the correct syntax
print "Timeout error: %s" %t
return json.dumps({"error":t.message})
except:
return json.dumps([])
def getContext(self, objectID=None):
self.objectName='Context'
self.cbs='openi'
full_url= "%s"%(apiURLs.searchAPIPath)
try:
header={"Authorization":self.token}
response = requests.get(full_url,headers=header)
#print response.text
return response.json()
except ConnectionError as e: # This is the correct syntax
print "error: %s" %e
return response
except Timeout as t: # This is the correct syntax
print "Timeout error: %s" %t
return json.dumps({"error":t.message})
except requests.exceptions.RequestException as e:
if self.request.retries >= self.max_retries:
print "max retries: %s" %e
return json.dumps({"error":"connection error"})
raise self.retry(exc=e)
except:
return json.dumps([])
class CloudletCall(object):
    """One-shot probe of the cloudlet API, performed (oddly) inside __init__.

    NOTE(review): this constructor tries to `return` a requests.Response.
    Returning a non-None value from __init__ raises TypeError in Python, so
    only the exception paths can complete construction without error.  The
    passed-in `signature`/`user` arguments are also ignored in favour of the
    hard-coded demo credentials.  Needs a redesign into a regular method.
    """
    def __init__(self,signature=None,user=None):
        self.signature="Ko49aYdt2+1YHtaUSNbfAXfp6LYe2svOW7h5mA+WNLYZH+hFCykwQ1a+1Ig9i3DM5g1PBcsHdig3NIToyKANDQ=="
        self.user="Alexis"
        self.id=''
        # Build the auth token structure (ultimately unused: see `hdr` below).
        token={}
        user_tmp={}
        user_tmp['user']=self.user
        #{ "token": { "user": "dmccarthy" }, "signature": "cVnf/YsH/h+554tlAAh5CvyLr3Y9xrqAK4zxTA/C8PMDWcjcUZistg90H2HiCL/tAL3VZe/53VbJcrFZGyFZDw==" }
        token['token']=user_tmp
        token['signature']=self.signature
        full_url='%s%s'%(apiURLs.cloudletAPI,'cloudlets')
        #print full_url
        #print token
        headerCall={}
        headerCall["auth_token"]= token
        #print headerCall
        # Hard-coded demo auth header that actually gets sent.
        hdr={"auth_token": { "token": { "user": "dmccarthy" }, "signature": "cVnf/YsH/h+554tlAAh5CvyLr3Y9xrqAK4zxTA/C8PMDWcjcUZistg90H2HiCL/tAL3VZe/53VbJcrFZGyFZDw==" }}
        #print(hdr)
        try:
            # NOTE(review): `headers=` expects a dict of strings; passing
            # json.dumps(hdr) (a single str) is almost certainly wrong.
            response = requests.get(full_url,headers=json.dumps(hdr),verify=False)
            #print response.text
            #print(response.json())
            return response
        except ConnectionError as e: # This is the correct syntax
            print "error: %s" %e
            response = None
            return response
        except Timeout as t: # This is the correct syntax
            print "Timeout error: %s" %t
            response = "No response"
            return json.dumps({"error":t.message})
        except:
            return json.dumps([])
        # Unreachable: every path above returns or raises.
        return None
class FoursquareCall(object):
    """Thin wrapper around the Foursquare v2 REST API."""
    def __init__(self, access_token=None):
        # API version date required by Foursquare (the "v" parameter).
        self.version='20141016'
        self.access_token= access_token
        self.url='https://api.foursquare.com/v2/'
    def getSelf(self):
        """Return the authenticated user's profile as parsed JSON."""
        full_url = "%susers/self?oauth_token=%s&v=%s"%(apiURLs.FoursquareURL, self.access_token,self.version)
        #print(full_url)
        try:
            response = requests.get(full_url, verify=False)
            #print response
            return response.json()
        except ConnectionError as e: # This is the correct syntax
            # NOTE(review): if requests.get itself raised, `response` is
            # unbound here and this line raises NameError — confirm.
            print "error: %s" %e
            return response.json()
        except Timeout as t: # This is the correct syntax
            print "Timeout error: %s" %t
            return json.dumps({"error":t.message})
        except:
            return json.dumps([])
    def getCheckins(self,USER_ID=None):
        """Return up to 250 check-ins for USER_ID as parsed JSON."""
        full_url = "%susers/%s/checkins?oauth_token=%s&v=%s&limit=250"%(apiURLs.FoursquareURL, USER_ID, self.access_token,self.version)
        ##print(full_url)
        try:
            response = requests.get(full_url, verify=False)
            #print response
            return response.json()
        except ConnectionError as e: # This is the correct syntax
            # NOTE(review): same unbound-`response` risk as in getSelf().
            print "error: %s" %e
            return response.json()
        except Timeout as t: # This is the correct syntax
            print "Timeout error: %s" %t
            return json.dumps({"error":t.message})
        except:
            return json.dumps([])
    def getPlacesAround(self, lat,lng,radius=3000):
        """Search up to 50 venues near (lat, lng).

        NOTE(review): `radius` is accepted but never added to the URL —
        presumably an oversight; confirm before relying on it.
        """
        full_url = "%svenues/search?oauth_token=%s&v=%s&ll=%s,%s&limit=50"%(apiURLs.FoursquareURL, self.access_token,self.version,lat,lng)
        #print(full_url)
        try:
            response = requests.get(full_url, verify=False)
            #print response
            return response.json()
        except ConnectionError as e: # This is the correct syntax
            print "error: %s" %e
            return response.json()
        except Timeout as t: # This is the correct syntax
            print "Timeout error: %s" %t
            return json.dumps({"error":t.message})
        except:
            return json.dumps([])
class ProductDB(object):
    """Access to the external products database."""
    def __init__(self, access_token=None):
        self.access_token= access_token
        # Product category codes; "53000000" intentionally commented out.
        self.productsIDs=["70000000","68000000","77000000","54000000",
                          #"53000000",
                          "83000000","47000000","67000000","66000000","65000000","58000000","78000000","50000000","63000000","51000000","72000000","75000000","73000000","81000000","88000000","61000000","64000000","10000000","79000000","85000000","71000000","62000000","84000000","80000000","82000000","86000000"]
    def getRandomCategory (self):
        """Return one category code chosen uniformly at random."""
        categories=self.productsIDs
        # with open('product-categories.json') as f:
        #     for line in f:
        #         data.append(json.loads(line))
        return categories[randrange(len(categories))]
    def getProducts(self, limit=3):
        """Fetch up to `limit` products (category filter currently disabled)."""
        #full_url = "%s?limit=%s&category=%s"%(apiURLs.productsDBurl, limit, self.getRandomCategory())
        full_url = "%s?limit=%s"%(apiURLs.productsDBurl, limit)
        #print(full_url)
        try:
            response = requests.get(full_url, verify=False)
            #print response
            return response.json()
        except ConnectionError as e: # This is the correct syntax
            # NOTE(review): `response` may be unbound here (NameError) if the
            # GET itself failed — confirm.
            print "error: %s" %e
            return response.json()
        except Timeout as t: # This is the correct syntax
            print "Timeout error: %s" %t
            return json.dumps({"error":t.message})
        except:
            return json.dumps([])
class OPENiOAuth(object):
    """Session/authorization helper for the demo2 OAuth endpoints."""
    def __init__(self):
        self.access_token= None
        self.session= None
        self.created= None
        self.status_code=None
    def getAccessToken(self):
        """Return the token obtained by authorize(), or None."""
        return self.access_token
    def getSessionToken(self):
        """Return the session id obtained by getSession(), or None."""
        return self.session
    def getSession(self, username, password):
        """POST credentials to /sessions and store the session id and
        HTTP status on the instance; failures leave access_token as None."""
        full_url = "%ssessions"%(apiURLs.demo2APIoAuth)
        #print(full_url)
        try:
            #data={"username":username,"password":password, "scope":""}
            # NOTE(review): the JSON body is built by string interpolation, so
            # credentials containing quotes would break it — confirm inputs.
            data='{"username":"%s","password":"%s", "scope":""}'%(username,password)
            response = requests.post(full_url, data, verify=False)
            #print response
            self.status_code=response.status_code
            self.session=response.json()["session"]
            #print self.session
        except ConnectionError as e: # This is the correct syntax
            print "error: %s" %e
            self.access_token=None
        except Timeout as t: # This is the correct syntax
            print "Timeout error: %s" %t
            self.access_token=None
        except:
            self.access_token=None
    def authorize(self, username, password):
        """Create a session, then exchange it for an access token."""
        self.getSession(username,password)
        full_url = "%sauthorizations"%(apiURLs.demo2APIoAuth)
        #print(full_url)
        try:
            data={"session":self.session,"client_id":username}
            response = requests.post(full_url, data, verify=False)
            #print response
            self.status_code=response.status_code
            self.access_token=response.json()["token"]
            return response.json()
        except ConnectionError as e: # This is the correct syntax
            # NOTE(review): `response` may be unbound here (NameError) if the
            # POST itself failed — confirm.
            print "error: %s" %e
            return response.json()
        except Timeout as t: # This is the correct syntax
            print "Timeout error: %s" %t
            return json.dumps({"error":t.message})
        except:
            return json.dumps([])
"repo_name": "OPENi-ict/ntua_demo",
"path": "openiPrototype/appUI/queryHandlers.py",
"copies": "1",
"size": "20464",
"license": "apache-2.0",
"hash": 227421526402075100,
"line_mean": 40.8507157464,
"line_max": 312,
"alpha_frac": 0.5726153245,
"autogenerated": false,
"ratio": 3.7833240894804954,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9755072831273184,
"avg_score": 0.020173316541462354,
"num_lines": 489
} |
__author__ = 'alvertisjo'
from django.core.serializers import json
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import ConnectionError
class OpenProductData(object):
    """Placeholder pipeline for importing open product data.

    Every stage is currently an unimplemented stub.  The intended upstream
    source is the POD open-data API, documented at
    http://pod.opendatasoft.com/api/doc/#doc-datasets-search
    """

    def getData(self):
        """Fetch product records from the POD open-data API (stub)."""
        pass

    def readDataFromFile(self):
        """Load previously downloaded product data from disk (stub)."""
        pass

    def storeToGraph(self, data):
        """Persist product records into the Neo4j graph store (stub).

        The implementation would POST Cypher statements to the transactional
        endpoint, i.e. POST <url>db/data/transaction/commit with a JSON
        {"statements": [...]} body.
        """
        url = 'http://snf-561492.vm.okeanos.grnet.gr:7474/'
        pass
"repo_name": "OPENi-ict/ntua_demo",
"path": "openiPrototype/appUI/importProductData.py",
"copies": "1",
"size": "1614",
"license": "apache-2.0",
"hash": 3468585145048355000,
"line_mean": 37.4523809524,
"line_max": 231,
"alpha_frac": 0.5978934325,
"autogenerated": false,
"ratio": 3.463519313304721,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4561412745804721,
"avg_score": null,
"num_lines": null
} |
__author__ = 'am6puk'
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email import Encoders
import os
import ConfigParser
# Load SMTP settings from /etc/simple_backup/simple_backup.conf at import
# time.  config.read() ignores a missing file, but config.get() below will
# raise NoSectionError if the [smtp] section is absent -- so this module
# cannot be imported without a valid config installed.
name = 'simple_backup.conf'
conf_path = '/etc/simple_backup/'
config = ConfigParser.ConfigParser()
config.read(conf_path+name)
# Outgoing-mail server, credentials and default recipient ([smtp] section).
SMTP_HOST = config.get('smtp', 'SMTP_HOST')
SMTP_PORT = config.get('smtp', 'SMTP_PORT')
SMTP_USER = config.get('smtp', 'SMTP_USER')
SMTP_PASS = config.get('smtp', 'SMTP_PASS')
TO = config.get('smtp', 'TO')
# Aliases kept from the original gmail-specific implementation.
gmail_user = SMTP_USER
gmail_pwd = SMTP_PASS
def mail(to, subject, text, attach=None):
    """Send a plain-text email through the configured SMTP server.

    to:      recipient address
    subject: message subject line
    text:    plain-text message body
    attach:  optional path of a file to attach (sent base64-encoded)
    """
    msg = MIMEMultipart()
    msg['From'] = gmail_user
    msg['To'] = to
    msg['Subject'] = subject
    msg.attach(MIMEText(text))
    if attach:
        part = MIMEBase('application', 'octet-stream')
        # FIX: close the attachment file deterministically instead of
        # leaking the handle until garbage collection.
        with open(attach, 'rb') as attachment:
            part.set_payload(attachment.read())
        Encoders.encode_base64(part)
        part.add_header('Content-Disposition',
                        'attachment; filename="%s"' % os.path.basename(attach))
        msg.attach(part)
    mailServer = smtplib.SMTP(SMTP_HOST, SMTP_PORT)
    try:
        mailServer.ehlo()
        mailServer.starttls()  # upgrade to TLS before sending credentials
        mailServer.ehlo()
        mailServer.login(gmail_user, gmail_pwd)
        mailServer.sendmail(gmail_user, to, msg.as_string())
    finally:
        # FIX: release the connection even when login/send raises; the old
        # code leaked the socket on any SMTP failure.
        mailServer.close()
def send(subject, text):
    """Convenience wrapper: mail the configured default recipient (TO)."""
    return mail(TO, subject, text)
| {
"repo_name": "Am6puk/Simple_Mysql_Backup",
"path": "src/simple_backup/modules/mail.py",
"copies": "1",
"size": "1338",
"license": "mit",
"hash": 4046335615415301000,
"line_mean": 28.0869565217,
"line_max": 100,
"alpha_frac": 0.6943198804,
"autogenerated": false,
"ratio": 3.170616113744076,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9283287423357884,
"avg_score": 0.01632971415723833,
"num_lines": 46
} |
__author__ = 'Am6puk'
#!/usr/bin/env python
"""
simple_backup setup file
"""
from distutils.core import setup
import glob
# NOTE(review): distutils' setup() silently ignores install_requires and
# zip_safe -- these only take effect when built with setuptools.  Confirm
# which tool actually installs this package.
install_requires = [
    'mysql-python>=1.2.3',
    'argparse',
    'ConfigParser'
]
setup(
    name='simple_backup',
    version='0.1.4',
    description='Simple Mysql Backup',
    author='Andrey Rozhkov',
    author_email='am6puk@gmail.com',
    url='http://unixhelp.org/',
    # README is read at build time; the handle is left to GC.
    long_description=open('README.md').read(),
    license='MIT License',
    zip_safe=False,
    platforms='any',
    install_requires=install_requires,
    package_dir={'': 'src'},
    packages=['simple_backup', 'simple_backup.modules', 'simple_backup.app'],
    scripts=glob.glob('src/simple-backup'),
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Topic :: Database',
        'Topic :: Utilities'
    ],
    # Config templates installed system-wide (read back by the modules).
    data_files=[
        ('/etc/simple_backup', glob.glob('./conf/*.conf')),
    ],
    keywords = 'mysql dump hotcopy',
)
| {
"repo_name": "Am6puk/Simple_Mysql_Backup",
"path": "setup.py",
"copies": "1",
"size": "1310",
"license": "mit",
"hash": 3454326659753379000,
"line_mean": 26.2916666667,
"line_max": 79,
"alpha_frac": 0.5664122137,
"autogenerated": false,
"ratio": 3.9696969696969697,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.503610918339697,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amancevice'
import pline
from nose.tools import assert_equal, assert_dict_equal, assert_true, assert_in
def test_activity_shape():
    """ShellCommandActivity serializes to the expected {id, name, fields} dict."""
    my_activity = pline.activities.ShellCommandActivity(name='MyActivity', id='Activity_adbc1234')
    my_activity.command = "echo $1 $2"
    my_activity.scriptArgument = ['hello', 'world']
    returned = dict(my_activity)
    expected = {
        'id'     : 'Activity_adbc1234',
        'name'   : 'MyActivity',
        'fields' : sorted([
            { 'key': 'command',        'stringValue': 'echo $1 $2' },
            { 'key': 'type',           'stringValue': 'ShellCommandActivity' },
            { 'key': 'scriptArgument', 'stringValue': 'hello' },
            { 'key': 'scriptArgument', 'stringValue': 'world' }])}
    # nose generator test: each yield becomes one assertion.
    yield assert_equal, returned.keys(), expected.keys()
    for key in ['id', 'name']:
        yield assert_equal, returned[key], expected[key]
    # fields may be emitted in any order, so compare as unordered collections.
    yield assert_equal, len(returned['fields']), len(expected['fields'])
    for item in returned['fields']:
        yield assert_in, item, expected['fields']
def test_initattr():
    """Keyword arguments passed to the constructor become attributes."""
    data_node = pline.data_nodes.S3DataNode(
        id='MyDataNode1', name='MyDataNode1', workerGroup='TestGroup')
    assert_equal(data_node.workerGroup, 'TestGroup')
def test_setattr():
    """Attributes assigned after construction can be read back unchanged."""
    data_node = pline.data_nodes.S3DataNode(
        id='MyDataNode1', name='MyDataNode1', workerGroup='TestGroup')
    data_node.directoryPath = 's3://bucket/pipeline/'
    assert_equal(data_node.directoryPath, 's3://bucket/pipeline/')
def test_node_shape():
    """S3DataNode serializes to the expected {id, name, fields} dict."""
    node = pline.data_nodes.S3DataNode(
        id='MyDataNode1', name='MyDataNode1', workerGroup='TestGroup')
    node.directoryPath = 's3://bucket/pipeline/'
    returned = dict(node)
    expected = {
        'id'     : 'MyDataNode1',
        'name'   : 'MyDataNode1',
        'fields' : [
            { 'stringValue' : 'TestGroup',             'key' : 'workerGroup' },
            { 'stringValue' : 's3://bucket/pipeline/', 'key' : 'directoryPath' },
            { 'stringValue' : 'S3DataNode',            'key' : 'type' }]}
    # nose generator test: each yield becomes one assertion.
    yield assert_equal, returned.keys(), expected.keys()
    for key in ['id', 'name']:
        yield assert_equal, returned[key], expected[key]
    # fields may be emitted in any order, so compare as unordered collections.
    yield assert_equal, len(returned['fields']), len(expected['fields'])
    for item in returned['fields']:
        yield assert_in, item, expected['fields']
def test_param_shape():
    """String parameter serializes to {id, stringValue, attributes}."""
    my_param = pline.parameters.String(
        id          = 'MyParam1',
        value       = 'Here is the value I am using',
        description = 'This value is extremely important',
        watermark   = 'Choose a value between 0 and 99.')
    returned = dict(my_param)
    expected = {
        'id'          : 'MyParam1',
        'stringValue' : 'Here is the value I am using',
        'attributes'  : [
            { 'key': 'type',        'stringValue': 'String' },
            { 'key': 'description', 'stringValue': 'This value is extremely important' },
            { 'key': 'watermark',   'stringValue': 'Choose a value between 0 and 99.' }]}
    # nose generator test: each yield becomes one assertion.
    yield assert_equal, returned.keys(), expected.keys()
    for key in ['id', 'stringValue']:
        yield assert_equal, returned[key], expected[key]
    # attributes may be emitted in any order, so compare as unordered sets.
    yield assert_equal, len(returned['attributes']), len(expected['attributes'])
    for item in returned['attributes']:
        yield assert_in, item, expected['attributes']
class MyCustomS3DataNode(pline.data_nodes.S3DataNode):
    """Fixture subclass: checks that TYPE_NAME overrides the serialized type."""
    TYPE_NAME = 'S3DataNode'
def test_custom_class_type():
    """Subclasses report the TYPE_NAME they declare, not the class name."""
    custom_node = MyCustomS3DataNode(id='Foo', name='Bar')
    assert_equal(custom_node.type, 'S3DataNode')
def test_pipeline_assembly():
    """End-to-end: assemble a pipeline (schedule, resource, activities, a
    parameterized command) and check the AWS payload() shape."""
    pipeline = pline.Pipeline(
        name      = 'MyPipeline',
        unique_id = 'MyPipeline1',
        desc      = 'An example pipeline description',
        region    = 'us-west-2' )
    schedule = pline.Schedule(
        id          = 'Schedule1',
        name        = 'Schedule',
        period      = '1 day',
        startAt     = pline.keywords.startAt.FIRST_ACTIVATION_DATE_TIME,
        occurrences = 1 )
    definition = pipeline.definition( schedule,
        pipelineLogUri = "s3://bucket/pipeline/log" )
    resource = pline.resources.Ec2Resource(
        id           = 'Resource1',
        name         = 'Resource',
        role         = 'DataPipelineDefaultRole',
        resourceRole = 'DataPipelineDefaultResourceRole',
        schedule     = schedule )
    activity = pline.activities.ShellCommandActivity(
        id       = 'MyActivity1',
        name     = 'MyActivity',
        runsOn   = resource,
        schedule = schedule,
        command  = 'echo hello world' )
    param = pline.parameters.String(
        id          = 'myShellCmd',
        value       = 'grep -rc "GET" ${INPUT1_STAGING_DIR}/* > ${OUTPUT1_STAGING_DIR}/output.txt',
        description = 'Shell command to run' )
    # Passing the parameter object as `command` should serialize to the
    # "#{myShellCmd}" reference in the payload below.
    param_activity = pline.activities.ShellCommandActivity(
        id       = 'MyParamActivity1',
        name     = 'MyParamActivity1',
        runsOn   = resource,
        schedule = schedule,
        command  = param )
    pipeline.add(schedule, definition, resource, activity, param_activity)
    pipeline.add_param(param)
    returned = pipeline.payload()
    expected = {
        'pipelineId'       : None,
        'parameterValues'  : [{
            'stringValue': 'grep -rc "GET" ${INPUT1_STAGING_DIR}/* > ${OUTPUT1_STAGING_DIR}/output.txt', 'id': 'myShellCmd'}],
        'parameterObjects' : [{
            'attributes': [
                {'stringValue': 'String', 'key': 'type'},
                {'stringValue': 'Shell command to run', 'key': 'description'}],
            'id': 'myShellCmd'}],
        'pipelineObjects'  : [{
            'fields': [
                {'stringValue': 'DataPipelineDefaultResourceRole', 'key': 'resourceRole'},
                {'stringValue': 'DataPipelineDefaultRole', 'key': 'role'},
                {'stringValue': 'Ec2Resource', 'key': 'type'},
                {'refValue': 'Schedule1', 'key': 'schedule'}],
            'id': 'Resource1',
            'name': 'Resource'}, {
            'fields': [
                {'stringValue': '#{myShellCmd}', 'key': 'command'},
                {'refValue': 'Schedule1', 'key': 'schedule'},
                {'stringValue': 'ShellCommandActivity', 'key': 'type'},
                {'refValue': 'Resource1', 'key': 'runsOn'}],
            'id': 'MyParamActivity1',
            'name': 'MyParamActivity1'}, {
            'fields': [
                {'stringValue': 'echo hello world', 'key': 'command'},
                {'refValue': 'Schedule1', 'key': 'schedule'},
                {'stringValue': 'ShellCommandActivity', 'key': 'type'},
                {'refValue': 'Resource1', 'key': 'runsOn'}],
            'id': 'MyActivity1',
            'name': 'MyActivity'}, {
            'fields': [
                {'stringValue': 'FIRST_ACTIVATION_DATE_TIME', 'key': 'startAt'},
                {'stringValue': 'Schedule', 'key': 'type'},
                {'stringValue': '1 day', 'key': 'period'},
                {'stringValue': '1', 'key': 'occurrences'}],
            'id': 'Schedule1', 'name': 'Schedule'}, {
            'fields': [
                {'stringValue': 's3://bucket/pipeline/log', 'key': 'pipelineLogUri'},
                {'refValue': 'Schedule1', 'key': 'schedule'},
                {'stringValue': 'DataPipelineDefaultResourceRole', 'key': 'resourceRole'},
                {'stringValue': 'CASCADE', 'key': 'failureAndRerunMode'},
                {'stringValue': 'DataPipelineDefaultRole', 'key': 'role'},
                {'stringValue': 'cron', 'key': 'scheduleType'}],
            'id': 'Default', 'name': 'Default'}]}
    # Objects and their fields may come back in any order: compare sizes,
    # then match each returned entry against its expected peer by id.
    yield assert_equal, len(returned['pipelineObjects']), len(expected['pipelineObjects'])
    yield assert_equal, len(returned['parameterObjects']), len(expected['parameterObjects'])
    yield assert_equal, len(returned['parameterValues']), len(expected['parameterValues'])
    for val in returned['parameterValues']:
        yield assert_in, val, expected['parameterValues']
    for val in returned['parameterObjects']:
        for exp in [x for x in expected['parameterObjects'] if x['id'] == val['id']]:
            for attr in val['attributes']:
                yield assert_in, attr, exp['attributes']
    for val in returned['pipelineObjects']:
        for exp in [x for x in expected['pipelineObjects'] if x['id'] == val['id']]:
            for attr in val['fields']:
                yield assert_in, attr, exp['fields']
| {
"repo_name": "amancevice/pline",
"path": "tests/__init__.py",
"copies": "1",
"size": "8552",
"license": "mit",
"hash": 7757091248981622000,
"line_mean": 41.1280788177,
"line_max": 126,
"alpha_frac": 0.5647801684,
"autogenerated": false,
"ratio": 3.8714350384789498,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.493621520687895,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amandeep'
import re
from datetime import datetime, timedelta
from time import mktime, gmtime
import calendar
class DM(object):
    """Date-manipulation helpers used by Karma transformation scripts."""

    def __init__(self):
        self.name = "Date Manipulation"

    @staticmethod
    def iso8601date(date, date_format=None):
        """Convert a date to ISO8601 date format.

        input format: YYYY-MM-DD HH:MM:SS GMT (works less reliably for other TZs)
            or YYYY-MM-DD HH:MM:SS.0
            or YYYY-MM-DD
            or epoch (13 digit, indicating ms)
            or epoch (10 digit, indicating sec)
        output format: iso8601 string, or '' when no known format matches
        """
        date = date.strip()
        if date_format:
            try:
                if date_format.find('%Z') != -1:
                    # strptime cannot parse a numeric "+HHMM" offset via %Z,
                    # so pull the offset out, parse without it, then shift.
                    date_format = date_format.replace('%Z', '')
                    match_object = re.search('(([-+])(\d{2})(\d{2}))', date)
                    tz = match_object.groups()
                    dt = datetime.strptime(date.replace(tz[0], ''), date_format)
                    delta = timedelta(hours=int(tz[2]), minutes=int(tz[3]))
                    if tz[1] == '-': delta = delta * -1
                    dt = dt + delta
                    return dt.isoformat()
                return datetime.strptime(date, date_format).isoformat()
            except Exception:
                pass
        # No explicit format (or it failed): try a sequence of known layouts.
        try:
            return datetime.strptime(date, "%Y-%m-%d %H:%M:%S %Z").isoformat()
        except Exception:
            pass
        try:
            return datetime.strptime(date, "%A, %b %d, %Y").isoformat()
        except Exception:
            pass
        try:
            return datetime.strptime(date, "%Y-%m-%d %H:%M:%S.0").isoformat()
        except Exception:
            pass
        try:
            return datetime.strptime(date, "%Y-%m-%d").isoformat()
        except Exception:
            pass
        try:
            return datetime.strptime(date, "%b %d, %Y").isoformat()
        except Exception:
            pass
        try:
            return datetime.strptime(date, "%B %d, %Y").isoformat()
        except Exception:
            pass
        try:
            return datetime.strptime(date, "%B %d, %Y %I:%M %p").isoformat()
        except Exception:
            pass
        try:
            date = int(date)
            if 1000000000000 < date < 9999999999999:
                # 13 digit epoch (milliseconds)
                return datetime.fromtimestamp(mktime(gmtime(date / 1000))).isoformat()
        except Exception:
            pass
        try:
            date = int(date)
            if 1000000000 < date < 9999999999:
                # 10 digit epoch (seconds)
                return datetime.fromtimestamp(mktime(gmtime(date))).isoformat()
        except Exception:
            pass
        # If all else fails, return the empty string.
        # (FIX: the old comment claimed the input was returned -- it never was.)
        return ''

    @staticmethod
    def translate_date(string, in_format, out_format):
        """Reformat a date string from in_format to out_format ('' on failure)."""
        try:
            return datetime.strptime(string.strip(), in_format).date().strftime(out_format)
        except Exception:
            pass
        return ''

    @staticmethod
    def conver_time_to_epoch(date, format=None):
        """Convert a date string to a UTC epoch in seconds ('' on failure).

        The (typo'd) name is kept as-is: existing Karma scripts call it.
        """
        date = date.strip()
        if format:
            try:
                # BUG FIX: the epoch was previously computed here and then
                # discarded (no `return`), so an explicit format never worked.
                return calendar.timegm(datetime.strptime(date, format).timetuple())
            except Exception:
                pass
        try:
            return calendar.timegm(datetime.strptime(date, "%Y-%m-%dT%H:%M:%S").timetuple())
        except Exception:
            pass
        return ''

    @staticmethod
    def epoch_to_iso8601(timestamp):
        """ISO8601 (local time) from a 10/13/16-digit epoch string."""
        ts = float(timestamp)
        if len(timestamp) == 13:
            ts = ts / 1000      # milliseconds
        elif len(timestamp) == 16:
            ts = ts / 1000000   # microseconds
        return datetime.fromtimestamp(ts).isoformat()

    @staticmethod
    def get_year_from_iso_date(iso_date):
        """First four characters (the year) of an ISO date; '' for empty input."""
        if iso_date:
            return iso_date[0:4]
        return ''

    @staticmethod
    def get_current_time():
        """Current local time formatted as YYYY-MM-DD HH:MM:SS."""
        return datetime.today().strftime("%Y-%m-%d %H:%M:%S")
| {
"repo_name": "darkshadows123/dig-alignment",
"path": "versions/3.0/karma/python/date_manipulation.py",
"copies": "2",
"size": "3984",
"license": "apache-2.0",
"hash": -2942264588346968600,
"line_mean": 28.2941176471,
"line_max": 92,
"alpha_frac": 0.5022590361,
"autogenerated": false,
"ratio": 4.293103448275862,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005403101539640183,
"num_lines": 136
} |
__author__ = 'amandeep'
import json
from flask import request
from flask import Response
from flask import make_response
from functools import wraps
from flask import Flask
from elasticsearch_manager import ElasticSearchManager
from dig_bulk_folders import BulkFolders
import ConfigParser
application = Flask(__name__)
# Elasticsearch document field holding extracted phone numbers.
phone_field = 'hasFeatureCollection.phonenumber_feature.phonenumber'
# Service-wide basic-auth credentials, lazily loaded from config.properties
# by init() on first use.
basic_username = None
basic_password = None
def init():
    """Lazily populate the basic-auth globals from config.properties.

    A no-op after the first successful load.
    """
    global basic_username
    global basic_password
    if basic_username is None and basic_password is None:
        parser = ConfigParser.RawConfigParser()
        parser.read('config.properties')
        basic_username = parser.get('BasicAuth', 'username')
        basic_password = parser.get('BasicAuth', 'password')
def check_auth(username, password):
    """Folder-level auth check: accept any request that supplies both
    credentials (they are validated downstream by the folders service)."""
    return not (username is None or password is None)
def authenticate():
    """Build the 401 response that asks the client for HTTP basic auth."""
    challenge = {'WWW-Authenticate': 'Basic realm="Login Required"'}
    body = ('Could not verify your access level for that URL.\n'
            'You have to login with proper credentials')
    return Response(body, 401, challenge)
def requires_auth(f):
    """View decorator: require HTTP basic-auth credentials (any non-empty
    pair is accepted -- see check_auth); otherwise send a 401 challenge."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        credentials = request.authorization
        if credentials and check_auth(credentials.username, credentials.password):
            return f(*args, **kwargs)
        return authenticate()
    return wrapper
def requires_basic_auth(f):
    """View decorator: require the service-wide credentials configured in
    config.properties (see check_basic_auth); otherwise send a 401."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        credentials = request.authorization
        if credentials and check_basic_auth(credentials.username, credentials.password):
            return f(*args, **kwargs)
        return authenticate()
    return wrapper
def check_basic_auth(username, password):
    """Compare the supplied credentials against the configured pair."""
    init()  # ensure the config-backed globals are populated
    return (username, password) == (basic_username, basic_password)
@application.route('/', methods=['GET'])
def instructions():
    """Landing page: point callers at the API documentation."""
    return 'Read api details at - https://github.com/usc-isi-i2/dig-export-csv'
@application.route('/api/ads', methods=['GET'])
@requires_basic_auth
def get_ads():
    """Export ads as TSV, selected by exactly one of the query params
    uri (single ad), post_ids (comma-separated), or phone.

    Other params: size (max hits, default 20), heading=1 (prepend column
    names), store=1 (return as file download, the default).

    NOTE(review): if none of uri/post_ids/phone is supplied the function
    falls through every branch and returns None (a Flask 500) -- confirm
    whether a 400 was intended.
    """
    es = ElasticSearchManager()
    bf = BulkFolders()
    ad_id = request.args.get('uri')
    postids = request.args.get('post_ids')
    phone = request.args.get('phone')
    size = request.args.get('size')
    headings = request.args.get('heading')
    store = request.args.get('store')
    """first line columns names if headings = 1"""
    if headings is None:
        headings = '0'
    if size is None:
        size = "20"
    if headings == "1":
        result = "\t".join(bf.ht_headings) + '\n'
    else:
        result = ''
    if store is None:
        store = "1"
    # Mode 1: a single ad selected by its uri.
    try:
        if ad_id is not None:
            ids = [ad_id]
            result += process_results(bf, es.search_es(ElasticSearchManager.create_ids_query(ids), None))
            if store == "1":
                response = make_response(result)
                response.headers["Content-Disposition"] = "attachment; filename=data.tsv"
                return response
            else:
                return Response(result, 200)
    except Exception as e:
        return Response(str(e), 500)
    # Mode 2: one query per post id; only hits whose url contains the post
    # id are kept (the ES query can over-match).
    try:
        if postids is not None:
            post_ids = postids.split(',')
            for post_id in post_ids:
                res = es.search_es(ElasticSearchManager.create_postid_query(post_id), int(size))
                hits = res['hits']['hits']
                for hit in hits:
                    ad = hit['_source']
                    if post_id in ad['url']:
                        tab_separated = "\t".join(bf.ht_to_array(ad))
                        result = result + tab_separated + '\n'
            if store == "1":
                response = make_response(result)
                response.headers["Content-Disposition"] = "attachment; filename=data.tsv"
                return response
            else:
                return Response(result, 200)
    except Exception as e:
        return Response(str(e), 500)
    # Mode 3: all ads advertising a given phone number.
    try:
        if phone is not None:
            phones = [phone]
            result += process_results(bf, es.search_es(
                ElasticSearchManager.create_terms_query(phone_field, phones), int(size)))
            if store == "1":
                response = make_response(result)
                response.headers["Content-Disposition"] = "attachment; filename=data.tsv"
                return response
            else:
                return Response(result, 200)
    except Exception as e:
        return Response(str(e), 500)
@application.route('/api/ads/bulk-query', methods=['POST'])
@requires_basic_auth
def process_csv():
    """Bulk export: the POST body is JSON {"csv": [...]} where each entry is
    a "uri,phonenumber" line; matching ads are returned as TSV.

    Query params: size (max hits per phone query, default 20), heading=1
    (prepend column names), store=1 (file download, the default).
    """
    try:
        json_data = json.loads(str(request.get_data()))
        esm = ElasticSearchManager()
        es_request = convert_csv_to_esrequest(json_data['csv'])
        size = request.args.get('size')
        headings = request.args.get('heading')
        store = request.args.get('store')
        if size is None:
            size = '20'
        if headings is None:
            headings = '0'
        if store is None:
            store = '1'
        bf = BulkFolders()
        if headings == "1":
            result = "\t".join(bf.ht_headings) + '\n'
        else:
            result = ''
        # ids are fetched exactly (size = number of ids); phone matches are
        # capped by the size parameter.
        if 'ids' in es_request:
            result += process_results(bf, esm.search_es(ElasticSearchManager.create_ids_query
                                                        (es_request['ids']), len(es_request['ids'])))
        if 'phone' in es_request:
            result += process_results(bf,
                                      esm.search_es(ElasticSearchManager.create_terms_query(phone_field,
                                                                                            es_request['phone']), int(size)))
        if store == "1":
            response = make_response(result)
            response.headers["Content-Disposition"] = "attachment; filename=data.tsv"
            return response
        else:
            return Response(result, 200)
    except Exception as e:
        return Response(str(e), 500)
"""folder_name = _all for all folders"""
@application.route('/api/users/<user>/folders/<folder_name>/ads', methods=['GET'])
@requires_auth
def get_user_folders(user, folder_name):
bf = BulkFolders()
password = request.authorization.password
# print folder_name
headings = request.args.get('heading')
store = request.args.get('store')
if store is None:
store = '1'
if headings is None:
headings = '0'
try:
if store == "1":
response = make_response(bf.construct_tsv_response(
bf.dereference_uris(bf.construct_uri_to_folder_map(bf.get_folders(user, password), folder_name)), headings))
response.headers["Content-Disposition"] = "attachment; filename=data.tsv"
return response
else:
return Response(bf.construct_tsv_response(
bf.dereference_uris(bf.construct_uri_to_folder_map(bf.get_folders(user, password))), headings), 200)
except Exception as e:
return Response(str(e), 500)
def process_results(bf, res):
    """Render Elasticsearch hits as TSV via bf.ht_to_array.

    One line per hit, tab-separated columns, no trailing newline; '' when
    there are no hits.
    """
    rows = ["\t".join(bf.ht_to_array(hit['_source']))
            for hit in res['hits']['hits']]
    return '\n'.join(rows)
def convert_csv_to_esrequest(lines):
    """Parse "uri,phonenumber" CSV lines into an ES request dict.

    Returns a dict with optional 'ids' and 'phone' lists; a key is present
    only when at least one non-blank value was found.  Lines without a comma
    are ignored entirely.
    """
    uris = []
    numbers = []
    for row in lines:
        cells = row.split(',')
        if len(cells) > 1:
            if cells[0].strip():
                uris.append(cells[0])
            if cells[1].strip():
                numbers.append(cells[1])
    request_body = {}
    if uris:
        request_body["ids"] = uris
    if numbers:
        request_body["phone"] = numbers
    return request_body
if __name__ == "__main__":
application.run()
| {
"repo_name": "usc-isi-i2/dig-export-csv",
"path": "application.py",
"copies": "1",
"size": "7963",
"license": "apache-2.0",
"hash": 8918503335890991000,
"line_mean": 29.5095785441,
"line_max": 124,
"alpha_frac": 0.5800577672,
"autogenerated": false,
"ratio": 4.0298582995951415,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5109916066795142,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amandeep'
import re
import hashlib
from urlparse import urlparse
# Dollar amounts: "$1,234.56" / "$5K" / "USD 100" / "100 USD".
DOLLAR_PRICE_REGEXPS = [re.compile(r'''\$\s*(?:\d{1,3},\s?)*\d{1,3}(?:(?:\.\d+)|[KkMm])?''', re.IGNORECASE),
                        re.compile(r'''USD\s*\d{1,7}(?:\.\d+)?''', re.IGNORECASE),
                        re.compile(r'''\d{1,7}(?:\.\d+)?\s*USD''', re.IGNORECASE)
                        ]
# Bitcoin amounts with a BTC/XBT/XBC marker before or after the number.
BITCOIN_PRICE_REGEXPS = [re.compile(r'''(?:BTC|XBT|XBC)\s*\d{1,7}(?:\.\d+)?''', re.IGNORECASE),
                         re.compile(r'''\d{1,7}(?:\.\d+)?\s*(?:BTC|XBT|XBC)''', re.IGNORECASE)
                         ]
class SM(object):
    """String-manipulation helpers used by Karma transformation scripts.

    FIXES in this revision: py2-only `except X, e` syntax replaced with
    `except X as e` (valid on 2.6+ and 3.x); clean_height now converts
    inches to centimeters; base_clean_rate no longer crashes on empty or
    non-numeric input.
    """

    def __init__(self):
        self.name = "String Manipulation"

    @staticmethod
    def non_whitespace(x):
        """Return the string removing all spaces."""
        y = re.sub(r'\s+', '', x.strip())
        return y

    @staticmethod
    def non_ascii_chars(x):
        """Return a set of the non-ascii chars in x"""
        return set(re.sub('[\x00-\x7f]', '', x))

    @staticmethod
    def non_ascii_chars_as_string(x):
        """Return a string containing a comma-separated list of non-ascii chars in x"""
        y = list(SM.non_ascii_chars(x))
        y.sort()
        return ', '.join(y)

    @staticmethod
    def ascii_chars(x, replacement_string=' '):
        """Remove non-ascii chars in x replacing consecutive ones with a single space"""
        return re.sub(r'[^\x00-\x7F]+', replacement_string, x)

    @staticmethod
    def alpha_numeric(x, replacement_string=' '):
        """Replace consecutive non-alphanumeric chars by a replacement_string"""
        return re.sub('[^A-Za-z0-9]+', replacement_string, x)

    @staticmethod
    def numeric_only(x):
        """Remove non-numeric chars from the string x"""
        return re.sub('[^0-9]+', '', x)

    @staticmethod
    def alpha_only(x):
        """Remove non-alphabetic chars from the string x"""
        return re.sub('[^A-Za-z]+', '', x)

    @staticmethod
    def remove_alpha(x):
        """Remove alphabetic chars from the string x"""
        return re.sub('[A-Za-z]+', '', x)

    @staticmethod
    def alpha_only_preserve_space(x):
        """Keep only letters, collapsing whitespace runs to single spaces."""
        x = re.sub('[^A-Za-z\s]+', '', x)
        y = re.sub(r'\s+', ' ', x.strip())
        return y

    @staticmethod
    def is_symbol(char1):
        """True when char1 is not alphanumeric (note: '' counts as a symbol)."""
        if char1.isalnum():
            return False
        return True

    @staticmethod
    def fingerprint_string(x):
        """Make a fingerprint like the one google refine makes"""
        x = SM.alpha_numeric(SM.ascii_chars(x)).lower()
        y = list(set(x.split()))
        y.sort()
        return '_'.join(y)

    @staticmethod
    def md5_hash(x):
        """Return md5 hash of x"""
        return hashlib.md5(x).hexdigest()

    @staticmethod
    def sha1_hash(text):
        """return upper cased sha1 hash of the string ('' for empty input)"""
        if text:
            return hashlib.sha1(text.encode('utf-8')).hexdigest().upper()
        return ''

    @staticmethod
    def get_string(string, start, end):
        """Substring by inclusive [start, end] indices; '' when start is past the end."""
        if len(string) < start:
            return ''
        if end > len(string):
            return string[start:]
        return string[start:end+1]

    @staticmethod
    def clean_age(x):
        """Age as an int in [1, 99], else None; only the first value of a
        range like "25-30" is considered."""
        stripped = x.strip().lower()
        # take only first value of any range
        stripped = stripped.split('-')[0].strip()
        try:
            age = int(stripped)
            if age < 1 or age > 99:
                return None
        except Exception:
            return None
        return age

    @staticmethod
    def clean_email(x):
        """Return a lowercased, whitespace-free email address, or ''."""
        if len(x) > 0 and x.find("@") != -1:
            em = x.strip().lower()
            em = SM.non_whitespace(em)
            return em
        return ''

    @staticmethod
    def convert_to_float_string(number):
        """return the number as a float string, eg: scientific notation numbers"""
        try:
            return '{0:.15f}'.format(float(number))
        except Exception:
            return ''

    @staticmethod
    def to_title_case_if_upper(x):
        """Return the string in title case if it is all upper, otherwise leave capitalization alone."""
        x = x.strip()
        if x.isupper():
            return x.title()
        else:
            return x

    @staticmethod
    def to_title_case_cleaned(x):
        """Return the string in title case cleaning spaces."""
        y = re.sub(r'\s+', ' ', x.strip())
        return y.title()

    @staticmethod
    def get_website_domain(url):
        """input www.google.com, output google.com"""
        parsed_uri = urlparse(url)
        if parsed_uri:
            domain = parsed_uri.netloc
            if domain:
                if domain.startswith("www."):
                    domain = domain[4:]
                return domain
        return ''

    @staticmethod
    def get_website_domain_only(url):
        """Strip "www." and at most one further leading label.

        NOTE(review): despite the name, 'http://www.google.com' yields
        'google.com' (not 'google'); only hosts that still have 3+ labels
        after removing www. lose their first label.
        """
        parsed_uri = urlparse(url)
        if parsed_uri:
            domain = parsed_uri.netloc
            if domain:
                if domain.startswith("www."):
                    domain = domain[4:]
                idx = domain.find('.')
                if idx != -1:
                    domain2 = domain[idx+1:]
                    if domain2.find('.') != -1:
                        domain = domain2
                return domain
        return ''

    @staticmethod
    def get_dollar_prices(*texts):
        """Extract dollar amounts from the texts, '|'-separated; the K/M
        suffixes are expanded (both become '000')."""
        matches = []
        for t in texts:
            for r in DOLLAR_PRICE_REGEXPS:
                for m in r.findall(t):
                    matches.append(m.replace('$ ', '$').replace(',', '').replace('$', '').replace('K', "000")
                                   .replace('k', "000").replace("M", "000").replace('m', "000"))
        return "|".join(matches)

    @staticmethod
    def get_bitcoin_prices(*texts):
        """Extract bitcoin amounts (BTC/XBT/XBC markers) from the texts, '|'-separated."""
        matches = []
        for t in texts:
            for r in BITCOIN_PRICE_REGEXPS:
                for m in r.findall(t):
                    matches.append(m.replace('BTC', '').replace('XBT', '').replace('XBC', '').replace(' ', ''))
        return "|".join(matches)

    @staticmethod
    def clean_name(x):
        """Title-case a personal name; '' when it starts with a symbol."""
        x = SM.toTitleCaseCleaned(x)
        if SM.isSymbol(x[0:1]):
            return ''
        return x

    @staticmethod
    def toTitleCaseCleaned(x):
        """camelCase duplicate of to_title_case_cleaned, kept for existing callers."""
        y = re.sub(r'\s+', ' ', x.strip())
        return y.title()

    @staticmethod
    def isSymbol(char1):
        """camelCase duplicate of is_symbol, kept for existing callers."""
        if char1.isalnum():
            return False
        return True

    @staticmethod
    def clean_ethnicity(x):
        """Lowercase and remove all spaces."""
        stripped = x.strip().lower().replace(" ","")
        return stripped

    @staticmethod
    def clean_height(x):
        """Height in whole centimeters, or None.

        Accepts feet/inches like 5'6" (or 6' or 6'7) and bare centimeter
        numbers; only the first value of a range is used.
        """
        stripped = x.strip().lower()
        # take only first measurement of any range
        stripped = stripped.split('-')[0].strip()
        try:
            # First, 5'6" or 6' or 6'7
            dimensions = stripped.split("'")
            if len(dimensions) >= 2:
                feet = int(dimensions[0])
                try:
                    inches = int(dimensions[1].strip('"'))
                except Exception:
                    # empty inches
                    inches = 0
                # BUG FIX: the old formula 2.54*(12*feet) + inches added the
                # raw inch count to a centimeter total; convert the full
                # feet+inches length to cm instead.
                return int(2.54 * (12 * feet + inches))
            else:
                # no inches, so assume centimeters, e.g. "137"
                return int(stripped)
        except Exception:
            return None

    @staticmethod
    def base_clean_rate(x):
        """Rate as an int in [20, 1000], else None; values with a leading
        '0' (or unparseable/empty values) are rejected."""
        clean = x.strip().lower()
        # FIX: empty input used to raise IndexError on clean[0]
        if not clean or clean[0] == "0":
            return None
        try:
            rate = int(float(clean))
        except ValueError:
            # FIX: non-numeric input used to propagate the ValueError
            return None
        if rate < 20 or rate > 1000:
            return None
        return rate

    @staticmethod
    def clean_rate60(x):
        """Format a 60-minute rate as '<rate>-per-60min', or ''."""
        rate = SM.base_clean_rate(x)
        if rate is not None:
            return "%s-per-60min" % rate
        return ''

    @staticmethod
    def clean_rate15(x):
        """Format a 15-minute rate as '<rate>-per-15min', or ''."""
        rate = SM.base_clean_rate(x)
        if rate is not None:
            return "%s-per-15min" % rate
        return ''

    @staticmethod
    def clean_rate30(x):
        """Format a 30-minute rate as '<rate>-per-30min', or ''."""
        rate = SM.base_clean_rate(x)
        if rate is not None:
            return "%s-per-30min" % rate
        return ''

    @staticmethod
    def rate_price(cleaned):
        """Numeric price from a '<rate>-per-<dur><unit>' string, or ''."""
        if cleaned:
            idx = cleaned.find("-")
            if idx != -1:
                return int(cleaned[0:idx])
        return ''

    @staticmethod
    def rate_duration(cleaned):
        """Duration digits from a '<rate>-per-<dur><unit>' string, or ''.

        NOTE(review): assumes a 3-character unit suffix ('min'/'sec'), which
        matches the clean_rate* producers; a 2-char 'hr' suffix would leave
        one digit of the unit attached.
        """
        if cleaned:
            idx = cleaned.find("per-")
            if idx != -1:
                suffix = cleaned[idx+4:]
                return suffix[0: len(suffix)-3]
        return ''

    @staticmethod
    def rate_unit(cleaned):
        """Unit code (MIN/SEC/HUR) found in a rate string, or ''."""
        if cleaned:
            idx = cleaned.find("min")
            if idx != -1:
                return "MIN"
            idx = cleaned.find("sec")
            if idx != -1:
                return "SEC"
            idx = cleaned.find("hr")
            if idx != -1:
                return "HUR"
        return ''

    @staticmethod
    def clean_weight(x):
        """Weight in kg (int in [40, 200]) or None.

        Accepts stone ("10 stone 6", "10st6"), pounds ("154lb", "...pound"),
        kilograms ("70kg"/"70kilo"/"70kilogram") or a bare number; an
        unmarked weight < 90 is interpreted as kg, >= 90 as lb.
        """
        x = str(x).strip().lower()

        def lb_to_kg(lb):
            return int(float(lb)/2.2)

        def sanityCheck(kg):
            # reject implausible results rather than propagate them
            if kg >= 40 and kg <= 200:
                return kg
            else:
                return None
        try:
            cleaned = x
            # first try for st/stone
            l = re.split("stone", cleaned)
            if len(l) == 1:
                l = re.split("st", cleaned)
            if len(l) > 1:
                stone = float(l[0])
                lb = l[1]
                lb = lb.strip('s')
                lb = lb.strip('lb')
                lb = lb.strip('pound')
                try:
                    lb = float(lb)
                except ValueError:
                    lb = 0
                return sanityCheck(lb_to_kg(int(stone*14+lb)))
            lb = cleaned.strip('s')
            # now try for just pounds
            if lb.endswith("lb"):
                return sanityCheck(lb_to_kg(int(float(lb.strip('lb')))))
            if lb.endswith('pound'):
                return sanityCheck(lb_to_kg(int(float(lb.strip('pound')))))
            # now kg
            kg = cleaned.strip('s')
            if kg.endswith("kg"):
                return sanityCheck(int(float(kg.strip('kg'))))
            if kg.endswith("kilo"):
                return sanityCheck(int(float(kg.strip('kilo'))))
            if kg.endswith('kilogram'):
                return sanityCheck(int(float(kg.strip('kilogram'))))
            # now assume a bare number: < 90 means kg, otherwise lb
            num = int(float(cleaned))
            if num < 90:
                return sanityCheck(num)
            else:
                return sanityCheck(lb_to_kg(num))
        except Exception:
            return None
| {
"repo_name": "usc-isi-i2/dig-alignment",
"path": "versions/3.0/karma/python/string_manipulation.py",
"copies": "2",
"size": "11969",
"license": "apache-2.0",
"hash": -275451079328352540,
"line_mean": 29.7686375321,
"line_max": 111,
"alpha_frac": 0.48441808,
"autogenerated": false,
"ratio": 4.001671681711802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5486089761711801,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amandeep'
import re
# The four country lists below are PARALLEL ARRAYS: the entry at index i in
# each list refers to the same country, so an index found in one list is used
# to look up the corresponding value in another. Deliberate duplicates
# (e.g. "BA", "HR", "KP"/"KR") keep the lists aligned with alternate names
# such as "BOSNIA" / "HERZEGOWINA" or "NORTHKOREA" / "SOUTHKOREA".
# ISO 3166-1 alpha-2 country codes.
country_codes_2 = [
    "AF", "AL", "DZ", "AS", "AD", "AO", "AI", "AQ", "AG", "AR", "AM", "AW", "AU", "AT", "AZ", "BS", "BH", "BD", "BB",
    "BY", "BE", "BZ", "BJ", "BM", "BT", "BO", "BA", "BA", "BA", "BW", "BV", "BV", "BR", "IO", "BN", "BG", "BF", "BI",
    "KH", "CM", "CA", "CV", "KY", "CF", "TD", "CL", "CN", "CX", "CC", "CO", "KM", "CG", "CD", "CK", "CR", "CI", "HR",
    "HR", "CU", "CY", "CZ", "DK", "DJ", "DM", "DO", "TP", "EC", "EG", "SV", "GQ", "ER", "EE", "ET", "FK", "FK", "FO",
    "FJ", "FI", "FR", "FX", "GF", "PF", "TF", "GA", "GM", "GE", "DE", "GH", "GI", "GR", "GL", "GD", "GP", "GU", "GT",
    "GN", "GW", "GY", "HT", "HM", "VA", "VA", "HN", "HK", "HU", "IS", "IN", "ID", "IR", "IQ", "IE", "IL", "IT", "JM",
    "JP", "JO", "KZ", "KE", "KI", "KP", "KP", "KR", "KR", "KR", "KW", "KG", "LA", "LV", "LB", "LS", "LR", "LY", "LI",
    "LT", "LU",
    "MO", "MK", "MG", "MW", "MY", "MV", "ML", "MT", "MH", "MQ", "MR", "MU", "YT", "MX", "FM", "FM", "MD", "MD", "MC",
    "MN", "ME", "MS", "MA", "MZ", "MM", "MM", "NA", "NR", "NP", "NL", "AN", "NC", "NZ", "NI", "NE", "NG", "NU", "NF",
    "MP", "NO", "OM", "PK", "PW", "PA", "PG", "PY", "PE", "PH", "PN", "PL", "PT", "PR", "QA", "RE", "RO", "RU", "RW",
    "KN", "LC", "VC", "WS", "SM", "ST", "SA", "SN", "RS", "SC", "SL", "SG", "SK", "SK", "SI", "SB", "SO", "ZA", "SS",
    "GS", "ES", "LK", "SH", "PM", "SD", "SR", "SJ", "SZ", "SE", "CH", "SY", "TW", "TJ", "TZ", "TZ", "TH", "TG", "TK",
    "TO", "TT", "TN", "TR", "TM", "TC", "TV", "UG", "UA", "AE", "GB", "US", "UM", "UY", "UZ", "VU", "VE", "VN", "VG",
    "VI", "WF", "EH", "YE", "ZM", "ZW"
]
# ISO 3166-1 alpha-3 country codes (same ordering as country_codes_2).
country_codes_3 = [
    "AFG", "ALB", "DZA", "ASM", "AND", "AGO", "AIA", "ATA", "ATG", "ARG", "ARM", "ABW", "AUS", "AUT", "AZE", "BHS",
    "BHR", "BGD", "BRB", "BLR", "BEL", "BLZ", "BEN", "BMU", "BTN", "BOL", "BIH", "BIH", "BIH", "BWA", "BVT", "BVT",
    "BRA", "IOT", "BRN", "BGR", "BFA", "BDI", "KHM", "CMR", "CAN", "CPV", "CYM", "CAF", "TCD", "CHL", "CHN", "CXR",
    "CCK", "COL", "COM", "COG", "COD", "COK", "CRI", "CIV", "HRV", "HRV", "CUB", "CYP", "CZE", "DNK", "DJI", "DMA",
    "DOM", "TMP", "ECU", "EGY", "SLV", "GNQ", "ERI", "EST", "ETH", "FLK", "FLK", "FRO", "FJI", "FIN", "FRA", "FXX",
    "GUF", "PYF", "ATF", "GAB", "GMB", "GEO", "DEU", "GHA", "GIB", "GRC", "GRL", "GRD", "GLP", "GUM", "GTM", "GIN",
    "GNB", "GUY", "HTI", "HMD", "VAT", "VAT", "HND", "HKG", "HUN", "ISL", "IND", "IDN", "IRN", "IRQ", "IRL", "ISR",
    "ITA", "JAM", "JPN", "JOR", "KAZ", "KEN", "KIR", "PRK", "PRK", "KOR", "KOR", "KOR", "KWT", "KGZ", "LAO", "LVA",
    "LBN", "LSO",
    "LBR", "LBY", "LIE", "LTU", "LUX", "MAC", "MKD", "MDG", "MWI", "MYS", "MDV", "MLI", "MLT", "MHL", "MTQ", "MRT",
    "MUS", "MYT", "MEX", "FSM", "FSM", "MDA", "MDA", "MCO", "MNG", "MNE", "MSR", "MAR", "MOZ", "MMR", "MMR", "NAM",
    "NRU", "NPL", "NLD", "ANT", "NCL", "NZL", "NIC", "NER", "NGA", "NIU", "NFK", "MNP", "NOR", "OMN", "PAK", "PLW",
    "PAN", "PNG", "PRY", "PER", "PHL", "PCN", "POL", "PRT", "PRI", "QAT", "REU", "ROM", "RUS", "RWA", "KNA", "LCA",
    "VCT", "WSM", "SMR", "STP", "SAU", "SEN", "SRB", "SYC", "SLE", "SGP", "SVK", "SVK", "SVN", "SLB", "SOM", "ZAF",
    "SSD", "SGS", "ESP", "LKA", "SHN", "SPM", "SDN", "SUR", "SJM", "SWZ", "SWE", "CHE", "SYR", "TWN", "TJK", "TZA",
    "TZA", "THA", "TGO", "TKL", "TON", "TTO", "TUN", "TUR", "TKM", "TCA", "TUV", "UGA", "UKR", "ARE", "GBR", "USA",
    "UMI", "URY", "UZB", "VUT", "VEN", "VNM", "VGB", "VIR", "WLF", "ESH", "YEM", "ZMB", "ZWE"
]
# Country names normalized for matching: upper-cased with spaces removed,
# i.e. the form produced by SM.alpha_only(...).upper() elsewhere in this file.
country_names = [
    "AFGHANISTAN", "ALBANIA", "ALGERIA", "AMERICANSAMOA", "ANDORRA", "ANGOLA", "ANGUILLA", "ANTARCTICA",
    "ANTIGUAANDBARBUDA", "ARGENTINA", "ARMENIA", "ARUBA", "AUSTRALIA", "AUSTRIA", "AZERBAIJAN", "BAHAMAS", "BAHRAIN",
    "BANGLADESH", "BARBADOS", "BELARUS", "BELGIUM", "BELIZE", "BENIN", "BERMUDA", "BHUTAN", "BOLIVIA",
    "BOSNIAANDHERZEGOWINA", "BOSNIA", "HERZEGOWINA", "BOTSWANA", "BOUVETISLAND", "NORWAY", "BRAZIL",
    "BRITISHINDIANOCEANTERRITORY", "BRUNEIDARUSSALAM", "BULGARIA", "BURKINAFASO", "BURUNDI", "CAMBODIA",
    "CAMEROON", "CANADA", "CAPEVERDE", "CAYMANISLANDS", "CENTRALAFRICANREPUBLIC", "CHAD", "CHILE", "CHINA",
    "CHRISTMASISLAND", "COCOSISLANDS", "COLOMBIA", "COMOROS", "CONGO", "CONGOTHEDRC", "COOKISLANDS", "COSTARICA",
    "COTED'IVOIRE", "CROATIA", "HRVATSKA", "CUBA", "CYPRUS", "CZECHREPUBLIC", "DENMARK", "DJIBOUTI", "DOMINICA",
    "DOMINICANREPUBLIC", "EASTTIMOR", "ECUADOR", "EGYPT", "ELSALVADOR", "EQUATORIALGUINEA", "ERITREA", "ESTONIA",
    "ETHIOPIA", "FALKLANDISLANDS", "MALVINAS", "FAROEISLANDS", "FIJI", "FINLAND", "FRANCE", "FRANCEMETROPOLITAN",
    "FRENCHGUIANA", "FRENCHPOLYNESIA", "FRENCHSOUTHERNTERRITORIES", "GABON", "GAMBIA", "GEORGIA", "GERMANY",
    "GHANA", "GIBRALTAR", "GREECE", "GREENLAND", "GRENADA", "GUADELOUPE", "GUAM", "GUATEMALA", "GUINEA",
    "GUINEA-BISSAU", "GUYANA", "HAITI", "HEARDANDMCDONALDISLANDS", "HOLYSEE", "VATICANCITYSTATE", "HONDURAS",
    "HONGKONG", "HUNGARY", "ICELAND", "INDIA", "INDONESIA", "IRAN", "IRAQ", "IRELAND", "ISRAEL", "ITALY", "JAMAICA",
    "JAPAN", "JORDAN", "KAZAKHSTAN", "KENYA", "KIRIBATI", "KOREADPRO", "NORTHKOREA", "KOREAREPUBLICOF",
    "SOUTHKOREA", "REPUBLICOFKOREA",
    "KUWAIT", "KYRGYZSTAN", "LAOS", "LATVIA", "LEBANON", "LESOTHO", "LIBERIA", "LIBYANARABJAMAHIRIYA",
    "LIECHTENSTEIN", "LITHUANIA", "LUXEMBOURG", "MACAU", "MACEDONIA", "MADAGASCAR", "MALAWI", "MALAYSIA", "MALDIVES",
    "MALI", "MALTA", "MARSHALLISLANDS", "MARTINIQUE", "MAURITANIA", "MAURITIUS", "MAYOTTE", "MEXICO",
    "MICRONESIA,FEDERATEDSTATESOF", "FEDERATEDSTATESOFMICRONESIA", "MOLDOVAREPUBLICOF", "REPUBLICOFMOLDOVA",
    "MONACO", "MONGOLIA", "MONTENEGRO", "MONTSERRAT", "MOROCCO", "MOZAMBIQUE", "MYANMAR", "BURMA", "NAMIBIA", "NAURU",
    "NEPAL", "NETHERLANDS", "NETHERLANDSANTILLES", "NEWCALEDONIA", "NEWZEALAND", "NICARAGUA", "NIGER", "NIGERIA",
    "NIUE", "NORFOLKISLAND", "NORTHERNMARIANAISLANDS", "NORWAY", "OMAN", "PAKISTAN", "PALAU", "PANAMA",
    "PAPUANEWGUINEA", "PARAGUAY", "PERU", "PHILIPPINES", "PITCAIRN", "POLAND", "PORTUGAL", "PUERTORICO", "QATAR",
    "REUNION", "ROMANIA", "RUSSIANFEDERATION", "RWANDA", "SAINTKITTSANDNEVIS", "SAINTLUCIA",
    "SAINTVINCENTANDTHEGRENADINES", "SAMOA", "SANMARINO", "SAOTOMEANDPRINCIPE", "SAUDIARABIA", "SENEGAL",
    "SERBIA", "SEYCHELLES", "SIERRALEONE", "SINGAPORE", "SLOVAKIA", "SLOVAKREPUBLIC", "SLOVENIA", "SOLOMONISLANDS",
    "SOMALIA", "SOUTHAFRICA", "SOUTHSUDAN", "SOUTHGEORGIAANDSOUTHSS", "SPAIN", "SRILANKA", "STHELENA",
    "STPIERREANDMIQUELON", "SUDAN", "SURINAME", "SVALBARDANDJANMAYENISLANDS", "SWAZILAND", "SWEDEN",
    "SWITZERLAND", "SYRIANARABREPUBLIC", "TAIWAN", "TAJIKISTAN", "TANZANIA", "UNITEDREPUBLICOFTANZANIA",
    "THAILAND", "TOGO", "TOKELAU", "TONGA", "TRINIDADANDTOBAGO", "TUNISIA", "TURKEY", "TURKMENISTAN",
    "TURKSANDCAICOSISLANDS", "TUVALU", "UGANDA", "UKRAINE", "UNITEDARABEMIRATES", "UNITEDKINGDOM",
    "UNITEDSTATES", "USMINORISLANDS", "URUGUAY", "UZBEKISTAN", "VANUATU", "VENEZUELA", "VIETNAM",
    "VIRGINISLANDSBRITISH", "VIRGINISLANDSUS", "WALLISANDFUTUNAISLANDS", "WESTERNSAHARA", "YEMEN",
    "ZAMBIA", "ZIMBABWE"
]
# Human-readable country names, used as the canonical output form
# (same ordering as the three lists above).
country_names_readable = [
    "Afghanistan", "Albania", "Algeria", "American Samoa", "Andorra", "Angola", "Anguilla", "Antarctica",
    "Antigua And Barbuda", "Argentina", "Armenia", "Aruba", "Australia", "Austria", "Azerbaijan", "Bahamas", "Bahrain",
    "Bangladesh", "Barbados", "Belarus", "Belgium", "Belize", "Benin", "Bermuda", "Bhutan", "Bolivia",
    "Bosnia And Herzegowina", "Bosnia", "Herzegowina", "Botswana", "Bouvet Island", "Norway", "Brazil",
    "British Indian Ocean Territory", "Brunei Darussalam", "Bulgaria", "Burkina Faso", "Burundi", "Cambodia",
    "Cameroon", "Canada", "Cape Verde", "Cayman Islands", "Central African Republic", "Chad", "Chile", "China",
    "Christmas Island", "Cocos Islands", "Colombia", "Comoros", "Congo", "Congo, The Drc", "Cook Islands", "Costa Rica",
    "Cote D'ivoire", "Croatia", "Hrvatska", "Cuba", "Cyprus", "Czech Republic", "Denmark", "Djibouti", "Dominica",
    "Dominican Republic", "East Timor", "Ecuador", "Egypt", "El Salvador", "Equatorial Guinea", "Eritrea", "Estonia",
    "Ethiopia", "Falkland Islands", "Malvinas", "Faroe Islands", "Fiji", "Finland", "France", "France, Metropolitan",
    "French Guiana", "French Polynesia", "French Southern Territories", "Gabon", "Gambia", "Georgia", "Germany",
    "Ghana", "Gibraltar", "Greece", "Greenland", "Grenada", "Guadeloupe", "Guam", "Guatemala", "Guinea",
    "Guinea-bissau", "Guyana", "Haiti", "Heard And Mc Donald Islands", "Holy See", "Vatican City State", "Honduras",
    "Hong Kong", "Hungary", "Iceland", "India", "Indonesia", "Iran", "Iraq", "Ireland", "Israel", "Italy", "Jamaica",
    "Japan", "Jordan", "Kazakhstan", "Kenya", "Kiribati", "Korea, D.p.r.o.", "North Korea", "Korea, Republic Of",
    "South Korea", "Republic Of Korea",
    "Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho", "Liberia", "Libyan Arab Jamahiriya",
    "Liechtenstein", "Lithuania", "Luxembourg", "Macau", "Macedonia", "Madagascar", "Malawi", "Malaysia", "Maldives",
    "Mali", "Malta", "Marshall Islands", "Martinique", "Mauritania", "Mauritius", "Mayotte", "Mexico",
    "Micronesia, Federated States Of", "Federated States Of Micronesia", "Moldova, Republic Of", "Republic Of Moldova",
    "Monaco", "Mongolia", "Montenegro", "Montserrat", "Morocco", "Mozambique", "Myanmar", "Burma", "Namibia", "Nauru",
    "Nepal", "Netherlands", "Netherlands Antilles", "New Caledonia", "New Zealand", "Nicaragua", "Niger", "Nigeria",
    "Niue", "Norfolk Island", "Northern Mariana Islands", "Norway", "Oman", "Pakistan", "Palau", "Panama",
    "Papua New Guinea", "Paraguay", "Peru", "Philippines", "Pitcairn", "Poland", "Portugal", "Puerto Rico", "Qatar",
    "Reunion", "Romania", "Russian Federation", "Rwanda", "Saint Kitts And Nevis", "Saint Lucia",
    "Saint Vincent And The Grenadines", "Samoa", "San Marino", "Sao Tome And Principe", "Saudi Arabia", "Senegal",
    "Serbia", "Seychelles", "Sierra Leone", "Singapore", "Slovakia", "Slovak Republic", "Slovenia", "Solomon Islands",
    "Somalia", "South Africa", "South Sudan", "South Georgia And South S.S.", "Spain", "Sri Lanka", "St. Helena",
    "St. Pierre And Miquelon", "Sudan", "Suriname", "Svalbard and Jan Mayen Islands", "Swaziland", "Sweden",
    "Switzerland", "Syrian Arab Republic", "Taiwan", "Tajikistan", "Tanzania", "United Republic Of Tanzania",
    "Thailand", "Togo", "Tokelau", "Tonga", "Trinidad And Tobago", "Tunisia", "Turkey", "Turkmenistan",
    "Turks And Caicos Islands", "Tuvalu", "Uganda", "Ukraine", "United Arab Emirates", "United Kingdom",
    "United States", "U.S. Minor Islands", "Uruguay", "Uzbekistan", "Vanuatu", "Venezuela", "Vietnam",
    "Virgin Islands (British)", "Virgin Islands (U.S.)", "Wallis And Futuna Islands", "Western Sahara", "Yemen",
    "Zambia", "Zimbabwe"
]
# US state lists are parallel arrays as well: normalized names, readable
# names, and USPS two-letter codes all share the same ordering.
us_states_names = [
    "ALABAMA", "ALASKA", "ARIZONA", "ARKANSAS", "CALIFORNIA", "COLORADO", "CONNECTICUT", "DELAWARE", "FLORIDA",
    "GEORGIA", "HAWAII", "IDAHO", "ILLINOIS", "INDIANA", "IOWA", "KANSAS", "KENTUCKY", "LOUISIANA", "MAINE", "MARYLAND",
    "MASSACHUSETTS", "MICHIGAN", "MINNESOTA", "MISSISSIPPI", "MISSOURI", "MONTANA", "NEBRASKA", "NEVADA",
    "NEWHAMPSHIRE", "NEWJERSEY", "NEWMEXICO", "NEWYORK", "NORTHCAROLINA", "NORTHDAKOTA", "OHIO", "OKLAHOMA", "OREGON",
    "PENNSYLVANIA", "RHODEISLAND", "SOUTHCAROLINA", "SOUTHDAKOTA", "TENNESSEE", "TEXAS", "UTAH", "VERMONT", "VIRGINIA",
    "WASHINGTON", "WESTVIRGINIA", "WISCONSIN", "WYOMING"
]
us_states_names_readable = [
    "Alabama", "Alaska", "Arizona", "Arkansas", "California", "Colorado", "Connecticut", "Delaware", "Florida",
    "Georgia", "Hawaii", "Idaho", "Illinois", "Indiana", "Iowa", "Kansas", "Kentucky", "Louisiana", "Maine", "Maryland",
    "Massachusetts", "Michigan", "Minnesota", "Mississippi", "Missouri", "Montana", "Nebraska", "Nevada",
    "New Hampshire", "New Jersey", "New Mexico", "New York", "North Carolina", "North Dakota", "Ohio", "Oklahoma",
    "Oregon", "Pennsylvania", "Rhode Island", "South Carolina", "South Dakota", "Tennessee", "Texas", "Utah", "Vermont",
    "Virginia", "Washington", "West Virginia", "Wisconsin", "Wyoming"
]
us_states_codes = [
    "AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA", "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME",
    "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ", "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA",
    "RI", "SC", "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"
]
# Canadian province/territory lists, parallel in the same way.
canada_states_names = [
    "ALBERTA", "BRITISHCOLUMBIA", "MANITOBA", "NEWBRUNSWICK", "NEWFOUNDLANDANDLABRADOR", "NOVASCOTIA",
    "NORTHWESTTERRITORIES", "NUNAVUT", "ONTARIO", "PRINCEEDWARDISLAND", "QUEBEC", "SASKATCHEWAN", "YUKON"
]
canada_states_names_readable = [
    "Alberta", "British Columbia", "Manitoba", "New Brunswick", "Newfoundland and Labrador", "Nova Scotia",
    "Northwest Territories", "Nunavut", "Ontario", "Prince Edward Island", "Quebec", "Saskatchewan", "Yukon"
]
canada_states_codes = [
    "AB", "BC", "MB", "NB", "NL", "NS", "NT", "NU", "ON", "PE", "QC", "SK", "YT"
]
class LM(object):
    """Location-manipulation helpers.

    Normalizes country/state codes and names and parses latitude/longitude
    strings. Look-ups rely on the module-level parallel lists
    (country_codes_2/3, country_names*, us_states_*, canada_states_*) and on
    the SM string helpers defined elsewhere in this file.
    """

    def __init__(self):
        self.name = "Location Manipulation"

    @staticmethod
    def standard_country_code(country):
        """Return the ISO alpha-2 code for *country* (a 2/3-letter code or a
        full name), or '' when it cannot be resolved."""
        if len(country) == 0:
            return ''
        country = country.upper()
        if len(country) == 2:
            try:
                country_codes_2.index(country)
                # Already a known alpha-2 code.
                return country
            except ValueError:
                pass  # not a 2-letter code; fall through to name matching
        if len(country) == 3:
            try:
                idx = country_codes_3.index(country)
                return country_codes_2[idx]
            except ValueError:
                pass  # not a 3-letter code; fall through to name matching
        try:
            # Match the letters-only, upper-cased full name against the
            # normalized name list (parallel to country_codes_2).
            country = SM.alpha_only(country).upper()
            idx = country_names.index(country)
            return country_codes_2[idx]
        except ValueError:
            return ''

    @staticmethod
    def standard_country_name(country):
        """Return the readable country name for *country* (a 2/3-letter code
        or a full name), or '' when it cannot be resolved."""
        if len(country) == 0:
            return ''
        country = country.upper()
        if len(country) == 2:
            try:
                idx = country_codes_2.index(country)
                return country_names_readable[idx]
            except ValueError:
                pass
        if len(country) == 3:
            try:
                idx = country_codes_3.index(country)
                return country_names_readable[idx]
            except ValueError:
                pass
        try:
            country = SM.alpha_only(country).upper()
            idx = country_names.index(country)
            return country_names_readable[idx]
        except ValueError:
            return ''

    @staticmethod
    def clean_country(country):
        """Return the readable country name, or the input unchanged when the
        country is not recognized."""
        clean_country = LM.standard_country_name(country)
        if clean_country == '':
            return country
        return clean_country

    @staticmethod
    def standard_state_code(country, state):
        """Return the two-letter state/province code for *state* in *country*
        (US or Canada only). Returns '' when the state cannot be resolved and
        the input unchanged for countries without state tables."""
        if len(state) == 0:
            return ''
        codes_arr = []
        names_arr = []
        if country == "US" or country == "United States":
            codes_arr = us_states_codes
            names_arr = us_states_names
        if country == "CA" or country == "Canada":
            codes_arr = canada_states_codes
            names_arr = canada_states_names
        if len(codes_arr) > 0:
            state = state.upper()
            if len(state) == 2:
                try:
                    codes_arr.index(state)
                    # Already a known state/province code.
                    return state
                except ValueError:
                    pass
            try:
                state = SM.alpha_only(state).upper()
                idx = names_arr.index(state)
                return codes_arr[idx]
            except ValueError:
                return ''
        # Country without state tables: return the state untouched.
        return state

    @staticmethod
    def standardize_state_name(country, state):
        """Return the readable state/province name for *state* in *country*
        (US or Canada only). Returns '' when the state cannot be resolved and
        the input unchanged for countries without state tables."""
        if len(state) == 0:
            return ''
        codes_arr = []
        names_arr = []
        names_readable_arr = []
        if country == "US" or country == "United States":
            codes_arr = us_states_codes
            names_arr = us_states_names
            names_readable_arr = us_states_names_readable
        if country == "CA" or country == "Canada":
            codes_arr = canada_states_codes
            names_arr = canada_states_names
            names_readable_arr = canada_states_names_readable
        if len(codes_arr) > 0:
            state = state.upper()
            if len(state) == 2:
                try:
                    idx = codes_arr.index(state)
                    return names_readable_arr[idx]
                except ValueError:
                    pass
            try:
                state = SM.alpha_only(state).upper()
                idx = names_arr.index(state)
                return names_readable_arr[idx]
            except ValueError:
                return ''
        return state

    @staticmethod
    def get_decimal_coodinate(lat):
        """Convert a packed DDMMSS coordinate string to decimal degrees,
        returned as a string.

        Note: the original file defined this method twice with identical
        bodies; the shadowed duplicate has been removed. The historical
        misspelling ('coodinate') is kept so existing callers keep working.
        """
        result = 0
        x = SM.get_string(lat, 0, 1)  # degrees
        if x:
            result += int(x)
        x = SM.get_string(lat, 2, 3)  # minutes
        if x:
            result += int(x) / 60.0
        x = SM.get_string(lat, 4, 5)  # seconds
        if x:
            result += int(x) / 3600.0
        return str(result)

    @staticmethod
    def parse_latitude_longitude(latlon):
        """Parse strings such as 'LATMIN:2310N04350W' or
        'LATDEC:351025.3N0790125.7W' into a [lat, lon] pair.

        Returns [-1, -1] when the input has no 'TYPE:' prefix.
        """
        # Examples: LATMIN:2310N04350W
        # LATDEC:351025.3N0790125.7W
        idx = latlon.find(":")
        if idx != -1:
            ltype = latlon[0:idx]
            latlon = latlon[idx + 1:]
            idx = latlon.find("-")
            if idx != -1:
                # Explicit separator: the hemisphere letter precedes the dash.
                lat = latlon[0:idx - 1]
                lon = latlon[idx + 2:]
            else:
                # Split on any non-numeric run (the N/S/E/W letters).
                latlon = re.sub(r'[^0-9\.]+', ',', latlon)
                latlons = latlon.split(",")
                lat = latlons[0]
                lon = latlons[1]
            if ltype == "LATMIN" or ltype == "LATDEC":
                return [LM.get_decimal_coodinate(lat), LM.get_decimal_coodinate(lon)]
            else:
                return [lat, lon]
        return [-1, -1]
| {
"repo_name": "darkshadows123/dig-alignment",
"path": "versions/3.0/karma/python/location_manipulation.py",
"copies": "2",
"size": "18663",
"license": "apache-2.0",
"hash": -3154156346501291500,
"line_mean": 53.2529069767,
"line_max": 120,
"alpha_frac": 0.5446605583,
"autogenerated": false,
"ratio": 2.453398185881425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3998058744181425,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amandeep'
import time
class HbaseManager(object):
    """Wrapper around Spark's Hadoop I/O API for writing an RDD to an HBase
    table and reading an HBase table back as an RDD.

    :param sc: SparkContext used for the Hadoop RDD calls.
    :param conf: SparkConf (stored, not used directly here).
    :param hbase_hostname: ZooKeeper quorum host(s) for the HBase cluster.
    :param hbase_tablename: name of the target HBase table.

    Optional keyword arguments: ``columns_list`` (columns to scan),
    ``time_sleep`` (seconds to wait before saving), ``row_start`` /
    ``row_stop`` (scan range), ``key_only`` (see note below).
    """

    def __init__(self, sc, conf, hbase_hostname, hbase_tablename, **kwargs):
        self.name = "ES2HBase"
        self.sc = sc
        self.conf = conf
        self.hbase_conf = {"hbase.zookeeper.quorum": hbase_hostname}
        self.hbase_table = hbase_tablename
        self.columns_list = None
        self.row_start = None
        self.row_stop = None
        self.key_only = False
        self.time_sleep = 0  # default is to save right away
        if 'columns_list' in kwargs:
            self.columns_list = kwargs['columns_list']
        if 'time_sleep' in kwargs:
            self.time_sleep = int(kwargs['time_sleep'])
        if 'row_start' in kwargs:
            self.row_start = kwargs['row_start']
        if 'row_stop' in kwargs:
            self.row_stop = kwargs['row_stop']
        if 'key_only' in kwargs:
            # NOTE(review): bool() of any non-empty string is True, so
            # key_only="False" would still enable the flag -- confirm callers
            # pass real booleans here.
            self.key_only = bool(kwargs['key_only'])

    def rdd2hbase(self, data_rdd):
        """Save *data_rdd* into the configured HBase table, optionally
        sleeping ``time_sleep`` seconds first."""
        self.hbase_conf['hbase.mapred.outputtable'] = self.hbase_table
        self.hbase_conf['mapreduce.outputformat.class'] = "org.apache.hadoop.hbase.mapreduce.TableOutputFormat"
        self.hbase_conf['mapreduce.job.output.key.class'] = "org.apache.hadoop.hbase.io.ImmutableBytesWritable"
        self.hbase_conf['mapreduce.job.output.value.class'] = "org.apache.hadoop.io.Writable"
        key_conv = "org.apache.spark.examples.pythonconverters.StringToImmutableBytesWritableConverter"
        value_conv = "org.apache.spark.examples.pythonconverters.StringListToPutConverter"
        # 1) saveAsNewAPIHadoopDataset seems to fail sometime with errors like:
        # - Container [pid=7897,containerID=container_1459636669274_6150_01_000002] is running beyond physical memory limits. Current usage: 8.0 GB of 8 GB physical memory used; 41.7 GB of 16.8 GB virtual memory used. Killing container.
        # - anyway to set some parameters to fix this?
        print("[HbaseManager.rdd2hbase] Will save to HBase in {}s using conf: {}".format(self.time_sleep, self.hbase_conf))
        if self.time_sleep:
            time.sleep(self.time_sleep)
        data_rdd.saveAsNewAPIHadoopDataset(
            conf=self.hbase_conf,
            keyConverter=key_conv,
            valueConverter=value_conv)

    def read_hbase_table(self):
        """Return an RDD over the configured HBase table, honoring the
        optional column list and row range set at construction time."""
        self.hbase_conf['hbase.mapreduce.inputtable'] = self.hbase_table
        # https://hbase.apache.org/xref/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html
        if self.columns_list:
            print("[HbaseManager.read_hbase_table] Will read only columns: {}".format(','.join(self.columns_list)))
            self.hbase_conf['hbase.mapreduce.scan.columns'] = ' '.join(self.columns_list)
        if self.row_start:
            print("[HbaseManager.read_hbase_table] Will start reading from row: {}".format(self.row_start))
            self.hbase_conf['hbase.mapreduce.scan.row.start'] = str(self.row_start)
        if self.row_stop:
            # (fixed typo in log message: "strop" -> "stop")
            print("[HbaseManager.read_hbase_table] Will stop reading at row: {}".format(self.row_stop))
            self.hbase_conf['hbase.mapreduce.scan.row.stop'] = str(self.row_stop)
        # # how to integrate org.apache.hadoop.hbase.filter.KeyOnlyFilter to read only row keys?
        # # Actually does not seem possible for now, need to edit 'createScanFromConfiguration' in 'TableInputFormat'
        # # would need to instantiate filter and call setFilter, add methods like addColumn(s) called addFilter(s)?
        # if self.key_only:
        #     print("[HbaseManager.read_hbase_table] Will return only keys.")
        #     self.hbase_conf['org.apache.hadoop.hbase.filter'] = "org.apache.hadoop.hbase.filter.KeyOnlyFilter"
        #     self.hbase_conf['hbase.mapreduce.scan.setFilter'] = "org.apache.hadoop.hbase.filter.KeyOnlyFilter"
        key_conv = "org.apache.spark.examples.pythonconverters.ImmutableBytesWritableToStringConverter"
        value_conv = "org.apache.spark.examples.pythonconverters.HBaseResultToStringConverter"
        hbase_rdd = self.sc.newAPIHadoopRDD("org.apache.hadoop.hbase.mapreduce.TableInputFormat",
                                            "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
                                            "org.apache.hadoop.hbase.client.Result",
                                            keyConverter=key_conv,
                                            valueConverter=value_conv,
                                            conf=self.hbase_conf)
        # do that outside if needed
        # hbase_rdd = hbase_rdd.flatMapValues(lambda v: v.split("\n")).mapValues(json.loads)
        return hbase_rdd
| {
"repo_name": "svebk/DeepSentiBank_memex",
"path": "workflows/packages/python-lib/hbase_manager.py",
"copies": "1",
"size": "4696",
"license": "bsd-2-clause",
"hash": 5313052693085453000,
"line_mean": 56.2682926829,
"line_max": 236,
"alpha_frac": 0.6352214651,
"autogenerated": false,
"ratio": 3.6658860265417643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9753714264511527,
"avg_score": 0.009478645426047342,
"num_lines": 82
} |
__author__ = 'amandeep'
class HbaseManager(object):
    """Minimal helper for writing an RDD to an HBase table and reading one
    back through Spark's Hadoop input/output formats.

    :param sc: SparkContext used for the Hadoop RDD calls.
    :param conf: SparkConf (stored, not used directly here).
    :param hbase_hostname: ZooKeeper quorum host(s) for the HBase cluster.
    :param hbase_tablename: name of the target HBase table.
    """

    def __init__(self, sc, conf, hbase_hostname, hbase_tablename):
        self.name = "ES2HBase"
        self.sc = sc
        self.conf = conf
        self.hbase_conf = {"hbase.zookeeper.quorum": hbase_hostname}
        self.hbase_table = hbase_tablename

    def rdd2hbase(self, data_rdd):
        """Save *data_rdd* into the configured HBase table."""
        self.hbase_conf['hbase.mapred.outputtable'] = self.hbase_table
        self.hbase_conf['mapreduce.outputformat.class'] = "org.apache.hadoop.hbase.mapreduce.TableOutputFormat"
        self.hbase_conf['mapreduce.job.output.key.class'] = "org.apache.hadoop.hbase.io.ImmutableBytesWritable"
        self.hbase_conf['mapreduce.job.output.value.class'] = "org.apache.hadoop.io.Writable"
        key_conv = "org.apache.spark.examples.pythonconverters.StringToImmutableBytesWritableConverter"
        value_conv = "org.apache.spark.examples.pythonconverters.StringListToPutConverter"
        # print() call form works on both Python 2 and 3; the original used a
        # Python-2-only print statement, which is a SyntaxError on Python 3.
        print(self.hbase_conf)
        # datamap = data_rdd.flatMap(HbaseManager.create_ads_tuple)
        data_rdd.saveAsNewAPIHadoopDataset(
            conf=self.hbase_conf,
            keyConverter=key_conv,
            valueConverter=value_conv)

    def read_hbase_table(self):
        """Return an RDD of (row key, result string) over the configured table."""
        self.hbase_conf['hbase.mapreduce.inputtable'] = self.hbase_table
        key_conv = "org.apache.spark.examples.pythonconverters.ImmutableBytesWritableToStringConverter"
        value_conv = "org.apache.spark.examples.pythonconverters.HBaseResultToStringConverter"
        hbase_rdd = self.sc.newAPIHadoopRDD("org.apache.hadoop.hbase.mapreduce.TableInputFormat",
                                            "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
                                            "org.apache.hadoop.hbase.client.Result",
                                            keyConverter=key_conv,
                                            valueConverter=value_conv,
                                            conf=self.hbase_conf)
        # hbase_rdd = hbase_rdd.flatMapValues(lambda v: v.split("\n")).mapValues(json.loads)
        return hbase_rdd
| {
"repo_name": "usc-isi-i2/WEDC",
"path": "spark_dependencies/python_lib/digSparkUtil/hbase_manager.py",
"copies": "2",
"size": "2096",
"license": "apache-2.0",
"hash": -1097130780771201500,
"line_mean": 50.1219512195,
"line_max": 111,
"alpha_frac": 0.6312022901,
"autogenerated": false,
"ratio": 3.749552772808587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5380755062908588,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amandeep'
"""
USE THESE FOR HBASE
self.hbase_host = 'zk04.xdata.data-tactics-corp.com:2181'
self.hbase_table = 'test_ht_aman'
"""
"""
EXECUTE AS
spark-submit --master local[*] --executor-memory=4g --driver-memory=4g \
--jars jars/elasticsearch-hadoop-2.2.0-m1.jar,jars/spark-examples_2.10-2.0.0-SNAPSHOT.jar,jars/random-0.0.1-SNAPSHOT-shaded.jar \
es2hbase.py -hostname els.istresearch.com -port 19200 \
-username memex -password <es_password> -indexname <esindex> \
-doctype <esdoctype> -hbasehostname <hbasehostname> \
-hbasetablename <hbase_tablename>
"""
import argparse
from pyspark import SparkContext, SparkConf
from hbase_manager import HbaseManager
class ES(object):
    """Reader for an ElasticSearch index via Spark's EsInputFormat.

    Builds an es-hadoop configuration for an authenticated, SSL, WAN-only
    connection and exposes :meth:`es2rdd` to run a query and get the matching
    documents back as an RDD.
    """

    def __init__(self, spark_context, spark_conf, index, doc, es_hostname, es_port, es_username, es_password):
        self.name = "ES2HBase"
        self.sc = spark_context
        self.conf = spark_conf
        # es-hadoop connector settings; see the es-hadoop configuration
        # reference for the meaning of each key.
        self.es_conf = {}
        self.es_conf['es.resource'] = index + "/" + doc
        self.es_conf['es.nodes'] = es_hostname + ":" + str(es_port)
        self.es_conf['es.index.auto.create'] = "no"
        self.es_conf['es.net.http.auth.user'] = es_username
        self.es_conf['es.net.http.auth.pass'] = es_password
        self.es_conf['es.net.ssl'] = "true"
        self.es_conf['es.nodes.discovery'] = "false"
        self.es_conf['es.http.timeout'] = "1m"
        self.es_conf['es.http.retries'] = "1"
        self.es_conf['es.nodes.client.only'] = "false"
        self.es_conf['es.nodes.wan.only'] = "true"

    def set_output_json(self):
        """Ask es-hadoop to return documents as raw JSON strings."""
        # Does this give the "_timestamp" field?
        self.es_conf['es.output.json'] = "true"

    def set_read_metadata(self):
        """Include document metadata (_id, _index, ...) in the results."""
        self.es_conf['es.read.metadata'] = "true"

    def es2rdd(self, query):
        """Run *query* against the configured index and return the result RDD."""
        self.es_conf['es.query'] = query
        # print() call form works on both Python 2 and 3; the original used a
        # Python-2-only print statement, which is a SyntaxError on Python 3.
        print(self.es_conf)
        es_rdd = self.sc.newAPIHadoopRDD(inputFormatClass="org.elasticsearch.hadoop.mr.EsInputFormat",
                                         keyClass="org.apache.hadoop.io.NullWritable",
                                         valueClass="org.apache.hadoop.io.Text",
                                         conf=self.es_conf)
        # es_rdd.map(lambda x: json.dumps(ES2HBase.printable_doc(x))).saveAsTextFile('/tmp/cdr_v2_ads')
        return es_rdd
# if __name__ == '__main__':
# argp = argparse.ArgumentParser()
# argp.add_argument("-hostname", help="Elastic Search Server hostname, defaults to 'localhost'", default="localhost")
# argp.add_argument("-port", type=int, help="Elastic Search Server port,defaults to 9200", default=9200)
# argp.add_argument("-username", help="username for ElasticSearch", default="")
# argp.add_argument("-password", help="password for ElasticSearch", default="")
# argp.add_argument("-indexname", help="ElasticSearch index name")
# argp.add_argument("-doctype", help="ElasticSearch doc type")
# argp.add_argument("-hbasehostname", help="ElasticSearch doc type")
# argp.add_argument("-hbasetablename", help="ElasticSearch doc type")
#
# # ads query
# query = "{\"query\": {\"filtered\": {\"filter\": {\"exists\": {\"field\": \"extractions\"}},\"query\": " \
# "{\"match_all\": {}}}}}"
# # images query
# """query = "{\"query\": {\"filtered\": {\"filter\": {\"exists\": {\"field\": \"obj_parent\"}},\"query\": {\"match_all\"" \
# ": {}}}},\"size\": 4000}" """
#
# arguments = argp.parse_args()
# es2hbase = ES(arguments.indexname, arguments.doctype, arguments.hostname, arguments.port, arguments.username,
# arguments.password)
# es_rdd = es2hbase.es2rdd(query)
# hm = HbaseManager(arguments.hbasehostname, arguments.hbasetablename)
# hm.rdd2hbase(es_rdd)
# es2hbase.sc.stop()
# print "Done!"
| {
"repo_name": "svebk/DeepSentiBank_memex",
"path": "workflows/packages/python-lib/elastic_manager.py",
"copies": "1",
"size": "3827",
"license": "bsd-2-clause",
"hash": 3703418965140165600,
"line_mean": 44.5595238095,
"line_max": 133,
"alpha_frac": 0.6064802718,
"autogenerated": false,
"ratio": 3.168046357615894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9257342849487076,
"avg_score": 0.003436755985763737,
"num_lines": 84
} |
__author__ = "Amaral LAN"
__copyright__ = "Copyright 2017-2018, Amaral LAN"
__credits__ = ["Amaral LAN"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Amaral LAN"
__email__ = "amaral@northwestern.edu"
__status__ = "Development"
import pystache
import pymongo
from copy import copy
from my_settings import SECTION_NAMES, COLLECTION_NAMES
from my_mongo_db_login import DB_LOGIN_INFO, DATABASE_NAME
# Entry point: render each MongoDB-backed CV section through its pystache
# template into a ./Tex_files/<section>.tex file.
if __name__ == "__main__":
    pystache.defaults.DELIMITERS = ('\|', '|/') # Change delimiters to avoid conflicts with TeX
    renderer = pystache.Renderer(search_dirs=['./Formatting_files'])
    connection = pymongo.MongoClient(DB_LOGIN_INFO['credentials'], DB_LOGIN_INFO['port'])
    db = connection[DATABASE_NAME]
    print('\nOpened connection')
    # Create tex files for title block
    #
    collection = db['bio-info']
    data = collection.find_one()
    for filename in ['variables', 'title_block']:
        tex_file = './Tex_files/' + filename + '.tex'
        print('\n' + filename + '\n' + tex_file )
        with open(tex_file, 'w', encoding='utf-8') as file_out:
            result = renderer.render_name(filename, data)
            file_out.write(result)
    # Create tex files for all other sections of CV
    #
    section_list = copy( list(SECTION_NAMES.keys()) )
    section_list.remove('bio-info')
    for filename in section_list:
        tex_file = './Tex_files/' + filename + '.tex'
        print('\n' + filename + '\n' + tex_file )
        with open(tex_file, 'w', encoding='utf-8') as file_out:
            # Create section header
            if SECTION_NAMES[filename] is not None:
                result = renderer.render_name('section', {'NAME': SECTION_NAMES[filename],
                                                          'Clean_NAME': list(SECTION_NAMES[filename].split())[0]}
                                              )
                file_out.write(result)
            for name in COLLECTION_NAMES[filename]:
                # A None entry means the section has a single unnamed
                # collection whose Mongo name is the section name itself;
                # otherwise a subsection header is emitted first and the
                # collection name is '<section>_<subsection>'.
                if name is None:
                    name = filename
                else:
                    print(name)
                    result = renderer.render_name('subsection', {'NAME': name, 'Clean_NAME': name.replace(' ', '_')})
                    file_out.write(result)
                    name = filename + '_' + name.lower()
                data = []
                for document in db[name].find():
                    data.append(document)
                for i in range(len(data)-1, -1, -1):
                    if filename == 'publications':
                        # Bold the CV owner's name in the author list.
                        data[i]['Authors'] = data[i]['Authors'].replace('Amaral LAN', '{\\textbf{Amaral LAN}}')
                        try:
                            logic_citations = bool(data[i]['GS_cites']) or \
                                              bool(data[i]['Scopus_cites']) or \
                                              bool(data[i]['WoS_cites'])
                            logic_altmetrics = bool(data[i]['Alt_score'])
                        # NOTE(review): bare except treats ANY error (e.g. a
                        # missing key) as "no citation data" -- consider
                        # narrowing to KeyError.
                        except:
                            logic_citations = False
                            logic_altmetrics = False
                        data[i].update({'Citations': logic_citations, 'Altmetrics': logic_altmetrics})
                    else:
                        # Non-publication sections render immediately;
                        # publications are rendered after the sort below.
                        result = renderer.render_name(filename, data[i])
                        file_out.write(result)
            # Sort publications by year of publication and then by title
            #
            if filename == 'publications':
                data.sort(key = lambda k: (k['Year'], k['Title']))
                for i in range(len(data)-1, -1, -1):
                    data[i].update({'Number': str(i + 1)})
                    if 'Alt_score' in data[i].keys():
                        temp_string = data[i]['Alt_score']
                        if temp_string != False:
                            # Escape % so LaTeX does not treat it as a comment.
                            data[i]['Alt_score'] = temp_string.replace('%', '\\%')
                    # print(data[i]['Citations'], data[i]['Altmetrics'], temp_string)
                    result = renderer.render_name(filename, data[i])
                    file_out.write(result)
                # Inelegant, but effective, way to add space between subsections
                #
                file_out.write('\\vspace*{0.2cm}')
| {
"repo_name": "lamaral1968/maintaining_latex_cv",
"path": "make_tex_files1.0.py",
"copies": "1",
"size": "4326",
"license": "mit",
"hash": -6343429728497818000,
"line_mean": 40.2,
"line_max": 117,
"alpha_frac": 0.4889042996,
"autogenerated": false,
"ratio": 4.204081632653061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5192985932253061,
"avg_score": null,
"num_lines": null
} |
__author__ = "Amaral LAN"
__copyright__ = "Copyright 2017-2018, Amaral LAN"
__credits__ = ["Amaral LAN"]
__license__ = "GPL"
__version__ = "1.1"
__maintainer__ = "Amaral LAN"
__email__ = "amaral@northwestern.edu"
__status__ = "Production"
import pymongo
from bs4 import BeautifulSoup
from splinter import Browser
from time import sleep
from random import randint
from nltk.metrics import distance
from my_settings import PUBLICATION_TYPES, FLAGS, URLS
from my_mongo_db_login import DB_LOGIN_INFO, DATABASE_NAME
if __name__ == "__main__":
connection = pymongo.MongoClient(DB_LOGIN_INFO['credentials'], DB_LOGIN_INFO['port'])
db = connection[DATABASE_NAME]
print('\nOpened connection')
# Get WoS and Google Scholar citation counts
url_gs = URLS['google_scholar']
CVname = 'Amaral'
with Browser('chrome') as browser:
for pub_type in PUBLICATION_TYPES:
flag = True
collection_name = 'publications' + '_' + pub_type.lower()
collection = db[collection_name]
print('\n\n', pub_type.upper(), '--', collection_name)
# Because we will be writing to database, which will change order of documents,
# I collect a list of indices before any changes are made
#
paper_ids = []
for paper in collection.find():
paper_ids.append(paper['_id'])
print('There are {} papers in this group'.format(len(paper_ids)))
for paper_id in paper_ids:
update = False
if FLAGS['update_gs']:
update = True
else:
paper = collection.find_one({'_id': paper_id})
if 'GS_cites' not in paper.keys():
update = True
elif paper['GS_cites'] == False:
update = True
if update:
sleep(randint(0, 5))
title = paper['Title'].lower()
first_author = paper['Authors'].split()[0].replace("\`",
'').replace("\'",
'').replace("\:",
'').lower()
paper_code = paper['Year'] + ' ' + first_author + ' ' + title
print(paper_code)
browser.visit(url_gs)
browser.fill('q', paper_code + ' ' + paper['Journal'])
button = browser.find_by_name('btnG')
button.click()
# Slow things down first time around because of browser coming up
if flag:
input('Enter information by browser and then enter something here')
flag = False
# Get html code and parse information
#
html_content = browser.html
soup = BeautifulSoup(html_content, 'html.parser')
tmp = soup.find('h3', {'class': 'gs_rt'})
# Google will find out you are scrapping and will send you test to make sure you are not a robot
# This will give you a change to answer tests to its satisfaction.
# At that point, answer question with any key
try:
title_gs = tmp.text
title_gs = title_gs.replace('&', 'and').lstrip('[PDF]').lstrip('[HTML]').lower()
except:
input('Enter information by browser and then enter something here')
browser.fill('q', paper_code + ' ' + paper['Journal'])
button = browser.find_by_name('btnG')
button.click()
match = soup.find('div', {'class': 'gs_ri'})
children = match.findAll('div')
reference_gs = children[0].text
cites = children[2].findAll()
gs_cites = False
wos_cites = False
for item in cites:
if 'Cited by' in item.text:
gs_cites = int(item.text.split()[-1])
if 'Web of' in item.text:
wos_cites = int(item.text.split(':')[1])
# Update citation information if there is a match of paper titles
is_match = False
if title_gs == title:
is_match = True
elif distance.edit_distance(title_gs, title) < 3:
is_match = True
print(is_match)
print(title_gs.encode('utf-8'))
print(title)
print('---', gs_cites, wos_cites)
if is_match:
collection.update_one({'_id': paper_id}, {'$set': {'GS_cites': gs_cites,
'WoS_cites': wos_cites}})
else:
print('Titles were not a match!')
a = input('Do you want to update citations nonetheless? [Y/n]')
if a.lower() != 'n':
collection.update_one({'_id': paper_id}, {'$set': {'GS_cites': gs_cites,
'WoS_cites': wos_cites}})
| {
"repo_name": "lamaral1968/maintaining_latex_cv",
"path": "scrape_google_scholar_citations.py",
"copies": "1",
"size": "5688",
"license": "mit",
"hash": 7935431556485854000,
"line_mean": 42.4198473282,
"line_max": 116,
"alpha_frac": 0.4474331927,
"autogenerated": false,
"ratio": 4.74395329441201,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.569138648711201,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amarchaudhari'
import config
import requests
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
class GetSwitchPortStatus(WillPlugin):
    """Will chat plugin: report the state of a LeaseWeb bare-metal switch port."""

    @respond_to("switchport status (?P<server_id>.*)")
    def say_switchport_status(self, message, server_id=None):
        """Reply with the switch-port state of server 'BWND<server_id>'.

        Resolves the server name to a bareMetalId, queries the LeaseWeb
        switchPort endpoint, and replies 'Enabled'/'Disabled' — or an error
        message when the server is unknown or the request times out.
        """
        if not server_id:
            self.reply(message, "No Server ID", color="red")
            return
        full_server_id = "BWND" + str(server_id)
        lsw_key = config.lsw_api_key
        # Request: LeaseWeb API (https://api.leaseweb.com/v1/bareMetals)
        try:
            baremetalid = get_baremetal_id(full_server_id)
            if baremetalid:
                url = "https://api.leaseweb.com/v1/bareMetals/" + str(baremetalid) + "/switchPort"
                r = requests.get(url, headers={"Accept": "application/json", "X-Lsw-Auth": lsw_key})
                if r.status_code == 200:
                    data = r.json()
                    if data['switchPort']['status'] == 'open':
                        self.reply(message, str(data['switchPort']['serverName']) + " : Enabled", color="green")
                    elif data['switchPort']['status'] == 'closed':
                        self.reply(message, str(data['switchPort']['serverName']) + " : Disabled", color="red")
                elif r.status_code == 400:
                    self.reply(message, "BareMetal Server Not found")
                else:
                    self.reply(message, "BareMetal Server Not found")
            else:
                self.reply(message, "BareMetal Server Not found")
        # BUG FIX: 'except Type, name' is Python-2-only syntax (a SyntaxError
        # on Python 3); the bound exception object was unused anyway.
        except requests.exceptions.Timeout:
            self.reply(message, "Request timed out , try again !", color="red")
class SwitchPortDisable(WillPlugin):
    """Will chat plugin: close (disable) a LeaseWeb bare-metal switch port."""

    @respond_to("switchport disable (?P<server_id>.*)")
    def say_switchport_disable(self, message, server_id=None):
        """Close the switch port of server 'BWND<server_id>' via the LeaseWeb API.

        Replies with the outcome: success on HTTP 200, an API-trouble message
        on 500, and 'Not found' on 404 or any other status. Like the original,
        it sends no reply when server_id is empty or the id lookup fails.
        """
        if not server_id:
            return
        full_server_id = "BWND" + str(server_id)
        lsw_key = config.lsw_api_key
        baremetalid = get_baremetal_id(full_server_id)
        try:
            if baremetalid:
                headers = {'Accept': 'application/json', 'X-Lsw-Auth': lsw_key}
                apicall = "https://api.leaseweb.com/v1/bareMetals/" + str(baremetalid) + "/switchPort/close"
                r = requests.post(url=apicall, headers=headers)
                if r.status_code == 200:
                    self.reply(message, "Switch Port of " + str(full_server_id) + " has been Disabled", color="green")
                elif r.status_code == 500:
                    self.reply(message, "Leaseweb api down/not working , try again in a while", color="red")
                elif r.status_code == 404:
                    self.reply(message, "BareMetal Server Not found", color="red")
                else:
                    self.reply(message, "BareMetal Server Not found", color="red")
        # BUG FIX: Python-2-only 'except Type, name' syntax; unused binding dropped.
        except requests.exceptions.Timeout:
            self.reply(message, "Request timed out , try again !", color="red")
class SwitchPortEnable(WillPlugin):
    """Will chat plugin: open (enable) a LeaseWeb bare-metal switch port."""

    @respond_to("switchport enable (?P<server_id>.*)")
    def say_switchport_enable(self, message, server_id=None):
        """Open the switch port of server 'BWND<server_id>' via the LeaseWeb API.

        Replies with the outcome: success on HTTP 200, an API-trouble message
        on 500, and 'Not found' on 404 or any other status. Like the original,
        it sends no reply when server_id is empty or the id lookup fails.
        """
        if not server_id:
            return
        full_server_id = "BWND" + str(server_id)
        lsw_key = config.lsw_api_key
        baremetalid = get_baremetal_id(full_server_id)
        try:
            if baremetalid:
                headers = {'Accept': 'application/json', 'X-Lsw-Auth': lsw_key}
                apicall = "https://api.leaseweb.com/v1/bareMetals/" + str(baremetalid) + "/switchPort/open"
                r = requests.post(url=apicall, headers=headers)
                if r.status_code == 200:
                    self.reply(message, "Switch Port of " + str(full_server_id) + " has been Enabled", color="green")
                elif r.status_code == 500:
                    self.reply(message, "Leaseweb api down/not working , try again in a while", color="red")
                elif r.status_code == 404:
                    self.reply(message, "BareMetal Server Not found", color="red")
                else:
                    self.reply(message, "BareMetal Server Not found", color="red")
        # BUG FIX: Python-2-only 'except Type, name' syntax; unused binding dropped.
        except requests.exceptions.Timeout:
            self.reply(message, "Request timed out , try again !", color="red")
def get_baremetal_id(server_id):
    """Resolve a LeaseWeb serverName (e.g. 'BWND123') to its bareMetalId.

    Lists all bare-metal servers via the LeaseWeb v1 API and returns the
    matching bareMetalId. Returns False when no server matches, when the
    matched id is falsy (original contract preserved), or when the request
    times out. Other request/JSON errors propagate, as in the original.
    """
    lsw_key = config.lsw_api_key
    try:
        r = requests.get("https://api.leaseweb.com/v1/bareMetals",
                         headers={"Accept": "application/json", "X-Lsw-Auth": lsw_key})
        print('Response status ' + str(r.status_code))
        data = r.json()
        for server in data['bareMetals']:
            if server['bareMetal']['serverName'] == server_id:
                baremetalid = server['bareMetal']['bareMetalId']
                # Preserve the original contract: a falsy id is reported as False.
                # (The original signalled "no match" by letting the name stay
                # unbound and catching UnboundLocalError — replaced with a
                # direct return.)
                return baremetalid if baremetalid else False
        return False
    # BUG FIX: 'except Type, name' is Python-2-only syntax; unused binding dropped.
    except requests.exceptions.Timeout:
        return False
| {
"repo_name": "Amar-Chaudhari/lswhipchatbot",
"path": "lswbot.py",
"copies": "1",
"size": "5560",
"license": "mit",
"hash": -4216513200885038000,
"line_mean": 47.7719298246,
"line_max": 135,
"alpha_frac": 0.5145683453,
"autogenerated": false,
"ratio": 4.234577303884235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5249145649184235,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.