text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'casey'
"""
@package coverage_model.util.numpy_utils
@file coverage_model/util/numpy_utils.py
@author Casey Bryant
@brief Common numpy array manipulation routines.
"""
import numpy as np
import random
import string
class NumpyUtils(object):
    """Common numpy array manipulation helpers (stateless classmethods)."""

    @classmethod
    def sort_flat_arrays(cls, np_dict, sort_parameter):
        """Return a copy of np_dict with every array reordered by the argsort
        of np_dict[sort_parameter].

        All arrays are assumed aligned (same length).  If the sort array is
        empty the input dict is returned unchanged.
        """
        sorted_array_dict = {}
        sort_array = np_dict[sort_parameter]
        if len(sort_array) > 0:
            sorted_indexes = np.argsort(sort_array)
            # .items() works on both Python 2 and 3; iteritems() is py2-only.
            for key, value in np_dict.items():
                sorted_array_dict[key] = value[sorted_indexes]
            return sorted_array_dict
        return np_dict

    @classmethod
    def create_numpy_object_array(cls, array):
        """Wrap a sequence in a 1-d array of dtype=object.

        Assigning through arr[:] keeps nested sequences as single elements
        instead of letting numpy broadcast them into extra dimensions.
        """
        if isinstance(array, np.ndarray):
            array = array.tolist()
        arr = np.empty(len(array), dtype=object)
        arr[:] = array
        return arr

    @classmethod
    def create_filled_array(cls, shape, value, dtype):
        """Return a new array of the given shape/dtype, filled with value."""
        arr = np.empty(shape, dtype=dtype)
        arr[:] = value
        return arr

    @classmethod
    def get_duplicate_values(cls, np_arr, presorted=False):
        """Return the unique values that occur more than once in np_arr.

        NOTE: when presorted is False the input array is sorted IN PLACE
        (side effect on the caller's array); pass presorted=True to avoid it.

        Fix: the original indexed np_arr (length n) with a boolean mask of
        length n-1 (`np_arr[np_arr[1:] == np_arr[:-1]]`), which raises
        IndexError on modern numpy.  Indexing np_arr[1:] selects the same
        duplicate values and is shape-correct.
        """
        if not presorted:
            np_arr.sort()
        tail = np_arr[1:]
        dup_vals = np.unique(tail[tail == np_arr[:-1]])
        return dup_vals
class DedupedNumpyArrayDict(object):
    """
    Accepts an aligned dictionary of numpy arrays (think numpy record array, but
    kept as separate arrays for programmatic reasons).
    Removes indexes ('records') with duplicate values in the array found at dedupe_key.
    If add_aggregate is specified, an aggregate record, as specified by an implementing class,
    is appended.
    The resulting dictionary contains arrays sorted in the order determined by dedupe_key values.
    If the supplied arrays are already presorted, that can be specified to avoid additional sort
    processing.  For performance reasons, duplicate identification relies on sorted values, so the
    presorted flag should be used with caution.
    The add_aggregate option causes a new aggregated 'record' for each duplicate to be appended to
    each numpy array in the dictionary; appends require a copy of the array, which can be costly.
    """
    def __init__(self, np_dict, dedupe_key, dict_arrays_are_presorted=False, add_aggregate=False):
        self.dedupe_key = dedupe_key
        self.add_aggregate = add_aggregate
        if not dict_arrays_are_presorted:
            np_dict = NumpyUtils.sort_flat_arrays(np_dict, self.dedupe_key)
        alignment_array = np_dict[self.dedupe_key]
        # Duplicate detection compares neighbors, hence presorted=True here.
        duplicate_values = NumpyUtils.get_duplicate_values(alignment_array, presorted=True)
        indices_to_remove = set()
        to_append = []
        for value in duplicate_values:
            indices = np.where(alignment_array==value)[0]
            prefered_indices, append_set = self.resolve_duplicate_value(np_dict, indices)
            if prefered_indices is not None:
                # Keep the preferred record(s): drop them from the removal set.
                tmp = np.searchsorted(indices, prefered_indices)
                indices = np.delete(indices, tmp)
            indices_to_remove.update(indices)
            if self.add_aggregate:
                to_append.append(append_set)
        valid_indices = np.delete(np.arange(alignment_array.size), list(indices_to_remove))
        self.deduped_dict = {}
        # .items() works on both Python 2 and 3; iteritems() is py2-only.
        for k, v in np_dict.items():
            self.deduped_dict[k] = v[valid_indices]
            if len(to_append) > 0:
                # Append one aggregate record per duplicate value.
                append_list = []
                for append_dict in to_append:
                    append_list.append(append_dict[k])
                append_arr = np.array(append_list,dtype=self.deduped_dict[k].dtype)
                self.deduped_dict[k] = np.hstack((self.deduped_dict[k], append_arr))
        if len(to_append) > 0 and self.add_aggregate:
            # Appended records land at the end; restore dedupe_key ordering.
            self.deduped_dict = NumpyUtils.sort_flat_arrays(self.deduped_dict, self.dedupe_key)

    @property
    def np_dict(self):
        """The deduplicated dictionary of aligned numpy arrays."""
        return self.deduped_dict

    def resolve_duplicate_value(self, np_dict, indices):
        """Given the indices of one duplicate group, return
        (preferred_indices_to_keep_or_None, aggregate_record_dict).
        Implemented by subclasses."""
        raise NotImplementedError('Base class not implemented')
class MostRecentRecordNumpyDict(DedupedNumpyArrayDict):
    """
    Retains only the most recent record for a duplicate as specified by the array referenced by 'most_recent_key'.
    """
    def __init__(self, np_dict, dedupe_key, most_recent_key, dict_arrays_are_presorted=False, reverse=False):
        # most_recent_key: name of the array whose max (or min, when
        # reverse=True) decides which duplicate record is kept.
        self.most_recent_key = most_recent_key
        self.reverse = reverse
        # NOTE(review): add_aggregate is not forwarded, so it is always False here.
        super(MostRecentRecordNumpyDict, self).__init__(np_dict, dedupe_key, dict_arrays_are_presorted)
    def resolve_duplicate_value(self, np_dict, indices):
        # Keep the record with the largest (or smallest) preference value.
        preference_array = np_dict[self.most_recent_key]
        pref_vals = preference_array[indices]
        if not self.reverse:
            prefered_indicies = pref_vals.max()
        else:
            prefered_indicies = pref_vals.min()
        # NOTE(review): np.where scans the WHOLE preference array, not just
        # `indices` -- a record outside this duplicate group with an equal
        # preference value would also match.  Confirm this is intended.
        return (np.where(preference_array==prefered_indicies), {})
class MostRecentValidValueNumpyDict(DedupedNumpyArrayDict):
    """
    Creates a new record for duplicates where the value of each array is the most recent valid
    value.  Other duplicate values are removed.
    """
    def __init__(self, np_dict, dedupe_key, ingest_times_dict, valid_values_dict, dict_arrays_are_presorted=False, add_aggregate=False):
        """ingest_times_dict / valid_values_dict: per-parameter arrays aligned
        with np_dict, giving each record's ingest time and validity mask."""
        self._ingest_times_dict = ingest_times_dict
        self._valid_values_dict = valid_values_dict
        # Random suffixes keep the bookkeeping arrays from colliding with real
        # parameter names while they ride along inside np_dict.
        self.valid_mask_key_mutation = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(12))
        self.ingest_time_key_mutation = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(12))
        # .items() works on both Python 2 and 3; iteritems() is py2-only.
        for k, v in self.valid_values_dict.items():
            np_dict[k+self.valid_mask_key_mutation] = v
        for k, v in self.ingest_times_dict.items():
            np_dict[k+self.ingest_time_key_mutation] = v
        super(MostRecentValidValueNumpyDict, self).__init__(np_dict, dedupe_key, dict_arrays_are_presorted, add_aggregate)
        # Split the bookkeeping arrays back out of the deduped dict.  Iterate
        # over a snapshot of the keys because entries are popped inside the
        # loop (mutating a live keys view raises RuntimeError on Python 3).
        for k in list(self.np_dict.keys()):
            v = self.deduped_dict[k]
            if k.endswith(self.valid_mask_key_mutation):
                nk = k[:-len(self.valid_mask_key_mutation)]
                self._valid_values_dict[nk] = v
                self.deduped_dict.pop(k)
            if k.endswith(self.ingest_time_key_mutation):
                nk = k[:-len(self.ingest_time_key_mutation)]
                self._ingest_times_dict[nk] = v
                self.deduped_dict.pop(k)
    def resolve_duplicate_value(self, np_dict, indices):
        deduped_dict_values = {}
        use_index = None
        if not self.add_aggregate:
            # Overwrite the last duplicate record in place; the base class
            # will keep it and drop the rest.
            use_index = indices[-1]
        for k, v in np_dict.items():
            if k.endswith(self.valid_mask_key_mutation) or k.endswith(self.ingest_time_key_mutation):
                continue
            new_val = None
            is_valid = False
            ingest_time = 0
            # Walk candidates from most- to least-recently ingested.
            pref_vals = np_dict[k+self.ingest_time_key_mutation][indices]
            sorted_dict = NumpyUtils.sort_flat_arrays({'i':indices, 'v':pref_vals}, 'v')
            for index in sorted_dict['i'][::-1]:
                is_valid = np_dict[k+self.valid_mask_key_mutation][index]
                if is_valid:
                    new_val = np_dict[k][index]
                    is_valid = True
                    ingest_time = np_dict[k+self.ingest_time_key_mutation][index]
                    break
                elif new_val is None:
                    # Fall back to the newest value even if it is invalid.
                    new_val = np_dict[k][index]
            if not self.add_aggregate:
                np_dict[k][use_index] = new_val
                np_dict[k+self.valid_mask_key_mutation][use_index] = is_valid
                np_dict[k+self.ingest_time_key_mutation][use_index] = ingest_time
            else:
                deduped_dict_values[k] = new_val
                deduped_dict_values[k+self.valid_mask_key_mutation] = is_valid
                deduped_dict_values[k+self.ingest_time_key_mutation] = ingest_time
        return (use_index, deduped_dict_values)
    @property
    def valid_values_dict(self):
        """Per-parameter validity-mask arrays, deduped alongside np_dict."""
        return self._valid_values_dict
    @property
    def ingest_times_dict(self):
        """Per-parameter ingest-time arrays, deduped alongside np_dict."""
        return self._ingest_times_dict
class AggregatedDuplicatesNumpyDict(MostRecentValidValueNumpyDict):
    """
    Creates a new record for duplicates where the value of each array is the most recent valid
    value.  Other duplicate values are not removed, meaning the array must be appended.  For
    large arrays, this can be time consuming.
    """
    def __init__(self, np_dict, dedupe_key, most_recent_key, valid_values_dict, dict_arrays_are_presorted=False):
        # Always aggregates: duplicates are kept and an aggregate record is appended.
        super(AggregatedDuplicatesNumpyDict, self).__init__(
            np_dict, dedupe_key, most_recent_key, valid_values_dict,
            dict_arrays_are_presorted, add_aggregate=True)

    def resolve_duplicate_value(self, np_dict, indices):
        # Delegate value selection to the parent, but report every duplicate
        # index as "preferred" so none of the original records are dropped.
        _, aggregate_record = super(AggregatedDuplicatesNumpyDict, self).resolve_duplicate_value(np_dict, indices)
        return indices, aggregate_record
| {
"repo_name": "ooici/coverage-model",
"path": "coverage_model/util/numpy_utils.py",
"copies": "1",
"size": "9439",
"license": "bsd-2-clause",
"hash": -6871114243662425000,
"line_mean": 44.1626794258,
"line_max": 143,
"alpha_frac": 0.6339654624,
"autogenerated": false,
"ratio": 3.7205360662199447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4854501528619945,
"avg_score": null,
"num_lines": null
} |
__author__ = 'castilla'
import numpy as np
import operator
import csv
# Python 2 script: loads GloVe-style binary vectors plus a vocab file and
# evaluates word-analogy questions (a:b :: c:?) via cosine similarity.
if __name__ == "__main__":
    # --- load vocabulary: one "word count" pair per line ---
    palavras=[]
    ocorrencias=[]
    vocab = open('../vocab.txt', 'r')
    lista = vocab.readlines()
    for item in lista:
        partes = item.split(" ")
        palavras.append(partes[0])
        ocorrencias.append(int(partes[1].strip()))
    # Map each word to its line index in the vocab file.
    # NOTE(review): range runs to len(palavras)+1, but zip truncates to the
    # shorter sequence, so the extra index is harmless.
    order = range(0,len(palavras)+1)
    wordMap = dict(zip(palavras, order))
    sorted_wordMap = sorted(wordMap.items(), key=operator.itemgetter(1))
    vocab_size = len(palavras)
    # --- load binary vectors; infer the vector size from the file length ---
    vetores = open('../vectors.bin', 'rb')
    vetores.seek(0, 2)  # seek to EOF so tell() yields the file size
    # Presumably 16 = 2 stacked matrices (W1, W2) * 8 bytes per float64, and
    # the trailing +1 column is a bias term removed below -- TODO confirm.
    # Integer division under Python 2.
    vector_size = (vetores.tell()/16/vocab_size)-1
    vetores.seek(0, 0)
    with vetores as fid:
        data_array = np.fromfile(fid).reshape((-1, vector_size+1))
    data_array = np.delete(data_array, np.s_[-1:], 1)  # drop last column
    data_arrays = np.split(data_array, 2)
    W1 = data_arrays[0]
    W2 = data_arrays[1]
    W = np.add(W1, W2)
    # Row-normalize so dot products below are cosine similarities.
    Wf = W / np.sqrt(np.sum(W*W, axis=1))[:,np.newaxis]
    filenames = ['capital-common-countries']
    # Remaining question sets, currently disabled (dead string literal):
    '''
    ,'capital-world', 'currency' , 'city-in-state' ,
    'family', 'gram1-adjective-to-adverb','gram2-opposite' ,'gram3-comparative',
    'gram4-superlative', 'gram5-present-participle' , 'gram6-nationality-adjective'
    ,'gram7-past-tense', 'gram8-plural', 'gram9-plural-verbs']
    '''
    path = './question-data/'
    split_size = 100   # evaluate questions in batches of this many columns
    # Accuracy counters (not all are updated in this version of the script).
    correct_sem = 0
    correct_syn = 0
    correct_tot = 0
    count_syn = 0
    count_tot = 0
    full_count = 0
    for arquivo in filenames:
        full_arquivo = path + arquivo+".txt"
        # ind1..ind4 hold the vocab indices of the four analogy words per
        # question row; out-of-vocabulary words are recorded as -inf.
        ind1 = []
        ind2 = []
        ind3 = []
        ind4 = []
        inds = ind1, ind2, ind3, ind4
        with open(full_arquivo, 'rb') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                #for row in reader:
                itens = row[0].split(" ")
                i = 0
                for item in inds:
                    try:
                        item.append(wordMap[itens[i]])
                    except KeyError:
                        item.append(float('-inf'))
                    i += 1
        fullsize = len(ind1)
        ind1 = np.asarray(ind1)
        ind2 = np.asarray(ind2)
        ind3 = np.asarray(ind3)
        mx = np.zeros((1, fullsize))
        num_iter = int(np.ceil(float(fullsize)/split_size))
        print num_iter
        for num in range(1, num_iter+1):
            # Column range of this batch of questions.
            x = range((num-1)*split_size, min((num)*split_size, fullsize))
            # Analogy target: b - a + c for every question in the batch.
            dist = np.dot(Wf, (Wf[ind2[x], :].T - Wf[ind1[x], :].T + Wf[ind3[x], :].T))
            for i in range(0,len(x)):
                # Exclude the three question words themselves from the argmax.
                dist[ind1[x[i]],i] = float('-inf')
                dist[ind2[x[i]],i] = float('-inf')
                dist[ind3[x[i]],i] = float('-inf')
            # [~, mx(range)] we are here (MATLAB-style progress note)
            v, z = dist.max(0), dist.argmax(0)
            print v, z
            for item4 in z:
                # Reverse lookup: print the predicted word for each question.
                print wordMap.keys()[wordMap.values().index(item4)]
    pass
| {
"repo_name": "castilla/pyGlove",
"path": "read_evaluate.py",
"copies": "1",
"size": "3015",
"license": "mit",
"hash": 8006662475827723000,
"line_mean": 28.2718446602,
"line_max": 92,
"alpha_frac": 0.5157545605,
"autogenerated": false,
"ratio": 3.140625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9127241238731633,
"avg_score": 0.005827664353673564,
"num_lines": 103
} |
__author__ = 'catatonic'
"""
harvest.py:
Identifies potential candidate packets for TospoVirus disclosed passwords.
"""
from scapy.all import *
import urllib
from subprocess import Popen
import difflib
ap_list = {}
def monitor(pkt):
if pkt.haslayer(Dot11) and pkt.type == 0 and pkt.subtype == 8:
if pkt.addr2 not in ap_list and len(pkt.info) > 0:
ap_list[pkt.addr2] = pkt.info
print "adding: %s %s" %(pkt.addr2, pkt.info)
if pkt.haslayer(Dot11) and pkt.type == 0 and pkt.subtype == 4 and len(pkt.info) == 32:
dec = Popen(['openssl', 'rsautl', '-decrypt', '-inkey', 'tvd.pem'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
dec.stdin.write(pkt.info)
dec.stdin.close()
dec.wait()
rc = dec.returncode
if rc == 0:
for close_match in difflib.get_close_matches(pkt.addr2, ap_list):
print "AP: %s\t\tGuessed SSID: %s\t\tPass: %s" %(pkt.addr2, ap_list[close_match], dec.stdout.readline())
# Script entry (Python 2).  NOTE(review): `sys`, `conf` and `sniff` are not
# imported explicitly here -- presumably they arrive via
# `from scapy.all import *` above; confirm, otherwise this raises NameError.
if len(sys.argv) < 2:
    print "Usage: sudo python harvest.py [monitor-iface]"
    exit()
conf.iface = sys.argv[1]
print '[*] Monitoring on %s' %(sys.argv[1])
sniff(prn=monitor, store=0)
| {
"repo_name": "catatonicprime/TospoVirus",
"path": "harvest.py",
"copies": "1",
"size": "1187",
"license": "mit",
"hash": -2219019290169201400,
"line_mean": 34.9696969697,
"line_max": 139,
"alpha_frac": 0.6427969671,
"autogenerated": false,
"ratio": 2.9164619164619165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8981003150818725,
"avg_score": 0.015651146548638216,
"num_lines": 33
} |
__author__ = 'catears'
# Henrik 'catears' Adolfsson
# henad221@student.liu.se
# 2015 - 01 - 15
from collections import defaultdict
from pygame.locals import *
# Mapping of pygame keys to application keynames
# Changing this will result in a different keypress
# for the user but not for the internal application
# (assuming you change a key and not a value)
# Mapping from pygame key constants to the application's internal key names;
# Contexts look keys up by these short names, never by pygame constants.
KEYNAMES = {
    K_UP : 'u',
    K_DOWN : 'd',
    K_LEFT : 'l',
    K_RIGHT : 'r',
    K_SPACE : 'space',
    K_LCTRL : 'switch',
    # NOTE(review): K_0 (the "0" key) is mapped to the name 'lshift' --
    # confirm this is intentional.
    K_0 : 'lshift',
}
class KeyMap:
    """
    Tracks the pressed/unpressed state of application keys.

    The main program stores all keypresses in an internal dictionary that is
    accessed with the values inside KEYNAMES.  How a Context interprets the
    keymap is up to the Context.

    Methods:
        set(key)   -- mark key as pressed (True)
        unset(key) -- mark key as released (False)
        hold(key)  -- True while key is held down (non-consuming)
        press(key) -- True if key was pressed; consumes the press (sets False)
    """
    def __init__(self, start=None):
        # Bug fix: the original signature was `start=defaultdict(bool)`, a
        # mutable default evaluated once at definition time -- every KeyMap()
        # created without arguments shared the very same dictionary, so one
        # instance's keypresses leaked into all others.
        self.keymap = defaultdict(bool) if start is None else start

    def hold(self, key):
        """Return True while `key` is currently down."""
        return self.keymap[key]

    def press(self, key):
        """Return the key state and consume it (one-shot press check)."""
        tmp = self.keymap[key]
        self.keymap[key] = False
        return tmp

    def set(self, key):
        """Mark `key` as pressed."""
        self.keymap[key] = True

    def unset(self, key):
        """Mark `key` as released."""
        self.keymap[key] = False
| {
"repo_name": "CatEars/PygameContext",
"path": "keyhandle.py",
"copies": "1",
"size": "1494",
"license": "mit",
"hash": 6631663074209222000,
"line_mean": 24.3220338983,
"line_max": 78,
"alpha_frac": 0.6338688086,
"autogenerated": false,
"ratio": 3.6174334140435835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9660412499871904,
"avg_score": 0.018177944554335974,
"num_lines": 59
} |
__author__ = 'catears'
# Henrik 'catears' Adolfsson
# henad221@student.liu.se
"""
Example context with a random_polygon
"""
import random
import functools
def make_random_polygon():
    """Return 3 to 9 random vertices on the g.width x g.height surface."""
    def random_vertex():
        return (random.randint(0, g.width), random.randint(0, g.height))
    vertex_count = random.randint(3, 9)
    return [random_vertex() for _ in range(0, vertex_count)]
##########################################
# Exported Context #
##########################################
import pygame
import g
import circle
from context import EmptyContext
class PolygonContext(EmptyContext):
    """Context that draws one random polygon.

    Keys: 'space' -> new random polygon, 'u' -> new random color,
    'lshift' -> set the 'switch' flag and hand over to the ball context.
    """
    def __init__(self, *args, **kwargs):
        self.points = None
        self.color = None
        super(PolygonContext, self).__init__()

    def act(self, keymap):
        """Draw the polygon and react to one frame's worth of key state."""
        pygame.draw.polygon(g.screen, self.color, self.points)
        if keymap.press('space'):
            self.points = make_random_polygon()
        if keymap.press('u'):
            rand = functools.partial(random.randint, 0, 255)
            self.color = (rand(), rand(), rand())
        if keymap.press('lshift'):
            keymap.set('switch')
            return circle.BallContext()

    def setup(self, points=None):
        # Bug fix: the original default was `points=make_random_polygon()`,
        # which is evaluated ONCE at definition time -- every setup() call
        # without arguments reused the very same polygon instead of rolling
        # a fresh random one.
        self.points = make_random_polygon() if points is None else points
        rand = functools.partial(random.randint, 0, 255)
        self.color = (rand(), rand(), rand())
| {
"repo_name": "CatEars/PygameContext",
"path": "example_contexts/polygon.py",
"copies": "1",
"size": "1312",
"license": "mit",
"hash": -2786070712827979000,
"line_mean": 25.24,
"line_max": 76,
"alpha_frac": 0.5670731707,
"autogenerated": false,
"ratio": 3.8475073313782993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9874008111505909,
"avg_score": 0.008114478114478114,
"num_lines": 50
} |
__author__ = 'catears'
# Henrik 'catears' Adolfsson
# henad221@student.liu.se
"""
Example context with balls that move randomly
"""
import random
from context import EmptyContext
def outside_l(circle):
    # Circle's left edge at or past the left border (x = 0).
    return circle['pos'][0] <= circle['r']
def outside_r(circle):
    # Circle's right edge at or past the right border (x = g.width).
    return g.width <= circle['pos'][0] + circle['r']
def outside_u(circle):
    # Circle's top edge at or past the top border (y = 0).
    return circle['pos'][1] <= circle['r']
def outside_d(circle):
    # Circle's bottom edge at or past the bottom border (y = g.height).
    return g.height <= circle['pos'][1] + circle['r']
def update_movement(circle):
    # Bounce off the screen borders: invert a velocity component only when
    # the circle is outside that border AND still moving further out.
    vel = circle['movement']
    if (outside_l(circle) and vel[0] < 0) or (outside_r(circle) and vel[0] > 0):
        vel[0] *= -1
    if (outside_u(circle) and vel[1] < 0) or (outside_d(circle) and vel[1] > 0):
        vel[1] *= -1
def make_circle(a, b, c, e):
    """Bundle color (a), position (b), radius (c) and velocity (e) into a
    circle record dictionary."""
    circle = {}
    circle['color'] = a
    circle['pos'] = b
    circle['r'] = c
    circle['movement'] = e
    return circle
def make_random_circle():
    """Build a circle with random velocity, color, radius and an on-screen
    starting position (kept at least 10px inside the borders)."""
    movement = [random.randint(-5, 5), random.randint(-5, 5)]
    color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
    radius = random.randint(20, 50)
    pos = [random.randint(10, g.width-(radius+10)), random.randint(10, g.height-(radius+10))]
    return make_circle(color, pos, radius, movement)
##########################################
# Exported Context #
##########################################
import pygame
import g
import polygon
class BallContext(EmptyContext):
    """Context with bouncing balls.

    Keys: arrow names push every ball, 'space' adds a new ball, 'lshift'
    sets the 'switch' flag and hands over to the polygon context.
    """
    def __init__(self, *args, **kwargs):
        super(BallContext, self).__init__(*args, **kwargs)
        self.circles = [make_random_circle()]
        # Pre-rendered solid background, blitted every frame to clear.
        self.background = pygame.Surface(g.size)
        self.background.fill(g.background_rgb)
    def act(self, keymap):
        # One frame: clear, move/draw every ball, then handle key presses.
        g.screen.blit(self.background, (0, 0))
        for circle in self.circles:
            circle['pos'][0] += circle['movement'][0]
            circle['pos'][1] += circle['movement'][1]
            # Held arrow keys push the ball, but never past a border.
            if keymap.hold('u') and not outside_u(circle):
                circle['pos'][1] -= g.push_factor
            if keymap.hold('d') and not outside_d(circle):
                circle['pos'][1] += g.push_factor
            if keymap.hold('l') and not outside_l(circle):
                circle['pos'][0] -= g.push_factor
            if keymap.hold('r') and not outside_r(circle):
                circle['pos'][0] += g.push_factor
            update_movement(circle)  # bounce off borders
            pygame.draw.circle(g.screen, circle['color'], circle['pos'], circle['r'])
        if keymap.press('space'):
            self.circles.append(make_random_circle())
        if keymap.press('lshift'):
            keymap.set('switch')
            return polygon.PolygonContext()
    def setup(self):
        # No per-activation setup required.
        pass
"repo_name": "CatEars/PygameContext",
"path": "example_contexts/circle.py",
"copies": "1",
"size": "2860",
"license": "mit",
"hash": 729906810790811800,
"line_mean": 26.5096153846,
"line_max": 93,
"alpha_frac": 0.5486013986,
"autogenerated": false,
"ratio": 3.4499396863691194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44985410849691193,
"avg_score": null,
"num_lines": null
} |
__author__ = 'catherine'
# Thin CLI dispatcher around docutils' publish_cmdline: maps an
# rst2<something> command name (argv[1]) onto the matching docutils writer
# and forwards the remaining arguments.
if __name__ == "__main__":
    try:
        from docutils.core import publish_cmdline
        from docutils.utils import Reporter
    except ImportError:
        # Fix: the original bare `except:` swallowed EVERY exception
        # (including SystemExit/KeyboardInterrupt); only a failed import
        # should trigger this message.
        raise NameError("Cannot find `docutils` for the selected interpreter.")
    import sys
    command = sys.argv[1]
    args = sys.argv[2:]
    COMMANDS = {"rst2html": "html", "rst2latex": "latex",
                "rst2pseudoxml": "pseudoxml", "rst2s5": "s5", "rst2xml": "xml"}
    if command == "rst2odt":
        # ODT needs an explicit reader/writer pair.
        from docutils.writers.odf_odt import Writer, Reader
        writer = Writer()
        reader = Reader()
        publish_cmdline(reader=reader, writer=writer, argv=args)
    elif command == "rstpep2html":
        publish_cmdline(reader_name='pep', writer_name='pep_html', argv=args)
    elif command == "rst2html_no_code":
        publish_cmdline(writer_name="html",
                        settings_overrides={'syntax_highlight': 'none'}, argv=args)
    else:
        publish_cmdline(writer_name=COMMANDS[command],
                        settings_overrides={'report_level': Reporter.ERROR_LEVEL},
                        argv=args)
| {
"repo_name": "jwren/intellij-community",
"path": "python/helpers/rest_runners/rst2smth.py",
"copies": "8",
"size": "1126",
"license": "apache-2.0",
"hash": -7414342318307841000,
"line_mean": 34.1875,
"line_max": 83,
"alpha_frac": 0.5905861456,
"autogenerated": false,
"ratio": 3.923344947735192,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8513931093335192,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cauanicastro'
__copyright__ = "Copyright 2015, Cauani Castro"
__credits__ = ["Cauani Castro"]
__license__ = "Apache License 2.0"
__version__ = "1.0"
__maintainer__ = "Cauani Castro"
__email__ = "cauani.castro@hotmail.com"
__status__ = "Examination program"
def calculaRaiz(numero, aproximacoes):
    """Approximate the square root of `numero` with `aproximacoes`
    iterations of Newton's method and return a formatted report line.

    With aproximacoes == 0 the reported root is 0 (no iteration runs).
    """
    raiz = 0
    for passo in range(aproximacoes):
        if passo == 0:
            raiz = numero / 2  # initial guess: half the number
        else:
            # Newton update for f(x) = x^2 - numero.
            raiz = (raiz**2 + numero) / (2 * raiz)
    return "Num = %.5f Aprox = %d Raiz Quadrada = %.10f\n" % (numero, aproximacoes, raiz)
def main():
    """Interactive entry point (Python 3 input/print): repeatedly read a
    number and an iteration count, print the Newton square-root report;
    any number <= 0 exits the loop."""
    print("Este programa ira calcular a raiz quadrada de uma sequencia de numeros positivos, baseado no metodo de aproximacoes sucessivas de newton.")
    print("Para sair do programa digite um numero menor ou igual a zero.")
    while True:
        numero = float(input("Digite um numero (real, positivo) para calcular a sua raiz quadrada:\n"))
        if numero <= 0:
            # Sentinel: non-positive input terminates the program.
            break
        aproximacoes = int(input("Digite o numero (inteiro) de aproximacoes desejada:\n"))
        print(calculaRaiz(numero, aproximacoes))
    print("\n#####################################")
    print("           FIM DO PROGRAMA")
    print("#####################################")
if __name__ == '__main__':
    main()
"repo_name": "cauanicastro/Prog1Ifes",
"path": "atividade4.py",
"copies": "1",
"size": "1285",
"license": "apache-2.0",
"hash": 788944542810382800,
"line_mean": 37.9696969697,
"line_max": 150,
"alpha_frac": 0.5789883268,
"autogenerated": false,
"ratio": 2.9337899543378994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.896969503579196,
"avg_score": 0.008616649069187701,
"num_lines": 33
} |
__author__ = "Cauani Castro"
__copyright__ = "Copyright 2015, Cauani Castro"
__credits__ = ["Cauani Castro"]
__license__ = "Apache License 2.0"
__version__ = "1.0"
__maintainer__ = "Cauani Castro"
__email__ = "cauani.castro@hotmail.com"
__status__ = "Examination program"
def ExibeEstatisticas():
    """Print the end-of-run factory report from the module-level accumulators.

    Reads the globals filled in by ProcessaOperario; always returns True.
    """
    print("##################################")
    print("          REPORT DA FABRICA")
    print("##################################")
    print("Lista de operarios:\n")
    print(listaOperarios)
    print("A folha mensal de pagamento da fabrica e de: R$ %.2f" % totalFolhaPagamento)
    print("A fabricacao mensal foi de %d pecas" % totalPecasFabricadas)
    # Per-class / per-sex production averages; each guard avoids a
    # ZeroDivisionError when the group is empty.
    if operariosHomensA > 0:
        aux = pecasFabricadasHomemA / operariosHomensA
        print("A media de fabricacao de pecas pelos homens da classe A foi de %.2f pecas" % aux)
    if operariosHomensB > 0:
        aux = pecasFabricadasHomemB / operariosHomensB
        print("A media de fabricacao de pecas pelos homens da classe B foi de %.2f pecas" % aux)
    if operariosHomensC > 0:
        aux = pecasFabricadasHomemC / operariosHomensC
        print("A media de fabricacao de pecas pelos homens da classe C foi de %.2f pecas" % aux)
    if operariosMulherA > 0:
        aux = pecasFabricadasMulherA / operariosMulherA
        print("A media de fabricacao de pecas pelas mulheres da classe A foi de %.2f pecas" % aux)
    if operariosMulherB > 0:
        aux = pecasFabricadasMulherB / operariosMulherB
        print("A media de fabricacao de pecas pelas mulheres da classe B foi de %.2f pecas" % aux)
    if operariosMulherC > 0:
        aux = pecasFabricadasMulherC / operariosMulherC
        print("A media de fabricacao de pecas pelas mulheres da classe C foi de %.2f pecas" % aux)
    print("O operario de maior salario foi o operario de numero %d, com um salario de R$ %.2f" % (operarioMaiorSalarioNumero, operarioMaiorSalario))
    return True
def ProcessaOperario(numero, pecas, sexo):
    """Process one worker: derive salary/class from pieces produced and
    update the module-level report accumulators.

    numero: worker id; pecas: pieces produced in the month;
    sexo: 'm'/'M' counts as male, anything else as female.
    Always returns True.
    """
    # Declare the module-level accumulators this function mutates.
    global listaOperarios
    global totalFolhaPagamento
    global totalPecasFabricadas
    global operariosHomensA
    global operariosHomensB
    global operariosHomensC
    global operariosMulherA
    global operariosMulherB
    global operariosMulherC
    global pecasFabricadasHomemA
    global pecasFabricadasHomemB
    global pecasFabricadasHomemC
    global pecasFabricadasMulherA
    global pecasFabricadasMulherB
    global pecasFabricadasMulherC
    global operarioMaiorSalario
    global operarioMaiorSalarioNumero
    global salarioMinimo
    # Salary and class: base pay up to 30 pieces; 31-35 pieces pay a 3%
    # bonus per extra piece (class B), above 35 a 5% bonus (class C).
    salario = salarioMinimo
    classe = "A"
    if pecas > 30 and pecas <= 35:
        salario += (pecas - 30) * ((3 * salarioMinimo) / 100.00)
        classe = "B"
    elif pecas > 35:
        # NOTE(review): the 5% rate applies to ALL pieces above 30, not only
        # those above 35 -- confirm against the assignment specification.
        salario += (pecas - 30) * ((5 * salarioMinimo) / 100.00)
        classe = "C"
    # Update the report line, payroll total and production total.
    listaOperarios += "Operario %d - R$ %.2f\n" % (numero, salario)
    totalFolhaPagamento += salario
    totalPecasFabricadas += pecas
    # Track the highest salary (ties keep the most recently processed worker).
    if operarioMaiorSalario <= salario:
        operarioMaiorSalarioNumero = numero
        operarioMaiorSalario = salario
    # Update per-sex / per-class worker and production counters.
    if classe == "A":
        if (sexo == "m" or sexo == "M"):
            operariosHomensA += 1
            pecasFabricadasHomemA += pecas
        else:
            operariosMulherA += 1
            pecasFabricadasMulherA += pecas
    elif classe == "B":
        if (sexo == "m" or sexo == "M"):
            operariosHomensB += 1
            pecasFabricadasHomemB += pecas
        else:
            operariosMulherB += 1
            pecasFabricadasMulherB += pecas
    elif classe == "C":
        if (sexo == "m" or sexo == "M"):
            operariosHomensC += 1
            pecasFabricadasHomemC += pecas
        else:
            operariosMulherC += 1
            pecasFabricadasMulherC += pecas
    # Done.
    return True
def main():
    """Interactive entry point: read workers until id 0 is entered, then
    print the aggregated factory report.  Always returns True."""
    print("Este programa ira ler uma lista de funcionarios e imprimir os dados em seguida.")
    print("Entre com os dados do funcionario conforme solicitado. Para sair do modo de insercao de funcionarios e exibir os dados, digite 0 para o numero do operario")
    while True:
        numero = int(input("Entre com o numero do operario (ex.: 001)\n"))
        if numero == 0:
            # Sentinel: stop reading and show the statistics.
            break
        pecas = int(input("Entre com o numero de pecas fabricada pelo operario (ex.: 32)\n"))
        sexo = " "
        sexo = input("Entre com o sexo do operario (M ou F) \n")
        ProcessaOperario(numero, pecas, sexo)
    ExibeEstatisticas()
    return True
# Variable declarations: module-level state shared between ProcessaOperario
# (which mutates it via `global`) and ExibeEstatisticas (which reads it).
salarioMinimo = 2000.00          # base (minimum) monthly salary
listaOperarios = ""              # accumulated report lines, one per worker
totalFolhaPagamento = 0          # payroll total
totalPecasFabricadas = 0         # total pieces produced
# Worker counts per sex (Homens/Mulher) and class (A/B/C).
operariosHomensA = 0
operariosHomensB = 0
operariosHomensC = 0
operariosMulherA = 0
operariosMulherB = 0
operariosMulherC = 0
# Pieces produced per sex and class.
pecasFabricadasHomemA = 0
pecasFabricadasHomemB = 0
pecasFabricadasHomemC = 0
pecasFabricadasMulherA = 0
pecasFabricadasMulherB = 0
pecasFabricadasMulherC = 0
# Highest salary seen so far and the id of the worker who earned it.
operarioMaiorSalario = 0
operarioMaiorSalarioNumero = 0
if __name__ == '__main__':
    main()
| {
"repo_name": "cauanicastro/Prog1Ifes",
"path": "atividade3.py",
"copies": "1",
"size": "5150",
"license": "apache-2.0",
"hash": 9191986900468464000,
"line_mean": 34.7638888889,
"line_max": 167,
"alpha_frac": 0.6576699029,
"autogenerated": false,
"ratio": 2.9211571185479297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40788270214479294,
"avg_score": null,
"num_lines": null
} |
__author__ = "cauanicastro"
__copyright__ = "Copyright 2016, cauanicastro"
__credits__ = ["Cauani Castro"]
__license__ = "Apache License 2.0"
__version__ = "1.0"
__created_on__ = "16-04-13"
__maintainer__ = "cauanicastro"
__email__ = "cauani.castro@hotmail.com"
def contaPalavras(linha):
    """Count the words in `linha`.

    A word is a run of alphabetic characters; a '-' counts as part of a
    word only when it follows word characters (e.g. "well-known" is one
    word).
    """
    separadores = ['-']
    total = 0
    palavra_atual = ""
    for caractere in linha:
        if caractere.isalpha() or (palavra_atual and caractere in separadores):
            palavra_atual += caractere
        elif palavra_atual:
            # Word boundary reached: count the word just finished.
            total += 1
            palavra_atual = ""
    if palavra_atual:
        total += 1  # line ended inside a word
    return total
def separaPal(texto):
    """Tokenize `texto`: alphanumeric runs become tokens, and a span opened
    by a quote character is collected as a single token up to the matching
    closing quote (quotes included in the token)."""
    lista = []
    # Different keyboards may produce different characters accepted as
    # "quotes"; include all of them.
    aspas = ["'", '"', "`"]
    aux = ""
    citacao = False  # holds the opening quote char while inside a quotation
    for i in texto:
        if not i.isalnum():
            if citacao:
                if i == citacao:
                    # Matching closing quote: finish the quoted token.
                    aux += i
                    citacao = False
                    lista.append(aux)
                    aux = ""
                else:
                    aux += i
            else:
                if aux:
                    lista.append(aux)
                    aux = ""
                if i in aspas:
                    # Opening quote: start collecting a quoted token.
                    citacao = i
                    aux = i
        else:
            aux += i
    if aux:
        lista.append(aux)  # flush a trailing (possibly unterminated) token
    return lista
def extraiTokens(texto):
    """Split `texto` into tokens by character class.

    NOTE(review): relies on a function `checaCampo(l)` that is not defined
    anywhere in this module -- as written, calling this raises NameError.
    Presumably checaCampo returns a class/type tag for a character (falsy
    for separators) and a token ends when the class changes; confirm its
    contract before use.
    """
    auxLista = []
    auxToken = ""
    auxTipo = ""
    for l in texto:
        tipo = checaCampo(l)
        if (not tipo) or (auxTipo and tipo != auxTipo):
            # Separator, or the character class changed: close current token.
            if auxToken:
                auxLista.append(auxToken)
            if (not l.isspace()):
                auxToken = l
            else:
                auxToken = ""
        else:
            auxToken += l
        auxTipo = tipo
    if auxToken:
        auxLista.append(auxToken)
    return auxLista
'''
def abreArquivoLista(nome):
arquivo = open(nome, 'rt')
return [linha.strip() for linha in arquivo]
'''
def abreArquivoLista(nome):
    """Read text file `nome` and return its lines, stripped, as a list.

    Returns [] for an empty file.  (The original returned
    `listRetorno or False`, i.e. the value False for empty files; callers
    such as concatarq and interarq immediately use the result as a list,
    so the False branch could only crash them with a TypeError.  An empty
    list is still falsy, so truthiness checks keep working.)
    """
    with open(nome, 'rt') as arquivo:
        return [linha.strip() for linha in arquivo]
def abreListaArquivo(nomes):
    """Open every file named in `nomes` for text reading and return the list
    of open file objects (callers are responsible for closing them)."""
    abertos = []
    for nome in nomes:
        abertos.append(open(nome, 'rt'))
    return abertos
def abreArquivoCallback(nome, callback):
    """Apply `callback` to each stripped line of file `nome` and return the
    list of results."""
    resultados = []
    with open(nome, 'rt') as arquivo:
        for linha in arquivo:
            resultados.append(callback(linha.strip()))
    return resultados
def salvaArquivo(nome, dados):
    """Write the strings in `dados` to file `nome`, newline-separated
    (no trailing newline).  Returns None, matching the original contract
    (the close() result)."""
    conteudo = "\n".join(dados)
    with open(nome, 'wt') as arquivo:
        arquivo.write(conteudo)
    # Redundant close on an already-closed file is a no-op and returns None.
    return arquivo.close()
def reverteLinha(linha):
    """Return `linha` with its characters in reverse order."""
    return "".join(reversed(linha))
def coparq(nome, novo_nome):
    """Copy text file `nome` to `novo_nome` (line-based)."""
    linhas = abreArquivoLista(nome)
    return salvaArquivo(novo_nome, linhas)
def concatarq(arquivos):
    """Concatenate files: every name in `arquivos` except the last is a
    source; the last name is the destination that receives all their lines.
    Always returns True (no-op for an empty list)."""
    linhas = []
    if arquivos:
        for fonte in arquivos[:-1]:
            linhas.extend(abreArquivoLista(fonte))
        salvaArquivo(arquivos[-1], linhas)
    return True
def interarq(arquivos):
    """Interleave files line by line.

    All names in `arquivos` except the last are sources; the last is the
    destination.  Line i of every source is written before line i+1 of any
    source; sources shorter than the longest simply stop contributing.
    Always returns True.
    """
    aux = []
    novaLista = []
    maxEl = 0
    for index in range(len(arquivos)):
        arquivo = arquivos[index]
        if index == len(arquivos) - 1:
            # Last name reached: build the interleaved list and write it out.
            for i in range(maxEl):
                for a in aux:
                    if i >= len(a):
                        continue
                    novaLista.append(a[i])
            salvaArquivo(arquivo, novaLista)
            break
        arquivoAux = abreArquivoLista(arquivo)
        # pre-conditional-expression and/or idiom for max(maxEl, len(...)).
        maxEl = maxEl > len(arquivoAux) and maxEl or len(arquivoAux)
        # NOTE(review): the file is read twice (arquivoAux is discarded).
        aux.append(abreArquivoLista(arquivo))
    return True
def interarq_noaux(arquivos):
    """Interleave files line by line without buffering whole files.

    Round-robins one readline() at a time across the open sources, dropping
    a source when it is exhausted; writes the result to the last name in
    `arquivos`.  NOTE(review): source file objects are never closed, and
    cont is still incremented after a deletion, which skips the following
    source for one round -- confirm both are acceptable.
    """
    novaLista = []
    nome_arquivo = arquivos[-1]
    arquivos = abreListaArquivo(arquivos[:-1])
    cont = 0
    while arquivos:
        if cont >= len(arquivos):
            cont = 0  # wrap around to the first still-open source
        el = arquivos[cont]
        aux = el.readline()
        if aux:
            novaLista.append(aux.strip())
        else:
            # Exhausted source: drop it from the rotation.
            del arquivos[cont]
        cont += 1
    return salvaArquivo(nome_arquivo, novaLista)
def separaPalavraCompara(linha, valor):
    """Count how many words of `linha` are exactly equal to `valor`.
    Words are alphabetic runs; an internal '-' is part of a word."""
    separadores = ['-']
    ocorrencias = 0
    atual = ""
    for caractere in linha:
        if caractere.isalpha() or (atual and caractere in separadores):
            atual += caractere
        else:
            # Word boundary: compare the finished word against valor.
            if atual and atual == valor:
                ocorrencias += 1
            atual = ""
    if atual and atual == valor:
        ocorrencias += 1  # line ended inside a matching word
    return ocorrencias
def separaPalavraDicionario(linha, dicionario):
    """Tally the words of `linha` into `dicionario`, mutating and returning
    it.  Words are alphabetic runs ('-' kept when inside a word); words of
    length <= 2 are ignored."""
    separadores = ['-']
    atual = ""
    for caractere in linha:
        if caractere.isalpha() or (atual and caractere in separadores):
            atual += caractere
        else:
            if len(atual) > 2:
                dicionario[atual] = dicionario.get(atual, 0) + 1
            atual = ""
    if len(atual) > 2:
        dicionario[atual] = dicionario.get(atual, 0) + 1
    return dicionario
def geraDicionario(arquivo):
    """Build a word -> count dictionary from every line of file `arquivo`
    (lowercased; words shorter than 3 characters are ignored)."""
    dicionario = dict()
    with open(arquivo, 'rt') as arq:
        for linha in arq:
            separaPalavraDicionario(linha.strip().lower(), dicionario)
    return dicionario
def comparaDicionarios(dic1, dic2):
    """Return [(key, mean_count)] for every key present in BOTH dicts,
    where mean_count is the integer average of the two counts."""
    return [(chave, (dic1[chave] + dic2[chave]) // 2)
            for chave in dic1 if chave in dic2]
def intersec(arquivo1, arquivo2):
    """Word-frequency intersection of two text files: the words common to
    both, paired with the average of their per-file counts."""
    return comparaDicionarios(geraDicionario(arquivo1), geraDicionario(arquivo2))
def ordenaCrescente(lista):
    """Insertion-sort a list of (item, count) pairs, ascending by count.
    Note: a new element is inserted BEFORE existing equal counts."""
    ordenada = []
    for par in lista:
        posicao = 0
        # Advance past strictly smaller counts.
        while posicao < len(ordenada) and par[1] > ordenada[posicao][1]:
            posicao += 1
        ordenada.insert(posicao, par)
    return ordenada
def ordenaCrescenteDividirConquistar(lista):
    """Sort (item, count) pairs ascending by count with a divide-and-conquer
    merge sort (O(n log n)).

    Fix: the original body was a verbatim copy of ordenaCrescente's O(n^2)
    insertion sort, despite the name promising divide-and-conquer.  This
    merge sort is stable, so tie order for equal counts follows input order
    (ordenaCrescente reverses ties).
    """
    if len(lista) <= 1:
        return list(lista)
    meio = len(lista) // 2
    esquerda = ordenaCrescenteDividirConquistar(lista[:meio])
    direita = ordenaCrescenteDividirConquistar(lista[meio:])
    # Merge the two sorted halves; <= keeps the sort stable.
    resultado = []
    i = j = 0
    while i < len(esquerda) and j < len(direita):
        if esquerda[i][1] <= direita[j][1]:
            resultado.append(esquerda[i])
            i += 1
        else:
            resultado.append(direita[j])
            j += 1
    resultado.extend(esquerda[i:])
    resultado.extend(direita[j:])
    return resultado
| {
"repo_name": "cauanicastro/Prog1Ifes",
"path": "biblioteca.py",
"copies": "1",
"size": "6091",
"license": "apache-2.0",
"hash": 5601128984822846000,
"line_mean": 24.2738589212,
"line_max": 96,
"alpha_frac": 0.5319323592,
"autogenerated": false,
"ratio": 3.226165254237288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.922393622616882,
"avg_score": 0.006832277453693786,
"num_lines": 241
} |
__author__ = 'cbdasg'
import requests
from requests_oauthlib import OAuth1
import hmac
from hashlib import sha1
def fix_signature_url_chars(text):
    """Percent-encode the characters an OAuth1 signature base string must
    not contain literally (':', '/', '=', '&').

    Fix: the parameter was renamed from `str`, which shadowed the builtin.
    """
    text = text.replace(":", "%3A")
    text = text.replace("/", "%2F")
    text = text.replace("=", "%3D")
    text = text.replace("&", "%26")
    return text
def fix_request_url_chars(text):
    """Percent-encode '+' for use inside a request URL.

    Fix: the parameter was renamed from `str`, which shadowed the builtin.
    """
    return text.replace("+", "%2B")
def send_request(key, signature_fields, request_fields, payload_values):
    """Sign and send an OAuth1 request-token GET to the WordPress endpoint.

    Builds the OAuth1 signature base string ("GET&<url>&<params>"), signs it
    with HMAC-SHA1 using *key*, stores the escaped base64 signature into
    payload_values["oauth_signature"] (the dict is mutated), prints the URL
    it assembled by hand, then performs the actual request via `requests`
    and prints the response body.

    NOTE(review): Python 2 only - `print` statements and the
    str.encode("base64") codec do not exist on Python 3.

    :param key: HMAC signing key ("<consumer_secret>&<token_secret>")
    :param signature_fields: payload keys, in signature base-string order
    :param request_fields: payload keys placed in the printed URL
    :param payload_values: dict of OAuth parameter values (mutated in place)
    """
    cmd = "GET"
    url = "http://pinkyscreativerecipes.com/recipes/oauth1/request"
    # Signature base string: METHOD & encoded-URL & encoded-parameter-string.
    raw = cmd + "&" + fix_signature_url_chars(url) + "&"
    prefix = ""
    for signature_field in signature_fields:
        raw += fix_signature_url_chars(prefix + signature_field + "=" + payload_values[signature_field])
        prefix = "&"
    hashed=hmac.new(key, raw, sha1)
    # Base64-encode the HMAC digest and URL-escape '+' for transport.
    payload_values["oauth_signature"] = fix_request_url_chars(hashed.digest().encode("base64").rstrip("\n"))
    req_txt = url
    prefix = "?"
    for request_field in request_fields:
        req_txt += (prefix + request_field + "=" + payload_values[request_field])
        prefix = "&"
    print req_txt
    r = requests.get("http://pinkyscreativerecipes.com/recipes/oauth1/request", params = payload_values)
    print str(r.text)
# Example invocation with a fixed nonce/timestamp. The signature fields must
# be listed in the exact order expected by the OAuth1 base string.
signature_fields_1 = ["oauth_consumer_key", "oauth_nonce", "oauth_signature_method",
                      "oauth_timestamp", "oauth_version"]
request_fields_1 = ["oauth_consumer_key", "oauth_signature_method", "oauth_timestamp",
                    "oauth_nonce", "oauth_version", "oauth_signature"]
payload_values_1 = {"oauth_consumer_key": "zTOCIM1R772I", "oauth_signature_method": "HMAC-SHA1",
                    "oauth_timestamp": "1499648039", "oauth_nonce": "D7LCfBJGJxM", "oauth_version": "1.0",
                    "oauth_signature": ""}
key = "qZurqPPfuD1yl4DEzAXj3cCyvLMWPblUWLeJprbJtHonHeRt&"
# Bug fix: the stray "++++" that followed the closing parenthesis was a
# syntax error and has been removed.
send_request(key, signature_fields_1, request_fields_1, payload_values_1)
| {
"repo_name": "unchaoss/unchaoss",
"path": "engine/py/wordpressops/wordpressops.py",
"copies": "2",
"size": "1920",
"license": "apache-2.0",
"hash": 9090684381935403000,
"line_mean": 34.5555555556,
"line_max": 108,
"alpha_frac": 0.6427083333,
"autogenerated": false,
"ratio": 3.1423895253682486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9657406884106534,
"avg_score": 0.02553819491234286,
"num_lines": 54
} |
__author__ = 'cbdasg'
import json
#=============================== SINLGE KEY ENCRYPT/DECRYPT ====================================
from Crypto.Cipher import AES
import base64, os
#======= SINGLE KEY ENCRYPT/DECRYPT (https://gist.github.com/syedrakib/d71c463fc61852b8d366) ==========
class singleKeCryptDecrypt:
    """Symmetric (single-key) AES encrypt/decrypt helper.

    The secret key is stored base64-encoded; messages are padded with
    `padding_character` to a multiple of 16 bytes before encryption and the
    padding is stripped after decryption (so trailing padding characters in
    the original plaintext are lost).

    NOTE(review): AES.new() is called without an explicit mode, which in
    PyCrypto defaults to ECB - confirm whether a mode with an IV is wanted.
    """
    # Inspired from https://pythonprogramming.net/encryption-and-decryption-in-python-code-example-with-explanation/
    # PyCrypto docs available at https://www.dlitz.net/software/pycrypto/api/2.6/
    def __init__(self, master_password = None, padding_character = ' '):
        """
        :param master_password: optional key material; a random key is
                                generated when None
        :param padding_character: single character used to pad keys/messages
        """
        self.padding_character = padding_character
        # AES key length must be either 16, 24, or 32 bytes long
        AES_key_length = 16 # use larger value in production
        if master_password == None:
            # generate a random secret key with the decided key length
            # this secret key will be used to create AES cipher for encryption/decryption
            secret_key = os.urandom(AES_key_length)
        else:
            # Pad the key to the correct length
            secret_key = master_password
            key_length = len(master_password)
            while key_length < AES_key_length:
                secret_key += padding_character
                key_length += 1
        # encode this secret key for storing safely in database
        self.encoded_secret_key = base64.b64encode(secret_key)
    def get_key(self):
        """Return the base64-encoded secret key."""
        return self.encoded_secret_key
    def encrypt_message(self, private_msg):
        """Encrypt *private_msg* and return the ciphertext base64-encoded."""
        # decode the encoded secret key
        secret_key = base64.b64decode(self.encoded_secret_key)
        # use the decoded secret key to create a AES cipher
        cipher = AES.new(secret_key)
        # pad the private_msg
        # because AES encryption requires the length of the msg to be a multiple of 16
        padded_private_msg = private_msg + (self.padding_character * ((16-len(private_msg)) % 16))
        # use the cipher to encrypt the padded message
        encrypted_msg = cipher.encrypt(padded_private_msg)
        # encode the encrypted msg for storing safely in the database
        encoded_encrypted_msg = base64.b64encode(encrypted_msg)
        # return encoded encrypted message
        return encoded_encrypted_msg
    def decrypt_message(self, encoded_encrypted_msg):
        """Decrypt a base64-encoded ciphertext; return the unpadded plaintext."""
        # decode the encoded encrypted message and encoded secret key
        secret_key = base64.b64decode(self.encoded_secret_key)
        encrypted_msg = base64.b64decode(encoded_encrypted_msg)
        # use the decoded secret key to create a AES cipher
        cipher = AES.new(secret_key)
        # use the cipher to decrypt the encrypted message
        decrypted_msg = cipher.decrypt(encrypted_msg)
        # unpad the encrypted message
        unpadded_private_msg = decrypted_msg.rstrip(self.padding_character)
        # return a decrypted original private message
        return unpadded_private_msg
def single_key_demo():
    """Round-trip a sample message through singleKeCryptDecrypt and print
    the key, ciphertext and recovered plaintext."""
    demo_msg = """
Lorem ipsum dolor sit amet, malis recteque posidonium ea sit, te vis meliore verterem. Duis movet comprehensam eam ex, te mea possim luptatum gloriatur. Modus summo epicuri eu nec. Ex placerat complectitur eos.
"""
    crypter = singleKeCryptDecrypt()
    ciphered = crypter.encrypt_message(demo_msg)
    deciphered = crypter.decrypt_message(ciphered)
    the_key = crypter.get_key()
    for label, value in ((" Secret Key", the_key),
                         ("Encrypted Msg", ciphered),
                         ("Decrypted Msg", deciphered)):
        print("%s: %s - (%d)" % (label, value, len(value)))
#=============================== PUB/PVT ENCRYPT/DECRYPT ====================================
from Crypto import Random
from Crypto.PublicKey import RSA
import base64
#======= PUB/PVT ENCRYPT/DECRYPT (https://gist.github.com/syedrakib/241b68f5aeaefd7ef8e2) ==========
class pubPvtCryptDecrypt:
    """Asymmetric (RSA public/private key) encrypt/decrypt helper.

    NOTE(review): 1024-bit RSA and PyCrypto's raw `publickey.encrypt`
    (textbook RSA, no OAEP padding) are both considered insecure; the
    inline comment below already flags the modulus size for production.
    """
    # Inspired from http://coding4streetcred.com/blog/post/Asymmetric-Encryption-Revisited-(in-PyCrypto)
    # PyCrypto docs available at https://www.dlitz.net/software/pycrypto/api/2.6/
    def __init__(self):
        """Generate a fresh RSA key pair seeded from Random.new()."""
        # RSA modulus length must be a multiple of 256 and >= 1024
        modulus_length = 256*4 # use larger value in production
        self.privatekey = RSA.generate(modulus_length, Random.new().read)
        self.publickey = self.privatekey.publickey()
    def get_keys(self):
        """Return the (private, public) RSA key objects."""
        return (self.privatekey, self.publickey)
    def encrypt_message(self, a_message):
        """Encrypt *a_message* with the public key; return it base64-encoded."""
        encrypted_msg = self.publickey.encrypt(a_message, 32)[0]
        encoded_encrypted_msg = base64.b64encode(encrypted_msg) # base64 encoded strings are database friendly
        return encoded_encrypted_msg
    def decrypt_message(self, encoded_encrypted_msg):
        """Base64-decode and private-key-decrypt a message from encrypt_message."""
        decoded_encrypted_msg = base64.b64decode(encoded_encrypted_msg)
        decoded_decrypted_msg = self.privatekey.decrypt(decoded_encrypted_msg)
        return decoded_decrypted_msg
def pub_pvt_demo():
    """Round-trip a message through RSA encryption and print keys and results."""
    rsa_helper = pubPvtCryptDecrypt()
    plain = "The quick brown fox jumped over the lazy dog"
    ciphered = rsa_helper.encrypt_message(plain)
    recovered = rsa_helper.decrypt_message(ciphered)
    priv, pub = rsa_helper.get_keys()
    print("%s - (%d)" % (priv.exportKey(), len(priv.exportKey())))
    print("%s - (%d)" % (pub.exportKey(), len(pub.exportKey())))
    print(" Original content: %s - (%d)" % (plain, len(plain)))
    print("Encrypted message: %s - (%d)" % (ciphered, len(ciphered)))
    print("Decrypted message: %s - (%d)" % (recovered, len(recovered)))
#======= JSON FILE ENCRYPT/DECRYPT ==========
import json
# Replaces specifed values in a JSON file with their encrypted values. The keys list
# is used to locate the values to replace. A key of the form key1__key2 means
# key1 is a dict containing key2 as a key (and so on with __key3 etc). Encrypt
# is done using the supplied master key
def encrypt_json(in_json_file, out_json_file, master_key, encrypt_keys_list):
    """Encrypt selected values in a JSON file and write the result.

    Each entry in *encrypt_keys_list* names a value to replace with its
    encrypted form. A key of the form "key1__key2" means key1 is a dict
    containing key2 (and so on with __key3 etc.). Encryption uses the
    supplied master key.

    Returns None - aborting WITHOUT writing the output file - if any key on
    the path is not present in the document (preserves the original
    early-exit behavior).
    """
    crypter = singleKeCryptDecrypt(master_key)
    with open(in_json_file) as fd:
        json_to_encrypt = json.load(fd)
    for key_expr in encrypt_keys_list:
        node = json_to_encrypt
        key_path = key_expr.split("__")
        # Walk down to the dict that holds the final (leaf) key.
        for parent_key in key_path[:-1]:
            if parent_key not in node:
                return None
            node = node[parent_key]
        leaf_key = key_path[-1]
        if leaf_key not in node:
            return None
        # NOTE: a leftover debug print of each key and subtree was removed.
        node[leaf_key] = crypter.encrypt_message(node[leaf_key])
    with open(out_json_file, "w") as fd:
        json.dump(json_to_encrypt, fd)
# Replaces specifed values in a JSON file with their decrypted values. The keys list
# is used to locate the values to replace. A key of the form key1__key2 means
# key1 is a dict containing key2 as a key (and so on with __key3 etc). Decrypt
# is done using the supplied master key
def decrypt_json(in_json_file, out_json_file, master_key, decrypt_keys_list):
    """Decrypt selected values in a JSON file and write the result.

    Keys of the form "key1__key2" address nested dicts (key1 contains key2,
    and so on with __key3 etc.); decryption uses the supplied master key.
    Returns None without writing the output file if any key is missing.
    """
    crypter = singleKeCryptDecrypt(master_key)
    with open(in_json_file) as fd:
        document = json.load(fd)
    for key_expr in decrypt_keys_list:
        node = document
        key_path = key_expr.split("__")
        # Walk down to the dict that holds the final (leaf) key.
        for parent_key in key_path[:-1]:
            if parent_key not in node:
                return None
            node = node[parent_key]
        leaf_key = key_path[-1]
        if leaf_key not in node:
            return None
        node[leaf_key] = crypter.decrypt_message(node[leaf_key])
    with open(out_json_file, "w") as fd:
        json.dump(document, fd)
if __name__ == "__main__":
    # NOTE(review): exit(0) makes the demo calls below unreachable - the
    # demos appear deliberately disabled; remove the exit to run them.
    exit(0)
    single_key_demo()
    pub_pvt_demo()
| {
"repo_name": "unchaoss/unchaoss",
"path": "engine/py/core/util.py",
"copies": "1",
"size": "8185",
"license": "apache-2.0",
"hash": 813533174354395500,
"line_mean": 43.0053763441,
"line_max": 215,
"alpha_frac": 0.6536346976,
"autogenerated": false,
"ratio": 3.5143838557320737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9641001542035055,
"avg_score": 0.005403402259403913,
"num_lines": 186
} |
__author__ = 'cbdasg'
import os
import util
def get_credentials_base():
    """Return the per-user credentials directory (~/.credentials), creating
    it on first use.

    Uses try/except around makedirs instead of an exists() pre-check, so a
    directory created concurrently between check and call no longer raises.

    :return: absolute path of the credentials directory
    """
    home_dir = os.path.expanduser('~')
    credentials_base = os.path.join(home_dir, '.credentials')
    try:
        os.makedirs(credentials_base)
    except OSError:
        # Already exists: fine. Re-raise only genuine creation failures.
        if not os.path.isdir(credentials_base):
            raise
    return credentials_base
# UNCHAOSS expects the same master key to be used on every run but does not save
# the key itself as a security measure. The key must be externally provided
# (entered by a user using getpass or a through a web page displayed by a
# locally running server). After entry, and as a precaution against typos,
# there is a one time manual step where the master key is manually encrypted
# using itself (i.e. the master key is# both the text to encrypt and the
# encryption key) and saved in a file used by the code to verifies user entries.
# The class singleKeCryptDecrypt can be used for this manual step
def cross_check_master_key(candidate):
    """Verify a candidate master key against the stored self-encrypted value.

    The cross-check file holds the master key encrypted with itself (a one
    time manual step); the candidate passes when encrypting it with itself
    reproduces that stored value.

    :param candidate: the externally supplied master key to verify
    :return: True when the candidate matches, False otherwise
    """
    check_path = os.path.join(get_credentials_base(), "master_password_cross_check")
    with open(check_path) as fd:
        expected = fd.readlines()[0].strip()
    crypter = util.singleKeCryptDecrypt(candidate)
    return crypter.encrypt_message(candidate) == expected
| {
"repo_name": "unchaoss/unchaoss",
"path": "engine/py/core/core.py",
"copies": "1",
"size": "1402",
"license": "apache-2.0",
"hash": 634879932728616400,
"line_mean": 40.2352941176,
"line_max": 106,
"alpha_frac": 0.7368045649,
"autogenerated": false,
"ratio": 3.728723404255319,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4965527969155319,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cb'
import threading
import socket
import re
import traceback
import logging
logger = logging.getLogger(__name__)
class SyslogServer(threading.Thread):
    """Thread that listens for UDP syslog datagrams and pushes parsed
    (source IP, rewritten domain) pairs onto a worker queue.
    """
    def __init__(self, syslog_port, worker_queue):
        """
        :param syslog_port: UDP port to bind for incoming syslog datagrams
        :param worker_queue: queue receiving (source_ip, domain) tuples
        """
        self.syslog_port = syslog_port
        self.worker_queue = worker_queue
        # Matches "src=<ip> ... rewrite <domain>". Fixed: the pattern is now
        # a raw string and the IP dots are escaped - previously "." matched
        # ANY character, so non-address strings could slip through.
        self.format_string = \
            re.compile(r'src=(\d+\.\d+\.\d+\.\d+) .*rewrite (?=.{1,254}$)((?:(?!\d+\.|-)[a-zA-Z0-9_\-]{1,63}(?<!-)\.)+(?:[a-zA-Z]{2,}))')
        threading.Thread.__init__(self)
    def run(self):
        """Receive datagrams forever; queue every (ip, domain) hit."""
        try:
            syslog_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            listen_addr = ("", self.syslog_port)
            syslog_socket.bind(listen_addr)
            while True:
                data, addr = syslog_socket.recvfrom(2048)
                data = data.strip()
                hit = self.format_string.search(data)
                if hit:
                    self.worker_queue.put((hit.group(1), hit.group(2)))
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit are
            # not swallowed; log the traceback and let the thread exit.
            logger.error('%s' % traceback.format_exc())
# class TestSyslogServer(threading.Thread):
# def __init__(self, syslog_port, worker_queue):
# self.syslog_port = syslog_port
# self.worker_queue = worker_queue
# threading.Thread.__init__(self)
#
# def run(self):
# try:
# syslog_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#
# listen_addr = ("", self.syslog_port)
# syslog_socket.bind(listen_addr)
#
# while True:
# data, addr = syslog_socket.recvfrom(2048)
# logger.info(data)
# data = data.strip().split(" ")
# logger.info(data)
# hit = True
# if hit:
# self.worker_queue.put((data[0], data[1]))
# except:
# logger.error('%s' % traceback.format_exc())
| {
"repo_name": "carbonblack/cb-infoblox-connector",
"path": "cbinfoblox/syslog_server.py",
"copies": "1",
"size": "1895",
"license": "mit",
"hash": -7851317657341869000,
"line_mean": 31.6724137931,
"line_max": 133,
"alpha_frac": 0.5192612137,
"autogenerated": false,
"ratio": 3.515769944341373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45350311580413727,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cbn'
import json
from re import match
from flask import Flask, render_template, redirect, request, url_for
from redis import StrictRedis
import colorsys
import cooperhewitt.swatchbook as sb
from flask_paginate import Pagination
from random import choice
# Module-level singletons: the Flask app, a Redis connection on the default
# host/port, and the CSS3 named-color palette used for name/hex lookups.
app = Flask(__name__)
app.redis = StrictRedis()
app.palette = sb.load_palette('css3')
@app.route("/")
def browse():
    # Landing page.
    return render_template("home.html")
@app.route("/<color>")
@app.route("/<color>/page/<int:page>")
def single_color(color, page=1, per_page=50):
    """Render a paginated page of image records for one color.

    *color* may be a 6-digit hex code or a CSS3 color name; names are
    resolved through the palette and unresolvable ones redirect to /nope.
    When no records exist for the exact color, the closest palette color is
    substituted before fetching the page from Redis.
    """
    if not match("[0-9a-fA-F]{6}$", color):
        try:
            color = app.palette.hex(color).strip("#")
        except Exception:
            # Narrowed from a bare `except`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            return redirect(url_for("nope"))
    color = "#" + color
    total = app.redis.zcount(color, 0, 100)
    if not total:
        color, name = app.palette.closest(color)
    start_at = (page - 1) * per_page
    records = app.redis.zrevrange(color, start_at, start_at + per_page - 1)
    name = app.palette.name(color)
    pagination_href = "/" + color.strip("#") + "/page/{0}"
    pagination = Pagination(page=page, total=total, per_page=per_page, href=pagination_href, bs_version=3)
    return render_template("color.j2", records=[json.loads(record) for record in records],
                           hex=color.strip("#"), name=name, pagination=pagination, orig_color=request.args.get("color", None))
@app.route("/color")
def redirect_param_to_color():
    """Redirect /color?color=X to the canonical /<color> URL (or to /nope
    when no color parameter was supplied)."""
    requested = request.args.get('color', '')
    if not requested:
        return redirect(url_for('nope'))
    return redirect(url_for('single_color', color=requested))
@app.route("/colors")
def list_available_colors():
    """Return a JSON list of [hex, css3-name] pairs for every stored color,
    ordered by hue/saturation/value."""
    ordered = sorted(app.redis.keys("#*"), key=hex_to_hsv)
    css3 = sb.load_palette('css3')
    return json.dumps([[code.strip("#"), css3.name(code)] for code in ordered])
@app.route("/color/<color>")
@app.route("/color/<color>/page/<int:page>")
def images_for_a_color(color, page=0, per_page=50):
    """Return one JSON page of image records stored under a hex color key."""
    if not match("[0-9a-fA-F]{6}$", color):
        return json.dumps({"error": "Not a valid hex color"}), 500
    offset = page * per_page
    # can I do the offsets in redis itself and still maintain order?
    window = app.redis.zrevrangebyscore("#" + color, 100, 0)[offset:offset + per_page]
    return json.dumps([json.loads(entry) for entry in window]), 200
@app.route("/bubble")
def bubble_viz():
    """Render the bubble-chart view with per-color image counts.

    Bug fix: the attribute is `app.palette` (see the module setup), not
    `app.pallette`, which raised AttributeError on every request. The loop
    variable was also renamed from `hex`, which shadowed the builtin.
    """
    count = {}
    count['children'] = [
        {
            'hex': code,
            'name': app.palette.name(code),
            'count': app.redis.zcount(code, 0, 100)
        } for code in app.redis.keys("#*")
    ]
    return render_template('bubble.j2', count=json.dumps(count))
@app.route("/nope")
def nope():
    # Fallback page shown when a requested color cannot be resolved.
    return render_template('nope.j2')
@app.route("/rando-colrissian")
def random_color():
    """Redirect to the page for a randomly chosen stored color."""
    available = app.redis.keys("#*")
    picked = choice(available).strip("#")
    return redirect(url_for('single_color', color=picked))
def hex_to_hsv(color):
    """Convert a hex color string (with or without a leading '#') to HSV.

    Uses range() instead of the Python-2-only xrange() so the helper also
    works on Python 3 (range is equally valid on Python 2).

    :param color: e.g. "#ff0000" or "ff0000"
    :return: (h, s, v) floats as produced by colorsys.rgb_to_hsv
    """
    color = color.strip("#")
    r, g, b = (int(color[i:i + 2], 16) / 255.0 for i in range(0, 5, 2))
    return colorsys.rgb_to_hsv(r, g, b)
if __name__ == '__main__':
    # Development entry point; debug=True enables the reloader and the
    # interactive debugger and must not be used in production.
    app.run(debug=True)
| {
"repo_name": "bibliotechy/identify-by-color",
"path": "server.py",
"copies": "1",
"size": "3211",
"license": "mit",
"hash": 5352088659130418000,
"line_mean": 27.9279279279,
"line_max": 124,
"alpha_frac": 0.6194331984,
"autogenerated": false,
"ratio": 3.1823587710604557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43017919694604556,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cbryce'
__license__ = 'Apache2'
__date__ = '20150409'
__version__ = '0.00'
"""
Fuzzy-sansa - an Open Source Facial Recognition Tool Maybe
Copyright 2015 Chapin Bryce
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import cv2
# Following guide from : https://realpython.com/blog/python/face-recognition-with-python/
def check_versions():
    """Return True when numpy >= 1.9.2 and OpenCV >= 2.4.10 are installed.

    Bug fix: the previous implementation compared version STRINGS
    lexicographically, which misorders multi-digit components (e.g.
    "1.10.0" < "1.9.2" and "2.4.9" > "2.4.10" as strings); compare tuples
    of numeric components instead.
    """
    def _ver(version_string):
        # Keep the leading numeric components only ("2.4.10" -> (2, 4, 10)).
        parts = []
        for piece in version_string.split("."):
            if not piece.isdigit():
                break
            parts.append(int(piece))
        return tuple(parts)
    if _ver(np.__version__) < (1, 9, 2):
        return False
    if _ver(cv2.__version__) < (2, 4, 10):
        return False
    return True
def img(fin):
    """Detect faces in the image file *fin*, draw green rectangles around
    them, show the result and save it as '<fin>_out.jpg'.

    NOTE(review): Python 2 / OpenCV 2 only (print statement, cv2.cv flags).

    :param fin: path of the input image
    """
    casc = 'xml/haarcascade_frontalface_default.xml'
    faceCascade = cv2.CascadeClassifier(casc)
    # Read in image
    image = cv2.imread(fin)
    # Most libs work best in grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #Detect faces in image
    faces = faceCascade.detectMultiScale( # detectMultiScale is a general function to detect objects based on casc
        gray, # Hand gray image to process
        scaleFactor=1.1, # Handles size of faces, since some may be closer/further
        minNeighbors=9, # States minimum number of objects needed before the face is found
        minSize=(30,30), # minimum detection window size; smaller faces are ignored
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE # legacy OpenCV 2 flag: scale the image pyramid rather than the features
    )
    print "Found {0} faces!".format(len(faces))
    for (x, y, w, h) in faces:
        """
        x: X position of rectangle
        y: Y position of rectangle
        w: Width of rectangle
        h: Height of rectangle
        """
        cv2.rectangle( # draws the rectangles around each face
            image, # base image
            (x,y), # starting coordinates
            (x+w, y+h), # other 2 coordinates
            (0, 255, 0), # rectangle color as a BGR tuple (pure green)
            2) # line thickness in pixels
    cv2.imshow("Faces found", image)
    cv2.waitKey(0)
    cv2.imwrite(fin+'_out.jpg', image)
def vid(fin):
    """Run live face detection on video frames, drawing rectangles around
    detected faces; press 'q' in the display window to stop.

    Bug fix: `raise "Error opening video"` raised a string, which is a
    TypeError at runtime (string exceptions were removed from Python); a
    real IOError is raised instead.

    NOTE(review): despite the `fin` parameter, capture is opened on device 0
    (the default webcam) - the parameter is currently unused; confirm intent.
    """
    video_capture = cv2.VideoCapture(0)
    casc = 'xml/haarcascade_frontalface_default.xml'
    faceCascade = cv2.CascadeClassifier(casc)
    if not video_capture.isOpened():
        raise IOError("Error opening video")
    while True:
        ret, frame = video_capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Display the resulting frame
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
def img_match(f1, f2):
    """Match ORB keypoints between two images and display the 10 best matches.

    NOTE(review): the original comments referred to SIFT, but the code uses
    ORB (with Hamming-distance brute-force matching, the appropriate norm
    for ORB's binary descriptors).

    :param f1: path of the query image
    :param f2: path of the training image
    """
    from matplotlib import pyplot as plt
    img1 = cv2.imread(f1, 0) # Query Image
    img2 = cv2.imread(f2, 0) # Training Image
    # Initiate ORB detector
    orb = cv2.ORB()
    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Match descriptors.
    matches = bf.match(des1, des2)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)
    # Draw first 10 matches.
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], flags=2)
    plt.imshow(img3), plt.show()
if __name__ == '__main__':
    # Only run when the installed numpy/OpenCV versions are new enough.
    if check_versions():
        import os
        # Alternative entry points kept for reference:
        # for r, d, f in os.walk('img'):
        #     for entry in f:
        #         if not entry.endswith('_out.jpg'):
        #             img(r+'/'+entry)
        # vid('mp4/1.mp4')
        img_match('img/7.png', 'img/4.png')
"repo_name": "chapinb/fuzzy-sansa",
"path": "fuzzy-sansa.py",
"copies": "1",
"size": "4373",
"license": "apache-2.0",
"hash": -2060431087658788000,
"line_mean": 28.355704698,
"line_max": 114,
"alpha_frac": 0.6178824606,
"autogenerated": false,
"ratio": 3.382057231245166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4499939691845166,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cbryce'
import os
import logging
class CollectorBase(object):
    """
    Base Class for all collectors, allowing them to share the collection methods
    """
    def __init__(self):
        # Collection settings; the driver and concrete collectors fill these in.
        self.targ = ''  # mount point of the evidence being collected
        self.dest = ''  # output directory for the tarball and hash log
        self.case = ''  # case number
        self.eid = ''  # evidence ID, used as the tar file's base name
        self.target_user = ''  # user account(s) to restrict collection to
        self.extensions = ''  # file extensions to collect
        self.ext_for_users = False  # True: apply `extensions` only to target users
        self.hashtype = ''  # one of 'sha1', 'sha256', 'sha512', 'md5'
    def _hasher(self, data):
        """Return the hex digest of *data* using the configured self.hashtype.

        NOTE(review): an unrecognised hash type calls quit(), terminating the
        whole program instead of raising an error to the caller.
        """
        import hashlib
        if self.hashtype == 'sha1':
            h = hashlib.sha1()
        elif self.hashtype == 'sha256':
            h = hashlib.sha256()
        elif self.hashtype == 'sha512':
            h = hashlib.sha512()
        elif self.hashtype == 'md5':
            h = hashlib.md5()
        else:
            quit()
        h.update(data)
        return h.hexdigest()
    def _tarball(self, files):
        """
        Compresses file and returns the file path to the tar file

        Walks every path in *files* (directories recursively), hashes each
        regular file into <dest>/hashlist.txt and adds it to <dest>/<eid>.tar.
        Symlinks are skipped and logged; progress is shown via progressbar.

        NOTE(review): on IOError (tar file likely open elsewhere) the evidence
        ID is replaced with a timestamp and the whole archive is rebuilt
        recursively; the final return then reflects the NEW self.eid, so treat
        the returned path as best effort. Python 2 only (`except IOError, e`).
        """
        import tarfile
        import datetime
        import progressbar
        pbar = progressbar.ProgressBar(maxval=len(files), widgets=[progressbar.SimpleProgress(), ' Users Complete ',
                                                                   progressbar.Bar(), ' ',
                                                                   progressbar.Percentage(), ' ',
                                                                   progressbar.ETA()])
        pbar.start()
        hashlog = open(self.dest + '/hashlist.txt', 'w')
        hashlog.write('Time'.ljust(31) + self.hashtype.upper().ljust(45) + 'File Path\n')
        try:
            temp_tar = tarfile.open(self.dest + '/' + self.eid + '.tar', mode='w', dereference=False)
            p = 0
            for entry in files:
                p += 1
                pbar.update(p)
                if os.path.isdir(entry):
                    # Recurse into directories, hashing and archiving each file.
                    for root, dirs, file_entries in os.walk(entry):
                        for f in file_entries:
                            fname = os.path.join(root, f)
                            try:
                                hasher = self._hasher(open(fname, 'rb').read())
                            except IOError, e:
                                hasher = 'Could Not Process'
                            hashlog.write(str(datetime.datetime.now()).ljust(31) + str(hasher).ljust(45)
                                          + fname + '\n')
                            if not os.path.islink(fname):
                                temp_tar.add(fname)
                            else:
                                logging.warning('Link File excluded: ' + fname)
                elif os.path.isfile(entry):
                    try:
                        hasher = self._hasher(open(entry, 'rb').read())
                    except IOError, e:
                        hasher = 'Could Not Process'
                    hashlog.write(str(datetime.datetime.now()).ljust(31) + str(hasher).ljust(45)
                                  + entry + '\n')
                    temp_tar.add(entry)
            pbar.finish()
            hashlog.close()
            temp_tar.close()
        except IOError, e:
            # Retry under a timestamped evidence ID when the tar can't be written.
            import datetime
            self.eid = str(datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S'))
            logging.info('Tar File IO Error; Likely open. Writing to: ' + self.eid)
            self._tarball(files)
        return self.dest + '/' + self.eid + '.tar'
| {
"repo_name": "lcdi/LCDIC",
"path": "collectors/base.py",
"copies": "1",
"size": "3466",
"license": "mit",
"hash": 8031039916063307000,
"line_mean": 34.3673469388,
"line_max": 117,
"alpha_frac": 0.436237738,
"autogenerated": false,
"ratio": 4.572559366754618,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017026995836470894,
"num_lines": 98
} |
__author__ = 'cbryce'
import os
from .base import CollectorBase
class Ubuntu13(CollectorBase):
    """Collector for artifacts found on Ubuntu 13 systems."""
    def __init__(self):
        super(Ubuntu13, self).__init__()
        self.name = 'Ubuntu 13 Collector'
        self.description = 'Collection of artifacts found in Ubuntu 13'
        self.user_path = '/home/'
        self.app_data_location = '/.local'
        self.users = []
        self.etc = '/etc/'
        self.var_log = '/var/log/'
    def setup(self):
        """Prefix the artifact paths with the mounted target root."""
        self.user_path = self.targ + self.user_path
        self.users = []
        self.etc = self.targ + '/etc/'
        self.var_log = self.targ + '/var/log/'
    def collector(self):
        """Gather all user and configuration paths slated for collection."""
        paths = list(self.collect_user_data().values())
        paths.extend(self.collect_config_data())
        return paths
    def collect_user_data(self):
        """
        Browse all accounts and collect .local
        :return: dict of user name -> path of that user's .local directory
        """
        # Collect all user names
        # TODO: Allow specification of specific user name(s)
        found = {}
        for account in os.listdir(self.user_path):
            if os.path.isdir(self.user_path + account):
                found[account] = self.user_path + account + self.app_data_location
        self.users = found.keys()
        return found
    def collect_config_data(self):
        """Return the system configuration and log directories."""
        return [self.etc, self.var_log]
    def complete_collection(self, paths):
        """Archive all collected paths into the evidence tarball."""
        self._tarball(paths)
"repo_name": "lcdi/LCDIC",
"path": "collectors/debian.py",
"copies": "1",
"size": "1626",
"license": "mit",
"hash": -2425606261789310000,
"line_mean": 23.6515151515,
"line_max": 80,
"alpha_frac": 0.5590405904,
"autogenerated": false,
"ratio": 3.8349056603773586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48939462507773585,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cbryce'
import re
def cc():
    # Ask the user whether to continue; terminate the program on "Cancel".
    # NOTE(review): relies on `easygui` being imported inside the __main__
    # block below - calling cc() before that import raises NameError.
    if not easygui.ccbox('Would you like to continue?'):
        quit()
if __name__ == '__main__':
    import easygui
    import lcdic
    # Get case information
    msg = 'Enter Case Information'
    title = 'Case Data'
    fieldnames = ['Case Number', 'Evidence ID', 'Examiner Name']
    fieldvalues = []
    fieldvalues = easygui.multenterbox(msg, title, fieldnames, fieldvalues)
    if not fieldvalues:
        cc()
    else:
        # Re-prompt until every field is filled and the case / evidence IDs
        # match their expected formats.
        while 1:
            if fieldvalues == None:
                break
            errmsg = ''
            # Bug fix: the original did `fieldnames[i]` with `i` being the
            # ENTERED STRING (TypeError: list indices must be integers);
            # pair names and values with enumerate instead.
            for idx, entered in enumerate(fieldvalues):
                if entered.strip() == "":
                    errmsg = '"%s" is a required field.' % fieldnames[idx]
            case_pattern = '(FI|DR|RD)-[0-9]{8}-[0-9]{1,4}'
            if not re.search(case_pattern, fieldvalues[0]):
                errmsg = 'Case Name is not the correct format'
            evidnce_pattern = '[0-9]{1,4}-(HD|FM|SD|MD|PC|LT|EX|OM|EB|TC|RA|EA)-[0-9]{1,3}'
            if not re.search(evidnce_pattern, fieldvalues[1]):
                errmsg = 'Evidence ID is not in the correct format'
            if errmsg == '':
                break
            else:
                fieldvalues = easygui.multenterbox(errmsg, msg, fieldnames, fieldvalues)
                if not fieldvalues:
                    cc()
    # Get mount point
    targ = easygui.diropenbox('Target Drive Selection', 'Select Evidence Mount Point', '')
    if not targ:
        cc()
    else:
        while 1:
            if targ:
                break
            else:
                targ = easygui.diropenbox('Target Drive Selection', 'Select Evidence Mount Point', '')
    # Get output directory
    dest = easygui.diropenbox('Output Location Selection', 'Select Output Directory', '')
    if not dest:
        cc()
    else:
        while 1:
            if dest:
                break
            else:
                dest = easygui.diropenbox('Output Location Selection', 'Select Output Directory', '')
    # Select OS_Type
    choices = ['WinXP', 'Win7', 'Ubu13']
    os_type = easygui.buttonbox('Evidence OS Selection', 'Select the Evidence Operating System', choices)
    if not fieldvalues:
        cc()
    else:
        while 1:
            if os_type:
                break
            else:
                os_type = easygui.buttonbox('Evidence OS Selection', 'Select the Evidence Operating System', choices)
    lcdic.main(dest, targ, os_type.lower(), fieldvalues[0], fieldvalues[1], fieldvalues[2])
| {
"repo_name": "lcdi/LCDIC",
"path": "lcdic_gui.py",
"copies": "1",
"size": "2510",
"license": "mit",
"hash": -7217122511152022000,
"line_mean": 28.1860465116,
"line_max": 117,
"alpha_frac": 0.5426294821,
"autogenerated": false,
"ratio": 3.7406855439642324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4783315026064232,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cbryce'
__version__ = 0.00
import os
import logging
from .base import CollectorBase
class WinXP(CollectorBase):
"""
Collector Class for the
"""
def __init__(self):
super(WinXP, self).__init__()
self.name = 'Windows XP Collector'
self.description = 'Collection of artifacts found in Windows XP'
self.user_path = '\\Documents and Settings\\'
self.registry_config = '\\WINDOWS\\system32\\config\\'
self.app_data_location = '\\Application Data'
self.users = []
def setup(self):
self.user_path = self.targ + self.user_path
self.registry_config = self.targ + self.registry_config
self.users = []
def collector(self):
"""
Run collection
:return:
"""
logging.info('Collection of Users and their AppData started')
app = self.collect_appdata().values()
logging.info('Collection of Users and their AppData completed')
logging.info("Collection of registry and NTUSER.DATs started")
reg = self.collect_registry().values()
logging.info("Collection of registry and NTUSER.DATs completed")
logging.info("Collection of File System artifacts started")
self.collect_fs_data() # Nothing to extract
logging.info("Collection of File System artifacts completed")
logging.info("Collection of USB artifacts started")
usb = self.collect_usb().values()
logging.info("Collection of USB artifacts completed")
if self.target_user and self.extensions:
logging.info("Collection of User Document artifacts started")
for user in self.users:
if user in self.target_user:
# collect all data from this user
docs = self.collect_docs()
else:
docs = []
logging.info("Collection of User Document artifacts completed")
# Append data to paths
paths = []
for i in app:
paths.append(i)
for i in reg:
paths.append(i)
for i in usb:
paths.append(i)
for i in docs:
paths.append(i)
return paths
def collect_appdata(self):
"""
Browse all accounts and collect appdata
:return: list of dictionary of users and the appdata paths
"""
# Collect all user names
# TODO: Allow specification of specific user name(s)
user_dict = {}
for user in os.listdir(self.user_path):
if os.path.isdir(self.user_path + user):
user_dict[user] = self.user_path + user + self.app_data_location
self.users = user_dict.keys()
return user_dict
def collect_registry(self):
"""
Browse all accounts and collect NTUSER
:return: list of dictionary of users and the appdata paths
"""
# Collect for System32/Config
reg_hives = ['sam', 'software', 'system', 'security']
reg_dict = dict()
for entry in os.listdir(self.registry_config):
if os.path.isfile(self.registry_config + entry):
for hive in reg_hives:
if entry.lower() == hive:
reg_dict[hive] = self.registry_config + entry
for user in self.users:
if os.path.isfile(self.user_path + user + '\\NTUSER.DAT'):
reg_dict[user] = self.user_path + user + '\\NTUSER.DAT'
return reg_dict
def collect_mem(self):
pass
def collect_fs_data(self):
"""
Collect data from file system for parsing
:return: dictionary of paths to file system data
"""
# TODO Add in ability to zip extracted files
import subprocess
fsdata = dict()
if not os.path.exists(os.path.join(os.path.abspath(self.dest), 'filesystem_artifacts')):
os.makedirs(os.path.join(os.path.abspath(self.dest), 'filesystem_artifacts'))
fsdata['MFT'] = self.targ + '0'
fsdata['LogFile'] = self.targ + '2'
# fsdata['USN'] = self.targ + '\\$Extend\\$UsnJrnl' ## Not in WinXP
from lcdic import base
for key in fsdata.keys():
try:
cmd = base+'\\libs\\RawCopy\\RawCopy64.exe ' + fsdata[key] + ' ' + os.path.join(os.path.abspath(self.dest), 'filesystem_artifacts') + ' -AllAttr'
subprocess.call(cmd, shell=True)
except Exception, e:
logging.warning('Could not extract $MFT using 64bit tool...Trying 32bit...')
try:
cmd = base+'\\libs\\RawCopy\\RawCopy.exe ' + fsdata[key] + ' ' + os.path.join(os.path.abspath(self.dest), 'filesystem_artifacts') + ' -AllAttr'
subprocess.call(cmd, shell=True)
except Exception, e:
logging.error('Could not Extract ' + key + '!')
return None
def collect_usb(self):
"""
Collects the data needed for USB information
:return: dictionary of usb logs
"""
usb_logs = dict()
usb_logs['setupapi'] = self.targ + '/Windows/setupapi.log'
return usb_logs
def complete_collection(self, paths):
self._tarball(paths)
def collect_docs(self):
self.doc_array = list()
# Collect data from specified users
if self.extensions and self.ext_for_users:
for user in self.target_user:
for root, dirs, files in os.walk(self.user_path + user):
for entry in files:
if os.path.splitext(entry)[-1].strip('.') in self.extensions:
self.doc_array.append(os.path.join(root + '/' + entry))
# Collect data from all users
elif self.extensions and not self.ext_for_users:
for user in self.users:
for root, dirs, files in os.walk(self.user_path + user):
for entry in files:
if os.path.splitext(entry)[-1].strip('.') in self.extensions:
self.doc_array.append(os.path.join(root + '/' + entry))
return self.doc_array
class Win7(CollectorBase):
    """
    Artifact collector for Windows 7 targets.

    Gathers per-user AppData locations, registry hives and NTUSER.DAT files,
    USB setup logs, raw filesystem structures ($MFT, $LogFile, $UsnJrnl)
    and, optionally, user documents matching a set of file extensions.
    """
    def __init__(self):
        super(Win7, self).__init__()
        self.name = 'Windows 7 Collector'
        self.description = 'Collection of artifacts found in Windows 7'
        self.user_path = '\\Users\\'
        self.registry_config = '\\WINDOWS\\system32\\config\\'
        self.app_data_location = '\\AppData'
        self.users = []

    def setup(self):
        # NOTE(review): this prefixes self.targ onto the relative paths set
        # in __init__; calling setup() twice would double the prefix.
        self.user_path = self.targ + self.user_path
        self.registry_config = self.targ + self.registry_config
        self.users = []

    def collector(self):
        """
        Run every collection step and return the flat list of artifact paths.

        :return: list of artifact paths, in the order app, reg, usb, docs, fs
        """
        logging.info('Collection of Users and their AppData started')
        app = self.collect_appdata().values()
        logging.info('Collection of Users and their AppData completed')
        logging.info("Collection of registry and NTUSER.DATs started")
        reg = self.collect_registry().values()
        logging.info("Collection of registry and NTUSER.DATs completed")
        logging.info("Collection of File System artifacts started")
        # fs = self.collect_fs_data()
        fs = []
        logging.info("Collection of File System artifacts completed")
        logging.info("Collection of USB artifacts started")
        usb = self.collect_usb().values()
        logging.info("Collection of USB artifacts completed")
        # BUG FIX: docs was previously only assigned inside the branches
        # below, so collector() raised NameError whenever no extensions were
        # configured (or no targeted user was present in self.users).
        docs = []
        if self.extensions:
            logging.info("Collection of file extensions artifacts started")
            if self.ext_for_users and self.target_user:
                for user in self.users:
                    if user in self.target_user:
                        # collect all data from this user
                        docs = self.collect_docs()
            else:
                # Collect files matching extension on any location of system
                docs = self.collect_docs()
            logging.info("Collection of file extensions artifacts completed")
        paths = []
        for group in (app, reg, usb, docs, fs):
            paths.extend(group)
        return paths

    def collect_appdata(self):
        """
        Browse all accounts and collect their AppData paths.

        Side effect: caches the discovered account names on self.users for
        the later collection steps.

        :return: dict mapping user name to that user's AppData path
        """
        # TODO: Allow specification of specific user name(s)
        user_dict = {}
        for user in os.listdir(self.user_path):
            if os.path.isdir(self.user_path + user):
                user_dict[user] = self.user_path + user + self.app_data_location
        self.users = user_dict.keys()
        return user_dict

    def collect_registry(self):
        """
        Collect the system registry hives and each user's NTUSER.DAT.

        :return: dict mapping hive/user name to the hive file path
        """
        # Hives expected under System32\config
        reg_hives = ['sam', 'software', 'system', 'security']
        reg_dict = dict()
        for entry in os.listdir(self.registry_config):
            if os.path.isfile(self.registry_config + entry):
                for hive in reg_hives:
                    if entry.lower() == hive:
                        reg_dict[hive] = self.registry_config + entry
        for user in self.users:
            if os.path.isfile(self.user_path + user + '\\NTUSER.DAT'):
                reg_dict[user] = self.user_path + user + '\\NTUSER.DAT'
        return reg_dict

    def collect_mem(self):
        # Memory acquisition is not implemented for this platform.
        pass

    def collect_usb(self):
        """
        Collects the data needed for USB information.

        :return: dictionary of usb logs
        """
        usb_logs = dict()
        usb_logs['setupapiapp'] = self.targ + '/Windows/inf/setupapi.app.log'
        usb_logs['setupapidev'] = self.targ + '/Windows/inf/setupapi.dev.log'
        return usb_logs

    def collect_fs_data(self):
        """
        Extract raw file system structures for later parsing.

        Copies $MFT and $LogFile out with the bundled RawCopy tool (64-bit
        first, falling back to 32-bit) and extracts the $UsnJrnl:$J stream
        via pyads.

        :return: dict with the output directory under key 'path'
        """
        # TODO Add in ability to zip extracted files
        import subprocess
        out_dir = os.path.join(os.path.abspath(self.dest), 'filesystem_artifacts')
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        fsdata = dict()
        # NOTE(review): RawCopy appears to address NTFS metafiles by MFT
        # record index appended to the volume path (0 = $MFT, 2 = $LogFile).
        # Confirm against the RawCopy documentation.
        fsdata['MFT'] = self.targ + '0'
        fsdata['LogFile'] = self.targ + '2'
        from lcdic import base  # import base variable, not base module!
        from libs import pyads
        j = pyads.ADS(self.targ + '\\$Extend\\$UsnJrnl:$J')
        if len(j.getStreams()):  # if a stream is detected, copy it out. reads entire journal into RAM, may cause issues
            j.extractStream('', outfile=os.path.join(out_dir, 'USN_$J'))
            logging.info('Completed USN Journal Extraction')
        for key in fsdata.keys():
            try:
                cmd = base + '\\libs\\RawCopy\\RawCopy64.exe ' + fsdata[key] + ' ' + out_dir + ' -AllAttr'
                subprocess.check_output(cmd, shell=True)
            except Exception:
                # BUG FIX: the warning previously always named "$MFT" even
                # when another structure failed; report the actual key.
                logging.warning('Could not extract ' + key + ' using 64bit tool...Trying 32bit...')
                try:
                    cmd = base + '\\libs\\RawCopy\\RawCopy.exe ' + fsdata[key] + ' ' + out_dir + ' -AllAttr'
                    subprocess.check_output(cmd, shell=True)
                except Exception:
                    logging.error('Could not Extract ' + key + '!')
            logging.info('Completed ' + key + ' Extraction')
        return {'path': out_dir}

    def collect_docs(self):
        """
        Walk the profile of each in-scope user and gather files whose
        extension appears in self.extensions.

        :return: list of matching file paths (also stored on self.doc_array)
        """
        self.doc_array = list()
        if self.extensions:
            # Limit the walk to explicitly targeted users when requested.
            targets = self.target_user if self.ext_for_users else self.users
            for user in targets:
                for root, dirs, files in os.walk(self.user_path + user):
                    for entry in files:
                        if os.path.splitext(entry)[-1].strip('.') in self.extensions:
                            self.doc_array.append(os.path.join(root + '/' + entry))
        return self.doc_array

    def complete_collection(self, paths):
        """Hand the collected artifact paths to the base-class tarball packager."""
        self._tarball(paths)
| {
"repo_name": "lcdi/LCDIC",
"path": "collectors/windows.py",
"copies": "1",
"size": "13077",
"license": "mit",
"hash": -8714543090313027000,
"line_mean": 34.8273972603,
"line_max": 163,
"alpha_frac": 0.565726084,
"autogenerated": false,
"ratio": 4.171291866028708,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006762092177231016,
"num_lines": 365
} |
__author__ = 'cbryce'
__version__ = 0.00
import progressbar
import os
import yara
class YaraSearch():
    """
    Recursively scan a directory tree with a compiled Yara rule set.

    custom_rule may be either a path to a rules file or inline rule source.
    """
    def __init__(self, custom_rule, target):
        self.custom_rule = custom_rule  # rule file path or rule source text
        self.target = target            # root directory (or drive) to scan

    def run(self):
        """
        Compile the rules, walk the target tree and match every file.

        :return: list of dicts {'match': <yara matches>, 'file': <path>}
        :raises ValueError: if custom_rule is neither a file path nor a string
        """
        if os.path.isfile(self.custom_rule):
            rules = yara.compile(self.custom_rule)
        elif isinstance(self.custom_rule, str):
            rules = yara.compile(source=self.custom_rule)
        else:
            # BUG FIX: previously `rules` was left unbound here, producing a
            # confusing NameError further down; fail fast instead.
            raise ValueError('custom_rule must be a rule file path or rule source text')
        matches = []
        # First pass only counts files so the progress bar has a total.
        count = 0
        for root, dirs, files in os.walk(self.target):
            count += len(files)
        pbar = progressbar.ProgressBar(widgets=[progressbar.Bar('+'), ' ', progressbar.Percentage(), ' | ',
                                                progressbar.ETA(), ' | ', progressbar.SimpleProgress()],
                                       maxval=count).start()
        p = 0
        for root, dirs, files in os.walk(self.target + '\\'):
            for entry in files:
                p += 1
                pbar.update(p)
                path = os.path.join(root, entry)
                try:
                    m = rules.match(path)
                    if m:
                        matches.append({'match': m, 'file': path})
                except Exception:
                    # Best-effort scan: unreadable/locked files are skipped.
                    pass
        pbar.finish()
        return matches
if __name__ == '__main__':
    # To Run Tests
    # NOTE(review): this only constructs the searcher and never calls
    # .run(), so the smoke test does not actually execute a scan — confirm
    # whether YaraSearch(...).run() was intended here.
    YaraSearch('../config/yara.rules', 'F:')
"repo_name": "lcdi/LCDIC",
"path": "collectors/search.py",
"copies": "1",
"size": "1493",
"license": "mit",
"hash": 2361917249014117400,
"line_mean": 27.7307692308,
"line_max": 107,
"alpha_frac": 0.4641661085,
"autogenerated": false,
"ratio": 4.229461756373937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016885891885891886,
"num_lines": 52
} |
__author__ = 'cccaballero'
from gluon import current
import os
def upload():
    """
    web2py controller: accept a media upload, record it in the plugin's
    upload table, then return the upload form plus the filtered row set.

    :return: dict(form=<SQLFORM>, rows=<Rows ordered by title>)
    """
    path = os.path.join(request.folder, 'uploads')
    form = SQLFORM.factory(
        Field('upload', 'upload', requires=IS_NOT_EMPTY(), uploadfolder=path),
        table_name=current.plugin_daxs_media_galley.settings.table_upload_name
    )
    if form.accepts(request, session):
        upload = request.vars.upload
        old_filename = upload.filename
        # new_filename = form.table.upload.store(upload.file, upload.filename)
        new_filename = form.vars.upload
        length = os.path.getsize(os.path.join(path, new_filename))
        mime_type = upload.headers['content-type']
        title = os.path.splitext(old_filename)[0]
        current.plugin_daxs_media_galley.settings.table_upload.validate_and_insert(
            title=title,
            filename=old_filename,
            upload=new_filename,
            flength=length,
            mime_type=mime_type
        )
    db = current.plugin_daxs_media_galley.db
    table_upload = current.plugin_daxs_media_galley.settings.table_upload
    browse_filter = current.plugin_daxs_media_galley.settings.browse_filter
    # `selection` was previously named `set`, shadowing the builtin.
    selection = db(table_upload.id > 0)
    # BUG FIX: the loop variable was named `val` while the body read
    # `value`, which raised NameError for any non-empty browse_filter.
    for key, value in browse_filter.items():
        # Filter values may carry a comparison prefix: '<', '>' or '!'.
        if value[0] == '<':
            selection = selection(table_upload[key] < value[1:])
        elif value[0] == '>':
            selection = selection(table_upload[key] > value[1:])
        elif value[0] == '!':
            selection = selection(table_upload[key] != value[1:])
        else:
            selection = selection(table_upload[key] == value)
    rows = selection.select(orderby=table_upload.title)
    return dict(form=form, rows=rows)
def browse():
    """
    web2py controller: return the plugin's uploads filtered by the
    configured browse_filter, ordered by title.

    :return: dict(rows=<Rows ordered by title>)
    """
    db = current.plugin_daxs_media_galley.db
    table_upload = current.plugin_daxs_media_galley.settings.table_upload
    browse_filter = current.plugin_daxs_media_galley.settings.browse_filter
    # `selection` was previously named `set`, shadowing the builtin.
    selection = db(table_upload.id > 0)
    # BUG FIX: the loop variable was named `val` while the body read
    # `value`, which raised NameError for any non-empty browse_filter.
    for key, value in browse_filter.items():
        # Filter values may carry a comparison prefix: '<', '>' or '!'.
        if value[0] == '<':
            selection = selection(table_upload[key] < value[1:])
        elif value[0] == '>':
            selection = selection(table_upload[key] > value[1:])
        elif value[0] == '!':
            selection = selection(table_upload[key] != value[1:])
        else:
            selection = selection(table_upload[key] == value)
    rows = selection.select(orderby=table_upload.title)
    return dict(rows=rows)
def delete():
    """
    web2py controller: delete the upload record named by the first URL arg.

    :raises HTTP: 401 when the filename argument is missing
    """
    # BUG FIX: removed leftover debug statement `print "akaka"`.
    filename = request.args(0)
    if not filename:
        raise HTTP(401, T('Required argument filename missing.'))
    db = current.plugin_daxs_media_galley.db
    table_upload = current.plugin_daxs_media_galley.settings.table_upload
    db(table_upload.upload == filename).delete()
    # # delete the file from storage
    # path = os.path.join(request.folder, 'uploads', filename)
    # os.unlink(path)
"repo_name": "daxslab/web2py-media-galley",
"path": "controllers/plugin_daxs_media_galley.py",
"copies": "1",
"size": "2791",
"license": "mit",
"hash": -4375473434925932500,
"line_mean": 31.4651162791,
"line_max": 92,
"alpha_frac": 0.6180580437,
"autogenerated": false,
"ratio": 3.445679012345679,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9547491049448684,
"avg_score": 0.0032492013193990157,
"num_lines": 86
} |
__author__ = 'cccaballero'
from gluon import *
from gluon.contrib.ordereddict import OrderedDict
def set_seo_meta(type="website", card="summary", title=None,
                 author=None, keywords=None, generator="Web2py Web Framework",
                 url=None, image=None, description=None, site_name=None,
                 locale=None, locale_alternate=None, creator=None,
                 site=None, label1=None, data1=None, label2=None, data2=None):
    """
    Convenience wrapper that sets standard HTML meta tags, Open Graph tags
    and Twitter Card tags on the current response in one call.

    title and url default to values derived from the current request.
    """
    # BUG FIX: locale_alternate previously defaulted to a shared mutable
    # dict ({}); use None as the default and normalize here instead.
    if locale_alternate is None:
        locale_alternate = {}
    if not title:
        title = current.request.application.replace('_', ' ').title()
    if not url:
        url = URL(args=current.request.args, host=True)
    set_meta(title, description, keywords, author, generator)
    set_open_graph(type, title, url, image, description, site_name, locale, locale_alternate)
    set_twitter_card(card, title, creator, site, label1, data1, label2, data2, image, description)
def set_meta(title=None, description=None, keywords=None,
             author=None, generator="Web2py Web Framework"):
    """Populate the standard HTML meta tags and the page title on the
    current response; falsy values are skipped."""
    values = {
        'title': title,
        'description': description,
        'keywords': keywords,
        'author': author,
        'generator': generator,
    }
    for tag_name, tag_value in values.items():
        if tag_value:
            current.response.meta[tag_name] = tag_value
    # Fall back to a title derived from the application name.
    if not title:
        title = current.request.application.replace('_', ' ').title()
    current.response.title = title
def set_open_graph(type="website", title=None,
                   url=None, image=None, description=None,
                   site_name=None, locale=None, locale_alternate=None):
    """
    Register Open Graph ("og:*") meta entries on the current response.

    title and url default to values derived from the current request.
    """
    # BUG FIX: locale_alternate previously defaulted to a shared mutable
    # dict ({}). NOTE(review): it is also never used below — confirm intent.
    if locale_alternate is None:
        locale_alternate = {}
    if not title:
        title = current.request.application.replace('_', ' ').title()
    if not url:
        url = URL(args=current.request.args, host=True)
    data = locals()
    for name in ['type', 'title', 'url', 'description', 'site_name', 'locale']:
        # Renamed local from `dict` — it shadowed the builtin.
        meta = OrderedDict()
        if data[name]:
            # NOTE(review): this writes the key 'name' while set_og_image
            # writes 'property' for its og: entries — one of the two is
            # likely inconsistent with how the view renders meta; confirm.
            meta['name'] = "og:" + name
            meta['content'] = data[name]
        current.response.meta['og_' + name] = meta
    if image:
        set_og_image(image)
def set_twitter_card(card="summary", title=None,
                     creator=None, site=None, label1=None,
                     data1=None, label2=None, data2=None,
                     image=None, description=None):
    """
    Register Twitter Card ("twitter:*") meta entries on the current
    response; title defaults to a value derived from the application name.
    """
    if not title:
        title = current.request.application.replace('_', ' ').title()
    data = locals()
    for name in ['card', 'title', 'description', 'creator', 'site', 'label1', 'data1', 'label2', 'data2']:
        # Renamed local from `dict` — it shadowed the builtin.
        meta = OrderedDict()
        if data[name]:
            # NOTE(review): this writes the key 'property' while set_tc_image
            # writes 'name' for its twitter: entry — one of the two is likely
            # inconsistent with how the view renders meta; confirm.
            meta['property'] = "twitter:" + name
            meta['content'] = data[name]
        current.response.meta['tc_' + name] = meta
    if image:
        set_tc_image(image)
# Open Graph meta
def set_og_image(image):
    """Register one (or, for a list, many) og:image meta entries on the
    current response."""
    if isinstance(image, list):
        for idx, img in enumerate(image):
            tag = OrderedDict()
            tag['property'] = "og:image"
            tag['content'] = img
            current.response.meta['og_image_' + str(idx)] = tag
    else:
        tag = OrderedDict()
        tag['property'] = "og:image"
        tag['content'] = image
        current.response.meta.og_image = tag
# Twitter Card meta
def set_tc_image(image):
    """Register the twitter:image meta entry; for a list of images only the
    first one is used (Twitter Cards carry a single image)."""
    tag = OrderedDict()
    tag['name'] = "twitter:image"
    tag['content'] = image[0] if isinstance(image, list) else image
    current.response.meta.tc_image = tag
| {
"repo_name": "daxslab/web2py-simple-seo",
"path": "modules/plugin_simple_seo/seo.py",
"copies": "1",
"size": "3340",
"license": "mit",
"hash": 2734145262129312000,
"line_mean": 34.5319148936,
"line_max": 106,
"alpha_frac": 0.5892215569,
"autogenerated": false,
"ratio": 3.7954545454545454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9881944097135898,
"avg_score": 0.0005464010437293057,
"num_lines": 94
} |
class Solution:
    # @param {string[]} tokens
    # @return {integer}
    def evalRPN(self, tokens):
        """Evaluate a Reverse Polish Notation expression.

        Works in place on `tokens`: whenever an operator is found, the two
        preceding operands collapse into one result and the scan backs up.
        Division truncates toward zero (C semantics), as RPN problems require.
        """
        i = 0
        while len(tokens) != 1:
            # Operands are digit strings with an optional leading '-'.
            if not tokens[i].strip('-').isdigit():
                a = int(tokens[i - 2])
                b = int(tokens[i - 1])
                op = tokens[i]
                if op == '*':
                    tokens[i - 2] = str(a * b)
                elif op == '/':
                    # BUG FIX: was `a / b`, which is true division on
                    # Python 3. Floor-divide, then correct negative inexact
                    # quotients toward zero (same result as before on Py2).
                    q = a // b
                    if q < 0 and a % b != 0:
                        q += 1
                    tokens[i - 2] = str(q)
                elif op == '+':
                    tokens[i - 2] = str(a + b)
                elif op == '-':
                    tokens[i - 2] = str(a - b)
                # Remove the consumed operand and operator, back up one slot.
                del tokens[i - 1]
                del tokens[i - 1]
                i -= 1
            else:
                i += 1
        return int(tokens[0])
| {
"repo_name": "cc13ny/algo",
"path": "leetcode/150-Evaluate-Reverse-Polish-Notation/ERPN_001.py",
"copies": "5",
"size": "1163",
"license": "mit",
"hash": 9167256931667561000,
"line_mean": 40.5357142857,
"line_max": 108,
"alpha_frac": 0.3955288048,
"autogenerated": false,
"ratio": 3.739549839228296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6635078644028296,
"avg_score": null,
"num_lines": null
} |
class Solution:
    # @return a list of lists of length 3, [[val1,val2,val3]]
    def threeSum(self, num):
        """Return every unique triplet in `num` that sums to zero.

        Sorts `num` in place, then runs the classic two-pointer sweep for
        each anchor index, skipping duplicate values on all three positions.
        """
        num.sort()
        triples = []
        n = len(num)
        for i in range(n - 2):
            # Skip anchors equal to the previous one to avoid duplicates.
            if i > 0 and num[i] <= num[i - 1]:
                continue
            lo, hi = i + 1, n - 1
            while lo < hi:
                pair_sum = num[lo] + num[hi]
                target = -num[i]
                if pair_sum == target:
                    triples.append([num[i], num[lo], num[hi]])
                    lo += 1
                    hi -= 1
                    # Step both pointers past any repeated values.
                    while lo < hi and num[lo] == num[lo - 1]:
                        lo += 1
                    while lo < hi and num[hi] == num[hi + 1]:
                        hi -= 1
                elif pair_sum < target:
                    # Advance lo to the next distinct value.
                    while lo < hi:
                        lo += 1
                        if num[lo] > num[lo - 1]:
                            break
                else:
                    # Retreat hi to the previous distinct value.
                    while lo < hi:
                        hi -= 1
                        if num[hi] < num[hi + 1]:
                            break
        return triples
| {
"repo_name": "cc13ny/Allin",
"path": "leetcode/015-3Sum/ThreeSum_001.py",
"copies": "5",
"size": "1091",
"license": "mit",
"hash": -3733121122761905000,
"line_mean": 40.9615384615,
"line_max": 85,
"alpha_frac": 0.4005499542,
"autogenerated": false,
"ratio": 4.180076628352491,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7080626582552491,
"avg_score": null,
"num_lines": null
} |
class Solution:
    # @param {string} s
    # @return {string}
    def longestPalindrome(self, s):
        """Return a longest palindromic substring of s.

        Strategy: run-length compress s into maximal runs of equal
        characters (pass 1), then treat each interior run as the center of a
        candidate palindrome and pair up the runs on either side while their
        characters match (pass 2). ll/rr hold the inclusive bounds of the
        best palindrome seen; maxlen its length.

        NOTE(review): raises IndexError for s == '' (s[size - 1] below) —
        confirm callers never pass an empty string.
        """
        size = len(s)
        ls = []     # run list: [[start, end], char] for each maximal run
        ll = 0      # start index of the best palindrome found so far
        rr = 0      # end index (inclusive) of the best palindrome
        l = 0       # start of the run currently being scanned
        r = 0       # end of the run currently being scanned
        maxlen = r - l + 1
        # Pass 1: build the run list. A run of equal characters is itself a
        # palindrome, so the longest single run is tracked while scanning.
        for i in range(1, size):
            if s[i-1] == s[i]:
                r = i
            else:
                if r - l + 1 > maxlen:
                    maxlen = r - l + 1
                    ll = l
                    rr = r
                ls.append([[l, r], s[i-1]])
                l = i
                r = i
        # Flush the final run (the loop only emits runs on a char change).
        if r - l + 1 > maxlen:
            maxlen = r - l + 1
            ll = l
            rr = r
        ls.append([[l, r], s[size - 1]])
        # Pass 2: expand around each interior run. Side runs of unequal
        # length can only contribute the shorter length on both sides, after
        # which the expansion must stop (hence the break).
        for i in range(1, len(ls) - 1):
            l = i - 1
            r = i + 1
            clen = ls[i][0][1] - ls[i][0][0] + 1
            while -1 < l and r < len(ls) and ls[l][1] == ls[r][1]:
                llen = ls[l][0][1] - ls[l][0][0] + 1
                rlen = ls[r][0][1] - ls[r][0][0] + 1
                if llen == rlen:
                    # Whole runs match on both sides: keep expanding outward.
                    clen += 2 * llen
                    if clen > maxlen:
                        maxlen = clen
                        ll = ls[l][0][0]
                        rr = ls[r][0][1]
                    l -= 1
                    r += 1
                else:
                    # Unequal runs: only the shorter length fits on each side.
                    if llen > rlen:
                        clen += 2*rlen
                    else:
                        clen += 2*llen
                    if clen > maxlen:
                        maxlen = clen
                        if llen > rlen:
                            # Trim the left run to the matched tail portion.
                            ll = ls[l][0][1] - rlen + 1
                            rr = ls[r][0][1]
                        else:
                            # Trim the right run to the matched head portion.
                            ll = ls[l][0][0]
                            rr = ls[r][0][0] + llen - 1
                    break
        return s[ll:rr+1]
| {
"repo_name": "Chasego/codi",
"path": "leetcode/005-Longest-Palindromic-Substring/LongPalSubstr_001.py",
"copies": "5",
"size": "1984",
"license": "mit",
"hash": 2374014732128316400,
"line_mean": 28.6119402985,
"line_max": 66,
"alpha_frac": 0.2691532258,
"autogenerated": false,
"ratio": 3.968,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.67371532258,
"avg_score": null,
"num_lines": null
} |
class Solution:
    # @param {string} str
    # @return {integer}
    def extractnum(self, ss):
        """Return the leading run of decimal digits in ``ss`` (may be empty)."""
        end = 0
        while end < len(ss) and ss[end].isdigit():
            end += 1
        return ss[:end]

    def isoverflow(self, sss, ispos):
        """Report whether digit string ``sss`` exceeds the 32-bit bound.

        The bound is 2147483647 for positive values and 2147483648 for the
        magnitude of negative ones. Comparison is by length first, then
        lexicographically — equivalent to the original digit-by-digit scan
        (including its treatment of leading zeros).
        """
        bound = '2147483647' if ispos else '2147483648'
        if len(sss) != len(bound):
            return len(sss) > len(bound)
        return sss > bound

    def myAtoi(self, str):
        """Convert ``str`` to a 32-bit signed integer, clamping on overflow.

        Leading whitespace is ignored; an optional single sign may precede
        the digits; parsing stops at the first non-digit.
        """
        str = str.strip()
        if not str:
            return 0
        positive = True
        if str[0] in '+-':
            positive = str[0] == '+'
            str = str[1:]
        # Nothing (or a non-digit) after the sign: not a number.
        if not str or not str[0].isdigit():
            return 0
        digits = self.extractnum(str)
        if positive:
            return 2147483647 if self.isoverflow(digits, True) else int(digits)
        return -2147483648 if self.isoverflow(digits, False) else -int(digits)
| {
"repo_name": "Chasego/codirit",
"path": "leetcode/008-String-to-Integer/Str2Int_001.py",
"copies": "5",
"size": "1880",
"license": "mit",
"hash": 5478983514924081000,
"line_mean": 27.4848484848,
"line_max": 60,
"alpha_frac": 0.4079787234,
"autogenerated": false,
"ratio": 4.196428571428571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00750313638342193,
"num_lines": 66
} |
__author__ = 'CClive'
import theano
import numpy
import neural_layer
import theano.tensor as T
class NeuralNet(object):
    """
    Generic neural network class

    Acts as a container that manages a list of neural layers.
    Also adds cost, error, and prediction functions to the network.
    """
    def __init__(self, layers):
        # `layers` is an ordered list of layer objects; the first layer's
        # input and the last layer's output become the network's endpoints.
        self.layers = layers
        self.input = self.layers[0].input
        self.output = self.layers[-1].output
        self.params = []
        self.L1_norm = []
        self.L2_norm = []
        for layer in self.layers:
            self.params += layer.params
            # NOTE(review): in NeuralLayer the L1/L2 attributes are Theano
            # scalar expressions, yet here they are accumulated into Python
            # lists with `+=` (list.__iadd__ needs an iterable) and `.sum()`
            # is then called on the list — which lists don't define. This
            # looks broken unless the layer attributes are actually lists;
            # confirm before relying on regularization.
            self.L1_norm += layer.L1_norm
            self.L2_norm += layer.L2_norm
        self.L1_norm = self.L1_norm.sum()
        self.L2_norm = self.L2_norm.sum()
        # Predicted class per example: argmax over the output distribution.
        self.prediction = T.argmax(self.output, axis=1)
        data = T.matrix('data')
        # Compiled function mapping a raw data matrix to predicted labels.
        # NOTE(review): this instance attribute shadows any `classify`
        # method defined on a subclass (e.g. LeNet.classify) — confirm
        # which one callers are supposed to reach.
        self.classify = theano.function(
            inputs=[data],
            outputs=self.prediction,
            givens={self.input: data})

    def cost(self, y, L1_reg=0, L2_reg=0):
        """
        Uses the negative log likelihood for the cost function. This is appropriate
        when the last layer of the network is a logistic regression layer, which is
        a popular choice. Override this method if another cost function is desired.

        Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.
        .. math::
            \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
            \ell (\theta=\{W,b\}, \mathcal{D})

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        :param L1_reg: weight of the L1 regularization term
        :param L2_reg: weight of the squared-L2 regularization term

        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """
        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # number of examples (call it n) in the minibatch
        # T.arange(y.shape[0]) is a symbolic vector which will contain
        # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
        # Log-Probabilities (call it LP) with one row per example and
        # one column per class LP[T.arange(y.shape[0]),y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
        prediction_cost = -T.mean(T.log(self.output)[T.arange(y.shape[0]), y])
        regularization_cost = L1_reg * self.L1_norm + L2_reg * self.L2_norm
        return prediction_cost + regularization_cost

    def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        :raises TypeError: if y's dimensionality differs from the prediction's
        :raises NotImplementedError: for non-integer label dtypes
        """
        # check if y has same dimension of y_pred
        # what is 'target.type'?
        if y.ndim != self.prediction.ndim:
            raise TypeError('y should have the same shape as self.prediction',
                            ('y', y.type, 'y_pred', self.prediction.type))
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.prediction, y))
        else:
            raise NotImplementedError()

    # TODO: Add a print function that displays the structure of the network.
    # TODO: Add a print function to NeuralLayer, and have NeuralNet call the
    # TODO: print function for each of its layers.
class LogisticRegression(NeuralNet):
    """
    A trivial NeuralNet consisting of one softmax (logistic) layer only.
    """
    def __init__(self, input, n_in, n_out):
        output_layer = neural_layer.LogisticLayer(input, n_in, n_out)
        super(LogisticRegression, self).__init__([output_layer])
class MLP(NeuralNet):
    """
    Multi-layer perceptron: a stack of PerceptronLayers whose sizes are
    given by n_hiddens, topped by a LogisticLayer of size n_out.

    TODO: generalize this to make it easy to create an arbitrary number of
    hidden layers, of arbitrary size.
    """
    def __init__(self, input, n_in, n_out, n_hiddens=[]):
        # First hidden layer feeds on the raw input; each subsequent one
        # feeds on its predecessor's output.
        hidden = [neural_layer.PerceptronLayer(input, n_in, n_hiddens[0])]
        for prev_size, cur_size in zip(n_hiddens[:-1], n_hiddens[1:]):
            hidden.append(
                neural_layer.PerceptronLayer(hidden[-1].output, prev_size, cur_size))
        top = neural_layer.LogisticLayer(input=hidden[-1].output,
                                         n_in=n_hiddens[-1], n_out=n_out)
        super(MLP, self).__init__(hidden + [top])
class LeNet(NeuralNet):
    """
    Convolutional neural network with 2 conv/pool layers and 1 perceptron layer.
    """
    def __init__(self, input, nkerns, filter_shapes, image_shapes,
                 batch_size, n_hiddens, n_out, poolsize=2):
        # nkerns: number of kernels (feature maps) per conv layer
        # filter_shapes / image_shapes: (height, width) per conv layer
        # n_hiddens: sizes of the fully-connected hidden layers
        self.filter_shapes = filter_shapes
        self.image_shapes = image_shapes
        self.batch_size = batch_size
        cp_layers = []
        # NOTE(review): the reshape hard-codes 28x28 single-channel input
        # (MNIST); image_shapes[0] must agree with that — confirm.
        input0 = input.reshape((batch_size, 1, 28, 28))
        cp_layers.append(neural_layer.LeNetConvPoolLayer(
            input=input0,
            filter_shape=(nkerns[0], 1, filter_shapes[0][0], filter_shapes[0][1]),
            image_shape=(batch_size, 1, image_shapes[0][0], image_shapes[0][1]),
            poolsize=(poolsize, poolsize)
        ))
        # Stack any remaining conv/pool layers, each fed by the previous output.
        for n in range(1, len(nkerns)):
            cp_layers.append(
                neural_layer.LeNetConvPoolLayer(
                    input=cp_layers[n-1].output,
                    filter_shape=(nkerns[n], nkerns[n-1], filter_shapes[n][0], filter_shapes[n][1]),
                    image_shape=(batch_size, nkerns[n-1], image_shapes[n][0], image_shapes[n][1]),
                    poolsize=(poolsize, poolsize)))
        hidden_layers = []
        # NOTE(review): n_in hard-codes nkerns[1] * 4 * 4 — presumably the
        # flattened feature-map size after two conv/pool stages on 28x28
        # input; this breaks for other depths or image sizes. Confirm.
        hidden_layers.append(neural_layer.PerceptronLayer(input=cp_layers[-1].output.flatten(2),
                                                          n_in=nkerns[1] * 4 * 4,
                                                          n_out=n_hiddens[0]))
        for n in range(1, len(n_hiddens)):
            hidden_layers.append(
                neural_layer.PerceptronLayer(hidden_layers[n-1].output, n_hiddens[n-1], n_hiddens[n]))
        logreg_layer = neural_layer.LogisticLayer(input=hidden_layers[-1].output,
                                                  n_in=n_hiddens[-1], n_out=n_out)
        super(LeNet, self).__init__(cp_layers + hidden_layers + [logreg_layer])

    def classify(self, data):
        """
        The LeNet class is only able to classify data sets that have the same dimension
        as the minibatches it is trained on. (I'm not exactly sure why; this is something
        I need to investigate.)

        This function will break up a data set into minibatches and reshape them to fit
        the right dimensions, and then collect predictions for each minibatch into a single
        array.

        NOTE(review): NeuralNet.__init__ assigns self.classify as an
        instance attribute (a compiled Theano function), which shadows this
        method on instances; and super(LeNet, self).classify looks up class
        attributes only, so it will not find that instance attribute —
        confirm how this is actually invoked.
        """
        # TODO: Get this to work on data sets that have a number of observations
        # TODO: not equal to a multiple of the minibatch size.
        preds = []
        n = data.shape[0]
        # Python 2 integer division: only whole minibatches are processed.
        for i in xrange(n / self.batch_size):
            batch = data[i*self.batch_size : (i+1)*self.batch_size, :]
            batch = batch.reshape((batch.shape[0], 1,
                                   self.image_shapes[0][0],
                                   self.image_shapes[0][1]))
            preds.append(super(LeNet, self).classify(batch))
        output = numpy.hstack(preds)
        return output
| {
"repo_name": "cliffclive/neuromancy",
"path": "neuromancy/neural_net.py",
"copies": "2",
"size": "8122",
"license": "mit",
"hash": 2363939053491485000,
"line_mean": 41.3020833333,
"line_max": 102,
"alpha_frac": 0.5818763851,
"autogenerated": false,
"ratio": 3.7019143117593436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007858427708542514,
"num_lines": 192
} |
__author__ = 'CClive'
import theano
import numpy
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
class NeuralLayer(object):
    def __init__(self, input, W=None, b=None, activation=None):
        """
        Basic layer of a neural net: weights W and bias b, along with an
        activation function, define the transformation from inputs to outputs.

        :type input: theano.tensor.dmatrix
        :param input: a symbolic tensor of shape (n_examples, n_in)
        :type activation: theano.Op or function
        :param activation: non-linearity applied to the affine output
                           (None means a purely linear layer)
        """
        self.input = input
        self.W = W
        self.b = b
        # Affine transform, optionally followed by the non-linearity.
        pre_activation = T.dot(input, self.W) + self.b
        if activation is None:
            self.output = pre_activation
        else:
            self.output = activation(pre_activation)
        # Parameters of the model, in the order optimizers expect.
        self.params = [self.W, self.b]
        # Regularization terms: L1 encourages sparse weights, the squared
        # L2 norm keeps weights small.
        self.L1_norm = abs(self.W).sum()
        self.L2_norm = (self.W ** 2).sum()
class LogisticLayer(NeuralLayer):
    def __init__(self, input, n_in, n_out):
        """Softmax output layer for multi-class logistic regression.

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)
        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie
        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie
        """
        # Zero-initialized weights and biases are the convention for a
        # softmax layer (no symmetry-breaking needed at the output).
        W_init = numpy.zeros((n_in, n_out), dtype=theano.config.floatX)
        b_init = numpy.zeros((n_out,), dtype=theano.config.floatX)
        W = theano.shared(value=W_init, name='W', borrow=True)
        b = theano.shared(value=b_init, name='b', borrow=True)
        # Softmax turns the affine output into class-membership probabilities.
        super(LogisticLayer, self).__init__(input, W, b, T.nnet.softmax)
class PerceptronLayer(NeuralLayer):
    def __init__(self, input, n_in, n_out, W=None, b=None,
                 activation=T.tanh, seed=None):
        """
        Typical hidden layer of a MLP: units are fully-connected and have
        sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
        and the bias vector b is of shape (n_out,).

        NOTE : The nonlinearity used here is tanh
        Hidden unit activation is given by: tanh(dot(input,W) + b)

        :type seed: int
        :param seed: used to initialize a random number generator used to
                     initialize weights (None gives non-reproducible init)
        :type input: theano.tensor.dmatrix
        :param input: a symbolic tensor of shape (n_examples, n_in)
        :type n_in: int
        :param n_in: dimensionality of input
        :type n_out: int
        :param n_out: number of hidden units
        :type activation: theano.Op or function
        :param activation: Non linearity to be applied in the hidden
                           layer
        """
        rng = numpy.random.RandomState(seed)
        # `W` is initialized with `W_values` which is uniformely sampled
        # from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
        # for tanh activation function
        # the output of uniform if converted using asarray to dtype
        # theano.config.floatX so that the code is runable on GPU
        # Note : optimal initialization of weights is dependent on the
        #        activation function used (among other things).
        #        For example, results presented in [Xavier10] suggest that you
        #        should use 4 times larger initial weights for sigmoid
        #        compared to tanh
        #        We have no info for other function, so we use the same as
        #        tanh.
        if W is None:
            W_values = numpy.asarray(rng.uniform(
                low=-numpy.sqrt(6. / (n_in + n_out)),
                high=numpy.sqrt(6. / (n_in + n_out)),
                size=(n_in, n_out)), dtype=theano.config.floatX)
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4
            W = theano.shared(value=W_values, name='W', borrow=True)
        if b is None:
            # Biases start at zero; only the weights need symmetry breaking.
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)
        super(PerceptronLayer, self).__init__(input, W, b, activation)
class LeNetConvPoolLayer(NeuralLayer):
    """Pool Layer of a convolutional network """
    def __init__(self, input, filter_shape, image_shape, poolsize=(2, 2), seed=None):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type seed: int
        :param seed: used to initialize a random number generator used to
                     initialize weights (None gives non-reproducible init)
        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape
        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height,filter width)
        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)
        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
        """
        # Channel count of the input must match the filters' channel count.
        assert image_shape[1] == filter_shape[1]
        rng = numpy.random.RandomState(seed)
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        W = theano.shared(numpy.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype=theano.config.floatX), borrow=True)
        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        b = theano.shared(value=b_values, borrow=True)
        # NOTE(review): the base __init__ builds T.dot(input, W) + b on the
        # 4D tensors and stores it as self.output, which is then overwritten
        # below — only params/L1/L2 bookkeeping from the base class is
        # actually used here. Confirm the dot expression is truly dead.
        super(LeNetConvPoolLayer, self).__init__(input, W, b)
        # convolve input feature maps with filters
        conv_out = conv.conv2d(input=input, filters=self.W,
                               filter_shape=filter_shape, image_shape=image_shape)
        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(input=conv_out,
                                            ds=poolsize, ignore_border=True)
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1,n_filters,1,1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
| {
"repo_name": "dksahuji/neuromancy",
"path": "neuromancy/neural_layer.py",
"copies": "2",
"size": "7761",
"license": "mit",
"hash": 6594224671872244000,
"line_mean": 38.5969387755,
"line_max": 92,
"alpha_frac": 0.5954129622,
"autogenerated": false,
"ratio": 4.04008328995315,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.563549625215315,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cdan'
import httplib
def get_status_code(host, path):
    """Return the HTTP status code of `path` on `host`, or None on failure.

    Only a HEAD request is issued, so just the response headers are
    transferred — the body is never downloaded.
    """
    try:
        connection = httplib.HTTPConnection(host)
        connection.request("HEAD", path)
        response = connection.getresponse()
        return response.status
    except StandardError:
        # Host unreachable, DNS failure, etc. — signal with None.
        return None
base_url = r'datasurfer.sandag.org.gerbera.arvixe.com'
tests = [
r'/api/estimate/2013/zip/92101/92102/92103/export/pdf'
,r'/api/estimate/2013/jurisdiction/carlsbad/coronado/chula vista/del mar/escondido/encinitas/oceanside/san marcos/la mesa/lemon grove/san diego/export/pdf'
,r'/api/estimate/2013/jurisdiction/carlsbad/coronado/chula vista/del mar/escondido/encinitas/oceanside/san marcos/la mesa/lemon grove/san diego/export/xlsx'
,r'/api/forecast/13/msa/central/east county/east suburban/export/pdf'
,r'/api/forecast/13/msa/central/east county/east suburban/export/xlsx'
]
for test in tests:
url = base_url + test
url.replace(' ', '%20')
status = get_status_code(base_url, test.replace(' ', '%20'))
print str(status) + ': ' + url | {
"repo_name": "SANDAG/DataSurfer",
"path": "api/utilities/export_test.py",
"copies": "1",
"size": "1287",
"license": "mit",
"hash": -8478004757306640000,
"line_mean": 36.8823529412,
"line_max": 161,
"alpha_frac": 0.6923076923,
"autogenerated": false,
"ratio": 3.1012048192771084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9270959852463831,
"avg_score": 0.004510531822655218,
"num_lines": 34
} |
# ADDED: velocidad LOS
# ADDED: signo a eta_blue y eta_red
# ADDED: normalization factor in p,b,r eta + rho profiles
from math import pi, sin, cos
from numpy import sqrt, arange
# from zeeman import *
# from fvoigt import fvoigt
from copy import deepcopy
from mutils2 import *
# ============================================
# FUNCTIONS
# ============================================
def grad(x):
    """Convert an angle from radians to degrees."""
    return 180. * x / pi
def rad(x):
    """Convert an angle from degrees to radians."""
    return pi * x / 180.
def stokesSyn(param, x, B, gamma, xi, vlos, eta0, a, ddop, S_0, S_1):
    """Synthesize the Stokes profiles (I, Q, U, V) of a spectral line
    under a Milne-Eddington-style model.

    Parameters:
      param   Line parameters (Zeeman pattern and atomic data, unpacked below)
      x       Wavelength axis (array)
      B       Magnetic field strength
      gamma   Field inclination [rad]
      xi      Azimuth angle [rad]
      vlos    Line-of-sight velocity [km/s]
      eta0    Line-to-continuum absorption ratio
      a       Damping parameter
      ddop    Doppler width
      S_0     Source-function intercept
      S_1     Source-function gradient

    Returns [I, Q, U, V], each an array over `x`, normalized to the continuum.

    NOTE(review): `fvoigt` is expected to come from the star-import of
    mutils2 (the explicit `from fvoigt import fvoigt` above is commented
    out) — confirm it is actually exported there.
    """
    # 1/sqrt(pi) normalization factor for the profile sums below:
    sqrtpi = 1. / sqrt(pi)
    # Magnetic field direction:
    singamma = sin(gamma)
    cosgamma = cos(gamma)
    sin2xi = sin(2 * xi)
    cos2xi = cos(2 * xi)
    # Unpack the Zeeman pattern: component shifts (dpi/dsr/dsb), strengths
    # (spi/ssr/ssb) and atomic data (quantum numbers, wavelength l0, Lande factors)
    [[dpi, dsr, dsb], [spi, ssr, ssb], [ju, lu, su, jl,
                                        ll, sl, elem, l0, gu, gl, gef]] = deepcopy(param)
    # Lorentz factor
    lB = 4.67E-13 * l0**2. * B
    # Shifts were in Lorentz units; convert to wavelength units
    dpi *= lB
    dsr *= lB
    dsb *= lB
    # VLOS: convert km/s to an Angstrom Doppler shift
    cc = 3.0E+5  # speed of light [km/s]
    vlosA = l0 * vlos / cc
    # ============================================
    # Absorption and dispersion profiles
    # PI COMPONENT
    # --------------------------------------------
    eta_p = 0.
    rho_p = 0.
    for i in range(0, len(spi)):
        xx = (x - dpi[i] - vlosA) / ddop
        [H, F] = fvoigt(a, xx)
        eta_p = eta_p + H * spi[i] * sqrtpi / ddop
        rho_p = rho_p + 2. * F * spi[i] * sqrtpi / ddop
    # SIGMA BLUE COMPONENT
    # --------------------------------------------
    eta_b = 0.
    rho_b = 0.
    for i in range(0, len(ssb)):
        xx = (x - dsb[i] - vlosA) / ddop
        [H, F] = fvoigt(a, xx)
        eta_b = eta_b + H * ssb[i] * sqrtpi / ddop
        rho_b = rho_b + 2. * F * ssb[i] * sqrtpi / ddop
    # SIGMA RED COMPONENT
    # --------------------------------------------
    eta_r = 0.
    rho_r = 0.
    for i in range(0, len(ssr)):
        xx = (x - dsr[i] - vlosA) / ddop
        [H, F] = fvoigt(a, xx)
        eta_r = eta_r + H * ssr[i] * sqrtpi / ddop
        rho_r = rho_r + 2. * F * ssr[i] * sqrtpi / ddop
    # ============================================
    # Elements of the propagation matrix
    # 1.- Absorption element
    eta_I = 1.0 + 0.5 * eta0 * \
        (eta_p * (singamma**2.) + 0.5 * (eta_b + eta_r) * (1. + cosgamma**2.))
    # 2.- Dichroism elements (different polarization in different directions)
    eta_Q = 0.5 * eta0 * (eta_p - 0.5 * (eta_b + eta_r)
                          ) * (singamma**2.) * cos2xi
    eta_U = 0.5 * eta0 * (eta_p - 0.5 * (eta_b + eta_r)
                          ) * (singamma**2.) * sin2xi
    eta_V = 0.5 * eta0 * (eta_r - eta_b) * cosgamma
    # 3.- Dispersion elements
    rho_Q = 0.5 * eta0 * (rho_p - 0.5 * (rho_b + rho_r)
                          ) * (singamma**2.) * cos2xi
    rho_U = 0.5 * eta0 * (rho_p - 0.5 * (rho_b + rho_r)
                          ) * (singamma**2.) * sin2xi
    rho_V = 0.5 * eta0 * (rho_r - rho_b) * cosgamma
    # ============================================
    # Stokes profiles normalized to the continuum // Sc = S1/S0
    # ScDown = 1.+Sc
    # Det=eta_I**2.*(eta_I**2.-eta_Q**2.-eta_U**2.-eta_V**2.+rho_Q**2.+rho_U**2.+rho_V**2.)-(eta_Q*rho_Q+eta_U*rho_U+eta_V*rho_V)**2.
    # IDet = 1./Det
    # I=(1.+IDet*eta_I*(eta_I**2.+rho_Q**2.+rho_U**2.+rho_V**2.)*Sc)/ScDown
    # Q=-IDet*(eta_I**2.*eta_Q+eta_I*(eta_V*rho_U-eta_U*rho_V)+rho_Q*(eta_Q*rho_Q+eta_U*rho_U+eta_V*rho_V))*Sc/ScDown
    # U=-IDet*(eta_I**2.*eta_U+eta_I*(eta_Q*rho_V-eta_V*rho_Q)+rho_U*(eta_Q*rho_Q+eta_U*rho_U+eta_V*rho_V))*Sc/ScDown
    # V=-IDet*(eta_I**2.*eta_V+eta_I*(eta_U*rho_Q-eta_Q*rho_U)+rho_V*(eta_Q*rho_Q+eta_U*rho_U+eta_V*rho_V))*Sc/ScDown
    # Unno-Rachkovsky solution: determinant of the propagation matrix,
    # then the four emergent Stokes parameters.
    Det = eta_I**2. * (eta_I**2. - eta_Q**2. - eta_U**2. - eta_V**2. + rho_Q**2. +
                       rho_U**2. + rho_V**2.) - (eta_Q * rho_Q + eta_U * rho_U + eta_V * rho_V)**2.
    IDet = 1. / Det
    I = S_0 + IDet * eta_I * (eta_I**2. + rho_Q**2. + rho_U**2. + rho_V**2.) * S_1
    Q = -IDet * (eta_I**2. * eta_Q + eta_I * (eta_V * rho_U - eta_U * rho_V) + rho_Q * (eta_Q * rho_Q + eta_U * rho_U + eta_V * rho_V)) * S_1
    U = -IDet * (eta_I**2. * eta_U + eta_I * (eta_Q * rho_V - eta_V * rho_Q) + rho_U * (eta_Q * rho_Q + eta_U * rho_U + eta_V * rho_V)) * S_1
    V = -IDet * (eta_I**2. * eta_V + eta_I * (eta_U * rho_Q - eta_Q * rho_U) + rho_V * (eta_Q * rho_Q + eta_U * rho_U + eta_V * rho_V)) * S_1
    return [I, Q, U, V]
    # return [eta_p,eta_r,eta_b,V]
if __name__ == "__main__":
    # Demo: synthesize and plot the four Stokes profiles for a sample
    # model atmosphere.
    # PARAMETERS:
    nlinea = 3  # Line number in the parameter file
    x = arange(-2.8, 2.8, 20e-3)  # Wavelength axis (array)
    B = 992.  # Magnetic field strength
    gamma = rad(134.)  # Inclination
    xi = rad(145.)  # Azimuth angle
    vlos = 0.0  # velocity km/s
    eta0 = 73.  # Line-to-continuum absorption ratio
    a = 0.2  # Damping parameter
    ddop = 0.02  # Doppler width
    # Sc = 4.0 # Ratio of gradient to intercept of the source function
    S_0 = 0.5  # Source-function intercept
    S_1 = 0.5  # Source-function gradient
    # lambdaStart = 6300.8
    # lambdaStep = 0.015
    # nLambda = 100
    # lambda0 = 6301.5012
    # x = np.arange(lambdaStart,lambdaStart+lambdaStep*nLambda, lambdaStep)-lambda0
    # # VLOS: km2A
    # cc = 3.0E+5
    # l0 = 6301.5012
    # ddop = l0*2./cc
    # class paramlib(object):
    # NOTE(review): paramLine presumably comes from the `mutils2` star
    # import above — confirm.
    param = paramLine(nlinea)
    # tt = paramlib()
    # print(tt.param)
    stokes = stokesSyn(param, x, B, gamma, xi, vlos, eta0, a, ddop, S_0, S_1)
    import matplotlib.pyplot as plt
    # One subplot per Stokes parameter; dashed line marks line center.
    for i in range(4):
        plt.subplot(2, 2, i + 1)
        if i == 0:
            plt.ylim(0, 1.1)
        plt.plot(x, stokes[i])
        plt.plot([0, 0], [min(stokes[i]), max(stokes[i])], 'k--')
        # if i != 0: plt.ylim(-0.4,0.4)
    # plt.tight_layout()
    plt.show()
    # plt.savefig('stokes.pdf')
    # np.save('stokes.npy',stokes)
| {
"repo_name": "cdiazbas/LMpyMilne",
"path": "milne.py",
"copies": "1",
"size": "6532",
"license": "mit",
"hash": 7936703507437912000,
"line_mean": 33.3789473684,
"line_max": 141,
"alpha_frac": 0.4820881813,
"autogenerated": false,
"ratio": 2.4779969650986344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34600851463986343,
"avg_score": null,
"num_lines": null
} |
def fvoigt(damp,vv):
    """
    Compute the Voigt and Faraday-Voigt functions.

    (The previous docstring, about extracting data from the Kitt Peak
    FTS spectral atlas, was copy-pasted from an unrelated routine.)
    INPUTS:
         DAMP: A scalar with the damping parameter
         VV: Wavelength axis, usually in Doppler units (array-like).
    OUTPUTS:
         H: Voigt function
         F: Faraday-Voigt function
    NOTES:
         A rational approximation to the complex error function is used
         after Hui, Armstrong, and Wray (1978, JQSRT 19, 509). H and F are
         the real and imaginary parts of such function, respectively.
         The procedure is inspired on that in SIR (Ruiz Cobo & del Toro
         Iniesta 1992, ApJ 398, 385). On its turn, that routine was taken
         from modifications by A. Wittmann (1986) to modifications by S.K.
         Solanki (1985) to an original FORTRAN routine written by J.W. Harvey
         and A. Nordlund.
    """
    import numpy as np
    # Numerator (A) and denominator (B) coefficients of the rational
    # approximation to the complex error function.
    A = [122.607931777104326, 214.382388694706425, 181.928533092181549,
         93.155580458138441, 30.180142196210589, 5.912626209773153,
         0.564189583562615]
    B = [122.60793177387535, 352.730625110963558, 457.334478783897737,
         348.703917719495792, 170.354001821091472, 53.992906912940207,
         10.479857114260399, 1.]
    vv = np.asarray(vv)
    # Evaluate at z = damp - i|v|; |v| keeps the argument in the region
    # where the approximation is valid, the sign of v is restored in F.
    # (Simplified from damp*ones(len(vv)) + -abs(vv)*1j, which also
    # required vv to support len(); asarray additionally accepts scalars.)
    z = damp - np.abs(vv) * 1j
    Z = ((((((A[6]*z+A[5])*z+A[4])*z+A[3])*z+A[2])*z+A[1])*z+A[0])/\
        (((((((z+B[6])*z+B[5])*z+B[4])*z+B[3])*z+B[2])*z+B[1])*z+B[0])
    h = np.real(Z)
    # F is antisymmetric in v, hence the sign factor; 0.5 converts the
    # imaginary part of the approximated function to the SIR convention.
    f = np.sign(vv)*np.imag(Z)*0.5
    return [h,f]
if __name__ == "__main__":
    # Quick visual check: plot the Voigt (red) and Faraday-Voigt (black)
    # profiles for a damping parameter of 0.2.
    import numpy as np
    import matplotlib.pyplot as plt
    uvals = np.linspace(-20., 20., 200)
    a = 2.E-1
    [h,f] = fvoigt(a,uvals)
    plt.plot(uvals,f,'k-')
    plt.plot(uvals,h,'r-')
    plt.show()
| {
"repo_name": "aasensio/pyiacsun",
"path": "pyiacsun/util/fvoigt.py",
"copies": "1",
"size": "1842",
"license": "mit",
"hash": -820709520044281900,
"line_mean": 24.6956521739,
"line_max": 71,
"alpha_frac": 0.6433224756,
"autogenerated": false,
"ratio": 2.37984496124031,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.352316743684031,
"avg_score": null,
"num_lines": null
} |
import matplotlib.pyplot as plt
import pyLib.imtools as imtools
import numpy as np
# # ========================= CREANDO DICCIONARIO
# cdict1={'red': ((0.0, 0.0, 0.0),
# (0.5, 0.0, 0.1),
# (1.0, 1.0, 1.0)),
# 'green':((0.0, 0.0, 0.0),
# (1.0, 0.0, 0.0)),
# 'blue': ((0.0, 0.0, 0.0),
# (0.5, 0.0, 0.1),
# (1.0, 1.0, 1.0))
# }
import matplotlib.colors as mcolors
# #blue_red1 = mcolors.LinearSegmentedColormap('BlueRed1', cdict1)
def make_colormap(seq):
    """Return a LinearSegmentedColormap
    seq: a sequence of floats and RGB-tuples. The floats should be increasing
    and in the interval (0,1).
    """
    # Pad with sentinel RGB tuples at 0.0 and 1.0 so every float boundary
    # has a neighbouring colour on both sides.
    seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
    cdict = {'red': [], 'green': [], 'blue': []}
    for i, item in enumerate(seq):
        if isinstance(item, float):
            # Colour to the left of the boundary and to the right of it.
            r1, g1, b1 = seq[i - 1]
            r2, g2, b2 = seq[i + 1]
            cdict['red'].append([item, r1, r2])
            cdict['green'].append([item, g1, g2])
            cdict['blue'].append([item, b1, b2])
    return mcolors.LinearSegmentedColormap('CustomMap', cdict)
# Shorthand converter from colour names to RGB tuples.
c = mcolors.ColorConverter().to_rgb
# Alternative colormap definitions kept for reference:
# #phimap = make_colormap(
# #[c('blue'), c('white'), 0.33, c('white'), 0.66, c('white'),c('blue')])
# #phimap = make_colormap(
# #[c('grey'), c('white'),0.5, c('white'), c('grey')])
# Cyclic colormap used for the azimuth maps (white-tomato-deepskyblue-white).
phimap = make_colormap([c('white'), c('tomato'), 0.33, c('tomato'), c('deepskyblue'), 0.66, c('deepskyblue'),c('white')])
# phimap = make_colormap([c('white'), c('tomato'), 0.33, c('tomato'), c('steelblue'), 0.66, c('steelblue'),c('white')])
# phimap = make_colormap([c('red'), 0.33, c('red'), c('blue'), 0.66, c('blue')])
# phimap = make_colormap([c('tomato'), c('gold'), 0.25, c('gold'), c('deepskyblue'), 0.50, c('deepskyblue'),c('hotpink'), 0.75, c('hotpink'),c('tomato')])
# phimap = make_colormap([c('tomato'), 0.33, c('gold'), 0.66, c('deepskyblue')])
import numpy as np
from matplotlib.colors import LinearSegmentedColormap as lsc
def cmap_map(function, cmap, name='colormap_mod', N=None, gamma=None):
    """
    Modify a colormap using `function` which must operate on 3-element
    arrays of [r, g, b] values.

    You may specify the number of colors, `N`, and the opacity, `gamma`,
    value of the returned colormap. These values default to the ones in
    the input `cmap`.

    You may also specify a `name` for the colormap, so that it can be
    loaded using plt.get_cmap(name).

    NOTE(review): this function is shadowed by the second `cmap_map`
    definition below, so it is never the one actually called.
    NOTE(review): the `map(...)` results are consumed with `sum(...)`
    and `.index(...)` — this assumes Python 2 list semantics; under
    Python 3 `map` returns an iterator and this would break. Confirm
    the target interpreter.
    """
    if N is None:
        N = cmap.N
    if gamma is None:
        # Relies on private matplotlib attributes (_gamma, _segmentdata).
        gamma = cmap._gamma
    cdict = cmap._segmentdata
    # Cast the steps into lists:
    step_dict = {key: map(lambda x: x[0], cdict[key]) for key in cdict}
    # Now get the unique steps (first column of the arrays):
    step_list = np.unique(sum(step_dict.values(), []))
    # 'y0', 'y1' are as defined in LinearSegmentedColormap docstring:
    y0 = cmap(step_list)[:, :3]
    y1 = y0.copy()[:, :3]
    # Go back to catch the discontinuities, and place them into y0, y1
    for iclr, key in enumerate(['red', 'green', 'blue']):
        for istp, step in enumerate(step_list):
            try:
                ind = step_dict[key].index(step)
            except ValueError:
                # This step is not in this color
                continue
            y0[istp, iclr] = cdict[key][ind][1]
            y1[istp, iclr] = cdict[key][ind][2]
    # Map the colors to their new values:
    y0 = np.array(map(function, y0))
    y1 = np.array(map(function, y1))
    # Build the new colormap (overwriting step_dict):
    for iclr, clr in enumerate(['red', 'green', 'blue']):
        step_dict[clr] = np.vstack((step_list, y0[:, iclr], y1[:, iclr])).T
    return lsc(name, step_dict, N=N, gamma=gamma)
def cmap_map(function,cmap):
    """ Applies function (which should operate on vectors of shape 3:
    [r, g, b], on colormap cmap. This routine will break any discontinuous points in a colormap.

    NOTE(review): this redefinition overrides the `cmap_map` defined
    just above (same name, different signature).
    NOTE(review): it uses bare `array` and `matplotlib` names that are
    not imported in this file as far as visible here — calling it would
    raise NameError unless they come from elsewhere; confirm.
    """
    cdict = cmap._segmentdata
    step_dict = {}
    # Firt get the list of points where the segments start or end
    for key in ('red','green','blue'): step_dict[key] = map(lambda x: x[0], cdict[key])
    step_list = sum(step_dict.values(), [])
    step_list = array(list(set(step_list)))
    # Then compute the LUT, and apply the function to the LUT
    reduced_cmap = lambda step : array(cmap(step)[0:3])
    old_LUT = array(map( reduced_cmap, step_list))
    new_LUT = array(map( function, old_LUT))
    # Now try to make a minimal segment definition of the new LUT
    cdict = {}
    for i,key in enumerate(('red','green','blue')):
        this_cdict = {}
        for j,step in enumerate(step_list):
            # Keep steps that belong to this channel or whose value changed.
            if step in step_dict[key]:
                this_cdict[step] = new_LUT[j,i]
            elif new_LUT[j,i]!=old_LUT[j,i]:
                this_cdict[step] = new_LUT[j,i]
        colorvector= map(lambda x: x + (x[1], ), this_cdict.items())
        colorvector.sort()
        cdict[key] = colorvector
    return matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024)
def dimMap(resultadoSir):
    """Return [height, width] of the map encoded in a SIR result array.

    BUG FIX: the body previously read the module-level global
    `resultadoSir1` instead of the `resultadoSir` parameter, so the
    argument was silently ignored (the sibling copy of this function in
    allmaps.py uses the parameter correctly).
    """
    # Last entry of the first block stores the (row, column) extents.
    height = resultadoSir.shape[0]*(resultadoSir[0][-1][0][0]+1)
    width = (resultadoSir[0][-1][0][1]+1)
    return [height,width]
def readmapa(resultadoSir, mapa, magnitud):
    # Fill `mapa` in place with the physical quantity number `magnitud`
    # extracted from the nested SIR inversion result, and return it.
    # NOTE(review): relies on the module-level globals `height`, `width`
    # and `index` being set before the call — confirm callers define them.
    cont = 0
    for fila in range(0, height):
        for columna in range(0, width):
            # Map the running pixel counter onto (block, point) indices.
            punto = cont % resultadoSir.shape[1]
            veces = int(cont/resultadoSir.shape[1])
            # Magnitudes 8-11 are stored as scalars per pixel; the rest
            # are depth profiles indexed by the global `index`.
            if magnitud == 8 or magnitud == 9 or magnitud == 10 or magnitud == 11:
                mapa[columna,fila] = resultadoSir[veces][punto][1][0][magnitud]
            else:
                mapa[columna,fila] = resultadoSir[veces][punto][1][0][magnitud][index]
            cont += 1
    return mapa
def corrphi(mapa):
    """Fold azimuth angles (degrees) in place into the range [0, 180]."""
    below_zero = mapa < 0
    mapa[below_zero] = (mapa[below_zero] + 360) % 360
    over_half = mapa > 180
    mapa[over_half] -= 180
# ==============================================================================================
# Top-level script: load a SIR inversion result, extract one physical
# quantity at a chosen optical depth, and save it as a colour map PDF.
# NOTE(review): these `global` statements are at module level, where
# they have no effect.
global index
global magnitud
import matplotlib
#hsv
# from numpy import array
# phimap = cmap_map(lambda x: x/2+0.5, matplotlib.cm.jet)
# ========================= INPUT
invSir1 = 'finalSir.npy'
logTau = 0.0
magnitud = 7
# Per-magnitude colormaps, axis titles and output file prefixes
# (indexed by `magnitud`).
cmapArray = ['','gray','gray','bone','bone','seismic','Spectral_r',phimap,'bone','gray','gray','cubehelix']
magTitle = ['TAU','$T$ $[kK]$','p','$v$ $[km/s]$','$B$ $[kG]$','$v$ $[km/s]$','$\gamma$ $[d]$','$\phi$ $[d]$','vmacro','fillingf','difusa','$\chi^2$']
magFile = ['TAU','TEMP','PRESION','VMICRO','CAMPO','VLOS','GAMMA','PHI','VMACRO','FILLING','DIFUSA','CHI2']
# ========================= MAP
resultadoSir1 = np.load(invSir1)
height, width = dimMap(resultadoSir1)
print('height:',height,'width:',width)
mapa = np.zeros((height, width))
# Position of the requested log(tau) in the depth grid.
index = np.where(resultadoSir1[0][0][1][0][0] == logTau)[0][0]
print('logTau: '+str(logTau)+' -> index: '+str(index))
readmapa(resultadoSir1, mapa.T ,magnitud)
# Colour-scale limits
if magnitud == 7: corrphi(mapa)
print('3sigma_map: {0:2.2f}'.format(3*np.std(mapa)))
print('Mean_map: {0:2.2f}'.format(np.mean(mapa)))
print('Min_map: {0:2.2f}'.format(np.min(mapa)))
print('Max_map: {0:2.2f}'.format(np.max(mapa)))
# Default limits: mean +/- 3 sigma, with per-magnitude overrides below.
vmini = np.mean(mapa)-3*np.std(mapa)
if np.min(mapa) >= 0.0 and magnitud != 1: vmini = 0.
vmaxi = np.mean(mapa)+3*np.std(mapa)
if magnitud == 1: vmini = np.min(mapa); vmaxi = np.max(mapa)
if magnitud == 6: vmaxi = 180.
if magnitud == 7: vmaxi = 180.;vmini = 0.
if magnitud == 5: vmaxi = np.mean(mapa)+3*np.std(mapa); vmini = -vmaxi
from matplotlib.colors import LogNorm
plt.imshow(mapa,cmap=cmapArray[magnitud],origin='lower',interpolation='None',vmin=vmini,vmax=vmaxi)#norm=LogNorm()
plt.title('Map 17jun14.006 (1)')
plt.xlabel('Slit Axis [pix]')
plt.ylabel('Time Axis [pix]')
cb = plt.colorbar(shrink=.46)#, ticks=[0.6, 0.8, 1., 1.2])
#cb = plt.colorbar(shrink=.46, ticks=[0.3, 0.6, 0.9, 1.2, 1.5])
# cb.set_label(r'Intensity HeI ({0:4.1f}) /$I_{{qs}}$({1:4.1f})'.format(xLambda[341],xLambda[posicontinuo]), labelpad=5., y=0.5, fontsize=12.)
cb.set_label(r""+magTitle[magnitud]+", $log(\\tau)$={0}".format(logTau), labelpad=8., y=0.5, fontsize=12.)
# plt.show()
plt.savefig(magFile[magnitud]+'_log{0:02d}.pdf'.format(int(logTau)), bbox_inches='tight')
print(magFile[magnitud]+'_log{0:02d}.pdf SAVE'.format(int(logTau)))
| {
"repo_name": "cdiazbas/MPySIR",
"path": "1map_OLD.py",
"copies": "1",
"size": "8343",
"license": "mit",
"hash": -844891488818037200,
"line_mean": 36.9859813084,
"line_max": 154,
"alpha_frac": 0.5863598226,
"autogenerated": false,
"ratio": 2.680051397365885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8676528662487187,
"avg_score": 0.01797651149573937,
"num_lines": 214
} |
import matplotlib.pyplot as plt
import pyLib.imtools as imtools
import numpy as np
# ========================= CREANDO PHIMAP
import matplotlib.colors as mcolors
def make_colormap(seq):
    """Build a LinearSegmentedColormap from a sequence of RGB tuples
    interleaved with increasing floats in (0, 1) that mark the segment
    boundaries."""
    # Sentinel tuples at 0.0 and 1.0 guarantee a neighbour on each side
    # of every float boundary.
    padded = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
    cdict = {'red': [], 'green': [], 'blue': []}
    for idx, entry in enumerate(padded):
        if not isinstance(entry, float):
            continue
        left_rgb = padded[idx - 1]
        right_rgb = padded[idx + 1]
        for channel, lo, hi in zip(('red', 'green', 'blue'), left_rgb, right_rgb):
            cdict[channel].append([entry, lo, hi])
    return mcolors.LinearSegmentedColormap('CustomMap', cdict)
# Shorthand converter from colour names to RGB tuples.
c = mcolors.ColorConverter().to_rgb
# Cyclic azimuth colormap: white -> tomato -> deepskyblue -> white.
phimap = make_colormap([c('white'), c('tomato'), 0.33, c('tomato'), c('deepskyblue'), 0.66, c('deepskyblue'),c('white')])
def dimMap(resultadoSir):
    """Return [height, width] of the map encoded in a SIR result array."""
    # The last entry of the first block stores the (row, column) extents.
    rows_per_block = resultadoSir[0][-1][0][0] + 1
    height = resultadoSir.shape[0] * rows_per_block
    width = resultadoSir[0][-1][0][1] + 1
    return [height, width]
def readmapa(resultadoSir, mapa, magnitud):
    # Fill `mapa` in place with quantity `magnitud` from the nested SIR
    # inversion result and return it.
    # NOTE(review): depends on globals `height`, `width` and `index`
    # which are not defined at module level in this script; `do1map`
    # below uses st.readSIRMap instead (its readmapa call is commented
    # out), so this function appears unused here — confirm before use.
    cont = 0
    for fila in range(0, height):
        for columna in range(0, width):
            # Translate the running pixel counter into (block, point).
            punto = cont % resultadoSir.shape[1]
            veces = int(cont/resultadoSir.shape[1])
            # Magnitudes 8-11 are per-pixel scalars; others are depth
            # profiles indexed by `index`.
            if magnitud == 8 or magnitud == 9 or magnitud == 10 or magnitud == 11:
                mapa[columna,fila] = resultadoSir[veces][punto][1][0][magnitud]
            else:
                mapa[columna,fila] = resultadoSir[veces][punto][1][0][magnitud][index]
            cont += 1
    return mapa
def corrphi(mapa):
    """Wrap azimuth values (degrees) in place so they lie in [0, 180]."""
    neg_mask = mapa < 0
    mapa[neg_mask] = (mapa[neg_mask] + 360) % 360
    big_mask = mapa > 180
    mapa[big_mask] = mapa[big_mask] - 180
def do1map(logTau, magnitud):
    """Render quantity number `magnitud` of the SIR inversion result at
    optical depth `logTau` and save it as a PDF map."""
    # ==============================================================================================
    # global index
    # global magnitud
    # ========================= INPUT
    invSir1 = 'finalSir.npy'
    # logTau = 0.0
    # magnitud = 2
    # hsv
    # Per-magnitude colormaps, colorbar titles and output file prefixes.
    cmapArray = ['gray','gray','gray','bone','bone','seismic','Spectral_r',phimap,'bone','gray','gray','cubehelix']
    magTitle = [r'${\rm log(\tau)=}$',r'${\rm T\ [kK]}$','p',r'${\rm v\ [km/s]}$',r'${\rm B\ [kG]}$',r'${\rm v\ [km/s]}$',r'${\rm \gamma\ [d]}$',r'${\rm \phi\ [d]}$','vmacro','fillingf','difusa',r'${\rm \chi^2}$']
    magFile = ['TAU','TEMP','PRESION','VMICRO','CAMPO','VLOS','GAMMA','PHI','VMACRO','FILLING','DIFUSA','CHI2']
    # ========================= MAP
    resultadoSir1 = np.load(invSir1)
    # height, width = dimMap(resultadoSir1)
    # print('height:',height,'width:',width)
    # mapa = np.zeros((height, width))
    # Position of the requested log(tau) in the depth grid.
    index = np.where(resultadoSir1[0][0][1][0][0] == logTau)[0][0]
    print('logTau: '+str(logTau)+' -> index: '+str(index))
    # readmapa(resultadoSir1, mapa.T ,magnitud)
    from pySir import sirtools as st
    mapa = st.readSIRMap(resultadoSir1, magnitud, index)
    # Colour-scale limits
    if magnitud == 7: corrphi(mapa)
    print('3sigma_map: {0:2.2f}'.format(3*np.std(mapa)))
    print('Mean_map: {0:2.2f}'.format(np.mean(mapa)))
    print('Min_map: {0:2.2f}'.format(np.min(mapa)))
    print('Max_map: {0:2.2f}'.format(np.max(mapa)))
    # Default limits: mean +/- 3 sigma, with per-magnitude overrides.
    vmini = np.mean(mapa)-3*np.std(mapa)
    if np.min(mapa) >= 0.0 and magnitud != 1: vmini = 0.
    vmaxi = np.mean(mapa)+3*np.std(mapa)
    if magnitud == 1 or magnitud == 4: vmini = np.min(mapa); vmaxi = np.max(mapa)
    if magnitud == 6: vmaxi = 180.
    if magnitud == 7: vmaxi = 180.;vmini = 0.
    if magnitud == 11: vmaxi = np.mean(mapa)+6*np.std(mapa); vmini = 0.
    if magnitud == 5: vmini = np.mean(mapa)-4*np.std(mapa); vmaxi = -vmini
    from matplotlib.colors import LogNorm
    plt.imshow(mapa,cmap=cmapArray[magnitud],origin='lower',interpolation='None',vmin=vmini,vmax=vmaxi)#norm=LogNorm()
    plt.title('Map 17jun14.006 (3-4)')
    plt.xlabel('Slit Axis [pix]')
    plt.ylabel('Time Axis [pix]')
    cb = plt.colorbar(shrink=.46)#, ticks=[0.6, 0.8, 1., 1.2])
    #cb = plt.colorbar(shrink=.46, ticks=[0.3, 0.6, 0.9, 1.2, 1.5])
    # cb.set_label(r'Intensity HeI ({0:4.1f}) /$I_{{qs}}$({1:4.1f})'.format(xLambda[341],xLambda[posicontinuo]), labelpad=5., y=0.5, fontsize=12.)
    loglabel = r'${\rm log(\tau)=}$'
    cb.set_label(r""+magTitle[magnitud]+r", "+loglabel+"{0}".format(logTau), labelpad=8., y=0.5, fontsize=12.)
    # plt.show()
    plt.savefig(magFile[magnitud]+'_log{0:02d}.pdf'.format(int(logTau)), bbox_inches='tight')
    print(magFile[magnitud]+'_log{0:02d}.pdf SAVE'.format(int(logTau)))
    print('-----------------------'+str(magnitud))
    plt.clf()
# Render every available magnitude (0-11) at log(tau) = 0.
for magnitud in range(12):
    do1map(0.0,magnitud)
| {
"repo_name": "cdiazbas/MPySIR",
"path": "allmaps.py",
"copies": "1",
"size": "4633",
"license": "mit",
"hash": 5517724357132258000,
"line_mean": 39.3660714286,
"line_max": 213,
"alpha_frac": 0.5598963954,
"autogenerated": false,
"ratio": 2.6474285714285712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8656174963377617,
"avg_score": 0.01023000069019081,
"num_lines": 112
} |
__author__ = 'Cecilia'
import gensim
import numpy as np
import scipy.io as spio
from sklearn.cluster import MiniBatchKMeans as kmeans
import os
def get_word_2_vec(model_file, save_file, concept_file):
model = gensim.models.Word2Vec.load_word2vec_format(model_file, binary=True)
with open(concept_file, 'r') as f:
concepts = f.read().split('\n')
concepts = [c.split('\t')[0].replace(' ', '_') for c in concepts if len(c) > 0]
features = np.Inf * np.ones((model.layer1_size, len(concepts)))
mean_feature_mask = np.zeros((len(concepts),))
feature_mask = np.ones((len(concepts),))
for i in range(0, len(concepts)):
if concepts[i] in model:
features[:, i] = model[concepts[i]]
elif concepts[i].replace('_', '') in model:
features[:, i] = model[concepts[i].replace('_', '')]
elif all([word in model for word in concepts[i].split('_')]):
#If the phrase is not in the glove dictionary, but the component words are, use the mean of the vectors
mean_feature_mask[i] = 1
concept_words = concepts[i].split('_')
features[:, i] = 0
for word in concept_words:
features[:, i] = features[:, i] + model[word]
features[:, i] = features[:, i] / len(concept_words)
else:
feature_mask[i] = 0
print "{} not in model".format(concepts[i])
features = features.transpose()
spio.savemat(save_feature_file, {'features': features, 'concepts': concepts, 'feature_mask': feature_mask, 'mean_feature_mask': mean_feature_mask})
# Adapted from dhammacks Word2VecExample
# Adapted from dhammacks Word2VecExample
def load_glove_vec(txt_filepath, num_dims):
    """Load GloVe vectors from a space-separated text file.

    Lines with fewer than 10 fields (headers, blanks) are skipped.
    Returns (glove_vec, glove_terms): an (n_vocab, num_dims) float array
    and the list of corresponding terms, in file order.
    """
    glove_terms = []
    # First pass: count usable lines so the array can be pre-allocated.
    n_vocab = 0
    with open(txt_filepath, 'r') as fin:
        for line in fin:
            fields = line.replace('\r', '').replace('\n', '').split(' ')
            if len(fields) < 10:
                continue
            n_vocab += 1
    glove_vec = np.zeros((n_vocab, num_dims))
    # Second pass: parse each term and its vector into the array.
    row = 0
    with open(txt_filepath, 'r') as fin:
        for line in fin:
            fields = line.replace('\r', '').replace('\n', '').split(' ')
            if len(fields) < 10:
                continue
            glove_terms.append(fields[0])
            glove_vec[row, :] = np.array([float(v) for v in fields[1:] if len(v) > 0])
            row += 1
    return glove_vec, glove_terms
def get_glove(dim, model_file, save_model_file, save_feature_file, concept_file):
    """Build a `dim`-dimensional GloVe feature vector for each concept
    in `concept_file` and save the result to `save_feature_file`.

    If `save_model_file` exists it is used as a cached .mat copy of the
    GloVe model; otherwise the raw text model is parsed.
    """
    if os.path.exists(save_model_file):
        model_mat = spio.loadmat(save_model_file)
        glove_vec = model_mat['glove_vec']
        glove_terms = model_mat['glove_terms']
    else:
        print "loading {}".format(model_file)
        glove_vec, glove_terms = load_glove_vec(model_file, dim)
        print "loaded"
        #spio.savemat(save_model_file, {'glove_vec': glove_vec, 'glove_terms': glove_terms})
        #print "saved"
    with open(concept_file, 'r') as f:
        concepts = f.read().split('\n')
    concepts = [c.split('\t')[0].replace(' ', '_') for c in concepts]
    num_concepts = len(concepts)
    # Rows left at Inf mark concepts that were never filled in.
    features = np.Inf * np.ones((num_concepts, dim))
    mean_feature_mask = np.zeros((num_concepts,))
    feature_mask = np.ones((num_concepts,))
    # Map each term to its row index for O(1) lookups.
    glove_terms_dict = dict(zip(glove_terms, range(0, len(glove_terms))))
    for i in range(0, num_concepts):
        # Fallback chain: exact term, underscore-free term, then the
        # mean of the component word vectors.
        if concepts[i] in glove_terms_dict:
            features[i, :] = glove_vec[glove_terms_dict[concepts[i]], :]
        elif concepts[i].replace('_', '') in glove_terms_dict:
            features[i, :] = glove_vec[glove_terms_dict[concepts[i].replace('_', '')], :]
        elif all([word in glove_terms_dict for word in concepts[i].split('_')]):
            #If the phrase is not in the glove dictionary, but the component words are, use the mean of the vectors
            mean_feature_mask[i] = 1
            concept_words = concepts[i].split('_')
            features[i, :] = 0
            for word in concept_words:
                features[i, :] = features[i, :] + glove_vec[glove_terms_dict[word], :]
            features[i, :] = features[i, :] / len(concept_words)
        else:
            feature_mask[i] = 0
            print "{} not in model".format(concepts[i])
    spio.savemat(save_feature_file, {'features': features, 'concepts': concepts, 'feature_mask': feature_mask, 'mean_feature_mask': mean_feature_mask})
if __name__ == "__main__":
    # Build word2vec features for the concepts of one crawled query.
    # (The GloVe variant is kept below, commented out.)
    query = "biology_domesticated_animal"
    min_num_images = 500
    concept_file = 'E:\data\Iconic\data\\test_crawler\data\concepts\\{}_{}_extended_owner_per_concept.txt'.format(query, min_num_images)
    model_file = 'E:\data\GloVe\glove.42B.300d.txt'
    save_model_file = ''
    dim = 300
    save_feature_file = "E:\data\Iconic\data\word2vec_features\\{}_{}_extended_feature_glove.42B.300d.mat".format(query, min_num_images)
    #get_glove(dim, model_file, save_model_file, save_feature_file, concept_file)
    model_file = 'E:\data\word2vec\GoogleNews-vectors-negative300.bin.gz'
    save_file = "E:\data\Iconic\data\word2vec_features\\{}_{}_extended_feature_word2vec.mat".format(query, min_num_images)
    get_word_2_vec(model_file, save_file, concept_file)
"repo_name": "crmauceri/VisualCommonSense",
"path": "code/database_builder/get_vocab_features.py",
"copies": "1",
"size": "5336",
"license": "mit",
"hash": -953453465884107000,
"line_mean": 42.3902439024,
"line_max": 151,
"alpha_frac": 0.5982008996,
"autogenerated": false,
"ratio": 3.2144578313253014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9292081386686456,
"avg_score": 0.004115468847769043,
"num_lines": 123
} |
__author__ = 'Cedric Da Costa Faro'
from datetime import datetime, date
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from flask import request
from flask.ext.login import UserMixin
from . import db, login_manager
# We define here user table with all required fields,
# we also retrieve user's avatar from Gravatar if any,
# We make sure that user's password is encrypted
class User(UserMixin, db.Model):
    """Application user account.

    Stores credentials (only a salted password hash is persisted),
    profile details, and relationships to the user's projects, clients
    and agenda entries. The MD5 hash of the email is cached in
    `avatar_hash` so Gravatar URLs need not re-hash on every request.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), nullable=False, unique=True, index=True)
    username = db.Column(db.String(64), nullable=False, unique=True, index=True)
    is_admin = db.Column(db.Boolean)
    password_hash = db.Column(db.String(128))
    name = db.Column(db.String(64))
    location = db.Column(db.String(128))
    bio = db.Column(db.Text())
    avatar_hash = db.Column(db.String(32))
    projects = db.relationship('Project', lazy='dynamic', backref='author')
    client = db.relationship('Client', lazy='dynamic', backref='author')
    agenda = db.relationship('Agenda', lazy='dynamic', backref='author')

    def __init__(self, **kwargs):
        """Create a user, pre-computing the Gravatar hash from the email."""
        super(User, self).__init__(**kwargs)
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()

    @property
    def password(self):
        """Write-only attribute: reading the plain password is forbidden."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build a Gravatar URL for this user, honouring the request scheme."""
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        # Renamed local from `hash` so the builtin hash() is not shadowed.
        email_hash = self.avatar_hash or hashlib.md5(self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=email_hash, size=size, default=default, rating=rating)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a user from the session-stored id."""
    return User.query.get(int(user_id))
# We define here the structure of the project table and link it to a user via a foreign key
class Project(db.Model):
    """A project owned by a user and attached to a client."""
    __tablename__ = 'projects'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(128), nullable=False)
    description = db.Column(db.Text())
    # Owning user and the client the project is billed to.
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    client_id = db.Column(db.Integer, db.ForeignKey('clients.id'))
    date = db.Column(db.DateTime())
    agenda_id = db.relationship('Agenda', lazy='dynamic', backref='project_agenda')
# We define here the structure of the client table which will be re-used when creating projects.
class Client(db.Model):
    """A client record, reusable across the user's projects."""
    __tablename__ = 'clients'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    location = db.Column(db.Text(), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    projects = db.relationship('Project', lazy='dynamic', backref='client_project')
# We define here the structure of the agenda table which will be used to record an activity
# belonging to a project
class Agenda(db.Model):
    """A dated activity entry recorded against a project by a user."""
    __tablename__ = 'agendas'
    id = db.Column(db.Integer, primary_key=True)
    agenda_date = db.Column(db.Date(), nullable=False)
    project_id = db.Column(db.Integer, db.ForeignKey('projects.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
| {
"repo_name": "cdcf/time_tracker",
"path": "app/models.py",
"copies": "1",
"size": "3650",
"license": "bsd-3-clause",
"hash": -5091426325701063000,
"line_mean": 40.4772727273,
"line_max": 96,
"alpha_frac": 0.6821917808,
"autogenerated": false,
"ratio": 3.5129932627526466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46951850435526465,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Cedric Da Costa Faro'
from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField, validators
from wtforms.validators import Required, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User
# We allow here a user to be created within the app, however this user will NOT be an admin user.
# Users have to insert both an e-mail address which can be only unique and a username that we also want to be unique.
class RegistrationForm(Form):
    """Sign-up form: unique email and username, password typed twice.

    Accounts created through this form are never admins.
    """
    email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
    username = StringField('Username', validators=[Required(), Length(1, 64),
                                                   Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                                          'Username must have only letters, '
                                                          'numbers, dots or underscores')])
    password = PasswordField('Password', validators=[Required(), validators.Length(min=6, message=
    ('Please choose a password with at least 6 characters')), EqualTo('password2', message='Passwords must match')])
    password2 = PasswordField('Confirm password', validators=[Required()])
    submit = SubmitField('Register')

    # WTForms calls validate_<fieldname> hooks automatically during validation.
    def validate_email(self, field):
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')

    def validate_username(self, field):
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already registered.')
# We allow here a registered user to log in to the app
# Email + password login form, with an optional persistent-session flag.
class LoginForm(Form):
    email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
    password = PasswordField('Password', validators=[Required()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log In')
| {
"repo_name": "cdcf/time_tracker",
"path": "app/auth/forms.py",
"copies": "1",
"size": "1942",
"license": "bsd-3-clause",
"hash": 8440094075152128000,
"line_mean": 51.4864864865,
"line_max": 120,
"alpha_frac": 0.6565396498,
"autogenerated": false,
"ratio": 4.526806526806527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0033801814436689396,
"num_lines": 37
} |
__author__ = 'Cedric Da Costa Faro'
from flask.ext.wtf import Form
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import Length, Required
from wtforms.fields.html5 import DateField
from ..models import Client
import datetime
# We define here the structure of a project, typically, the project name, a brief description and a date when it starts
# and we allow that it is updated if required
# Query factory used by the client selection field below.
def get_clients():
    """Query factory for the client QuerySelectField: all clients, unfiltered."""
    return Client.query.all()
class ProjectForm(Form):
    """Create/edit form for a project: title, description, start date and
    owning client.

    Fixes: the date default must be the callable ``datetime.date.today``
    (not its result), otherwise the default is computed once at import
    time and every form shows the server's start-up date.  Also corrects
    the misspelled 'Desciption' label.
    """
    title = StringField('Title', validators=[Required(), Length(1, 128)])
    description = TextAreaField('Description')
    # Pass the callable so WTForms evaluates it per form instantiation.
    date = DateField('Date', format='%d/%m/%Y', default=datetime.date.today)
    client_id = QuerySelectField('Select_Client',
                                 validators=[Required()],
                                 query_factory=get_clients,
                                 allow_blank=True,
                                 get_label='name',
                                 blank_text=u'-- Please choose a client --')
    submit = SubmitField('Submit')

    def from_model(self, project):
        """Populate the form fields from a Project instance."""
        self.title.data = project.title
        self.description.data = project.description
        self.client_id.data = project.client_project
        self.date.data = project.date

    def to_model(self, project):
        """Copy the submitted form values onto a Project instance."""
        project.title = self.title.data
        project.description = self.description.data
        project.client_project = self.client_id.data
        project.date = self.date.data
| {
"repo_name": "cdcf/time_tracker",
"path": "app/projects/forms.py",
"copies": "1",
"size": "1544",
"license": "bsd-3-clause",
"hash": -8379751659161218000,
"line_mean": 36.6585365854,
"line_max": 119,
"alpha_frac": 0.7046632124,
"autogenerated": false,
"ratio": 4.01038961038961,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.521505282278961,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Cedric Da Costa Faro'
from flask.ext.wtf import Form
from wtforms import SubmitField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import Required
from ..models import Client, Project
from wtforms.fields.html5 import DateField
import datetime
# We define here the structure of a client, typically, the client name and a location
def get_clients():
    """Query factory for client choice fields: every client, unfiltered."""
    return Client.query.all()
def get_projects():
    """Query factory for the project QuerySelectField: every project."""
    return Project.query.all()
class AgendaForm(Form):
    """Create/edit form for an agenda entry: a date plus the project it
    belongs to.

    Fix: the date default must be the callable ``datetime.date.today``
    (not its result), otherwise the default is computed once at import
    time and every form shows the server's start-up date.
    """
    # Pass the callable so WTForms evaluates it per form instantiation.
    agenda_date = DateField('Date', format='%d/%m/%Y', default=datetime.date.today)
    project_id = QuerySelectField('Select_Project',
                                  validators=[Required()],
                                  query_factory=get_projects,
                                  allow_blank=True,
                                  get_label='title',
                                  blank_text=u'-- Please choose a project --')
    submit = SubmitField('Submit')

    def from_model(self, agenda):
        """Populate the form fields from an Agenda instance."""
        self.agenda_date.data = agenda.agenda_date
        self.project_id.data = agenda.project_agenda

    def to_model(self, agenda):
        """Copy the submitted form values onto an Agenda instance."""
        agenda.agenda_date = self.agenda_date.data
        agenda.project_agenda = self.project_id.data
| {
"repo_name": "cdcf/time_tracker",
"path": "app/agendas/forms.py",
"copies": "1",
"size": "1135",
"license": "bsd-3-clause",
"hash": -4382612146455704600,
"line_mean": 29.6756756757,
"line_max": 85,
"alpha_frac": 0.6995594714,
"autogenerated": false,
"ratio": 3.626198083067093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9818519872810807,
"avg_score": 0.0014475363312572616,
"num_lines": 37
} |
__author__ = 'Cedric Da Costa Faro'
from flask import render_template, current_app, request, redirect, url_for, flash
from flask.ext.login import login_user, logout_user, login_required
from ..models import User
from . import auth
from app import db
from .forms import LoginForm, RegistrationForm
# We enable here new users to be registered in the app
# Registration endpoint: creates a standard (non-admin) account.
@auth.route('/register/', methods=['GET', 'POST'])
def register():
    """Show the registration form, and create the user on a valid POST."""
    form = RegistrationForm()
    if not form.validate_on_submit():
        # NOTE(review): template names here end with '/' — looks odd, confirm.
        return render_template('auth/register.html/', form=form)
    new_user = User(email=form.email.data,
                    username=form.username.data,
                    password=form.password.data)
    db.session.add(new_user)
    db.session.commit()
    flash('You can now login, remember to complete your profile.', 'info')
    return redirect(url_for('auth.login'))
# We first check that if we are in Production system, users have to be authenticated first.
# In any case, we define here the login page, and we check that both email and passwords are valid. We choose to
# work with passwords as they are unique values for a user.
@auth.route('/login/', methods=['GET', 'POST'])
def login():
    """Authenticate a user; outside DEBUG/TESTING, force HTTPS first."""
    needs_https = (not current_app.config['DEBUG']
                   and not current_app.config['TESTING']
                   and not request.is_secure)
    if needs_https:
        return redirect(url_for('auth.login', _external=True, _scheme='https'))
    form = LoginForm()
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        if account is None or not account.verify_password(form.password.data):
            flash('Invalid email or password.', 'error')
            return redirect(url_for('auth.login'))
        login_user(account, form.remember_me.data)
        # Honour the post-login redirect requested by @login_required.
        return redirect(request.args.get('next') or url_for('main.index'))
    return render_template('auth/login.html/', form=form)
# We define here the logout process
# End the current session and send the user back to the home page.
@auth.route('/logout/')
@login_required
def logout():
    """Log the current user out."""
    logout_user()
    flash('You have been logged out.', 'success')
    return redirect(url_for('main.index'))
"repo_name": "cdcf/time_tracker",
"path": "app/auth/routes.py",
"copies": "1",
"size": "2071",
"license": "bsd-3-clause",
"hash": 1725368505882962400,
"line_mean": 40.44,
"line_max": 112,
"alpha_frac": 0.6779333655,
"autogenerated": false,
"ratio": 3.806985294117647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49849186596176476,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Cedric Da Costa Faro'
from flask import render_template, flash, redirect, url_for, abort, request, current_app
from flask.ext.login import login_required, current_user
from .. import db
from ..models import Agenda
from . import agendas
from .forms import AgendaForm
# We allow here a user to create a new agenda once he has logged in and to modify his own ones only
@agendas.route('/new_agenda/', methods=['GET', 'POST'])
@login_required
def new_agenda():
    """Create a new agenda entry owned by the current user."""
    form = AgendaForm()
    if not form.validate_on_submit():
        return render_template('agendas/edit_agenda.html/', form=form)
    entry = Agenda(author=current_user)
    form.to_model(entry)
    db.session.add(entry)
    db.session.commit()
    flash('The agenda was added successfully.', 'success')
    return redirect(url_for('agendas.agenda_list'))
# We list here all agendas
@agendas.route('/agenda_list/', methods=['GET', 'POST'])
@login_required
def agenda_list():
    """Show all agenda entries, oldest first, paginated."""
    page = request.args.get('page', 1, type=int)
    ordered = Agenda.query.order_by(Agenda.agenda_date.asc())
    pagination = ordered.paginate(page,
                                  per_page=current_app.config['AGENDA_PER_PAGE'],
                                  error_out=False)
    return render_template('agendas/list_agenda.html/',
                           agendas=pagination.items,
                           pagination=pagination)
# This view renders a single agenda entry.
@agendas.route('/agenda/<int:id>/')
def agenda(id):
    """Render one agenda entry, or 404 when the id is unknown."""
    entry = Agenda.query.get_or_404(id)
    headers = {}
    if current_user.is_authenticated():
        # NOTE(review): disabling the browser's XSS filter for logged-in
        # users looks deliberate but unusual — confirm why.
        headers['X-XSS-Protection'] = '0'
    return render_template('agendas/agenda.html/', agenda=entry), 200, headers
# We enable agenda owner to edit them if required
@agendas.route('/edit_agenda/<int:id>/', methods=['GET', 'POST'])
@login_required
def edit_agenda(id):
    """Let the entry's author (or an admin) modify an agenda entry."""
    entry = Agenda.query.get_or_404(id)
    if not current_user.is_admin and entry.author != current_user:
        abort(403)
    form = AgendaForm()
    if form.validate_on_submit():
        form.to_model(entry)
        db.session.add(entry)
        db.session.commit()
        flash('The agenda was updated successfully.', 'success')
        return redirect(url_for('agendas.agenda_list'))
    # GET (or invalid POST): pre-fill the form from the stored entry.
    form.from_model(entry)
    return render_template('agendas/edit_agenda.html', form=form)
@agendas.route('/delete_agenda/<int:id>/', methods=['POST'])
@login_required
def delete_agenda(id):
    """Delete an agenda entry.

    Fix: only the entry's author or an admin may delete it, mirroring
    the permission check in edit_agenda.  Previously any logged-in user
    could delete anyone's entries.
    """
    agenda = Agenda.query.get_or_404(id)
    if not current_user.is_admin and agenda.author != current_user:
        abort(403)
    db.session.delete(agenda)
    db.session.commit()
    flash('Agenda was deleted successfully.', 'success')
    return redirect(url_for('agendas.agenda_list'))
| {
"repo_name": "cdcf/time_tracker",
"path": "app/agendas/routes.py",
"copies": "1",
"size": "2726",
"license": "bsd-3-clause",
"hash": 2749393665992331300,
"line_mean": 36.3424657534,
"line_max": 117,
"alpha_frac": 0.6515040352,
"autogenerated": false,
"ratio": 3.365432098765432,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4516936133965432,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Cedric Da Costa Faro'
from flask import render_template, flash, redirect, url_for
from flask.ext.login import login_required, current_user
from .. import db
from ..models import User
from . import users
from .forms import ProfileForm, ChangePasswordForm
# last part first_or_404 will return a 404 status code if user tries to manually overwrite the user
# name in the url and that this user does not exist in the db
# We then display the list of projects created by that user.
@users.route('/user/<username>/')
def user(username):
    """Public profile page; 404s when the username does not exist."""
    account = User.query.filter_by(username=username).first_or_404()
    return render_template('users/user.html/', user=account)
# We enable here a user to complete his user profile once he has logged in
@users.route('/profile/', methods=['GET', 'POST'])
@login_required
def profile():
    """Let the logged-in user view and update name, location and bio."""
    form = ProfileForm()
    editable = ('name', 'location', 'bio')
    if form.validate_on_submit():
        for attr in editable:
            setattr(current_user, attr, getattr(form, attr).data)
        # _get_current_object() unwraps the login proxy before handing
        # the actual user object to the SQLAlchemy session.
        db.session.add(current_user._get_current_object())
        db.session.commit()
        flash('Your profile has been updated.', 'success')
        return redirect(url_for('users.user', username=current_user.username))
    # GET (or invalid POST): pre-fill the form with the stored values.
    for attr in editable:
        getattr(form, attr).data = getattr(current_user, attr)
    return render_template('users/profile.html/', form=form)
# We enable users to update their password
@users.route('/change-password/', methods=['GET', 'POST'])
@login_required
def change_password():
    """Update the user's password after verifying the current one."""
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if not current_user.verify_password(form.old_password.data):
            flash('Invalid password', 'danger')
        else:
            current_user.password = form.password.data
            db.session.add(current_user)
            db.session.commit()
            flash('Your password has been updated', 'success')
            return redirect(url_for('main.index'))
    return render_template('users/profile.html/', form=form)
| {
"repo_name": "cdcf/time_tracker",
"path": "app/users/routes.py",
"copies": "1",
"size": "2073",
"license": "bsd-3-clause",
"hash": -6436530425740043000,
"line_mean": 38.1132075472,
"line_max": 99,
"alpha_frac": 0.687891944,
"autogenerated": false,
"ratio": 3.831792975970425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5019684919970425,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Cedric Da Costa Faro'
import os
from app import create_app
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
from app import db
from app.models import User
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
    """Names pre-imported into the ``manage.py shell`` session.

    Fix: the previous version also exposed ``Role=Role``, but ``Role``
    is never imported in this module, so opening the shell raised a
    NameError.  Only existing names are exposed now.
    """
    return dict(app=app, db=db, User=User)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
# This command line function allows to create an Admin user only. As an admin user has extensive rights, we do not
# want any user to be or become admin from the app and thus, keep it separate.
@manager.command
def adduser(email, username, admin=True):
    """Register an admin user from the command line.

    Prompts twice for the password and aborts the process when the two
    entries differ.  Kept out of the web app so ordinary users cannot
    become admins.
    """
    from getpass import getpass
    password = getpass()
    confirmation = getpass(prompt='Confirm: ')
    if password != confirmation:
        import sys
        sys.exit('Error: passwords do not match.')
    db.create_all()
    new_user = User(email=email, username=username, password=password, is_admin=admin)
    db.session.add(new_user)
    db.session.commit()
    print('User {0} was registered successfully.'.format(username))
if __name__ == '__main__':
manager.run()
| {
"repo_name": "cdcf/time_tracker",
"path": "manage.py",
"copies": "1",
"size": "1243",
"license": "bsd-3-clause",
"hash": 2040202919969653500,
"line_mean": 30.8717948718,
"line_max": 114,
"alpha_frac": 0.7127916331,
"autogenerated": false,
"ratio": 3.5514285714285716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9759702691891625,
"avg_score": 0.0009035025273893793,
"num_lines": 39
} |
__author__ = 'Cedric'
# each information will be used to sort the properties for the given policy
import random
from monopyly import *
from .Memory import *
from .Policy import *
class VSSchizoAI(PlayerAIBase):
'''
'''
def __init__(self):
    '''
    Set up memories, policies and the randomised per-property table.
    '''
    self.needed_money = 0
    self.auction_memory = AuctionMemory()
    self.deal_memory = DealMemory()
    self.property_policy = AcquiringPolicy(self)
    self.house_policy = HousePolicy_v2(self, HousePolicy_v2.HousePolicy.ONE_COMPLETE_SET, HousePolicy_v2.RepartitionPolicy.SAME_SIZE)
    self.selling_policy = SellingPolicy(self, self.deal_memory)
    self.chance_policy = ChancePolicy(random.random())
    self.jail_policy = JailPolicy(random.random(), random.random() * 10, random.random() * 10 + 12)
    self.money_to_be_taken = 0
    # Every square gets the same randomised 7-slot profile, drawn
    # per square in board order (the random-call sequence matches the
    # original hand-written literal).  The meaning of each slot is
    # defined by the policy classes — TODO confirm against Policy.py.
    self.properties_information = {
        square: [300 - (random.random() * 300),
                 random.random() + 0.25,
                 random.random() + 0.25,
                 random.random() + 0.25,
                 250 - (random.random() * 250),
                 random.random() * 10,
                 0.75]
        for square in (
            # brown
            Square.Name.OLD_KENT_ROAD, Square.Name.WHITECHAPEL_ROAD,
            # light blue
            Square.Name.THE_ANGEL_ISLINGTON, Square.Name.EUSTON_ROAD, Square.Name.PENTONVILLE_ROAD,
            # rose
            Square.Name.PALL_MALL, Square.Name.WHITEHALL, Square.Name.NORTHUMBERLAND_AVENUE,
            # orange
            Square.Name.BOW_STREET, Square.Name.MARLBOROUGH_STREET, Square.Name.VINE_STREET,
            # red
            Square.Name.STRAND, Square.Name.FLEET_STREET, Square.Name.TRAFALGAR_SQUARE,
            # yellow
            Square.Name.LEICESTER_SQUARE, Square.Name.COVENTRY_STREET, Square.Name.PICCADILLY,
            # green
            Square.Name.REGENT_STREET, Square.Name.OXFORD_STREET, Square.Name.BOND_STREET,
            # dark blue
            Square.Name.PARK_LANE, Square.Name.MAYFAIR,
            # stations
            Square.Name.KINGS_CROSS_STATION, Square.Name.MARYLEBONE_STATION,
            Square.Name.FENCHURCH_STREET_STATION, Square.Name.LIVERPOOL_STREET_STATION,
            # utilities
            Square.Name.ELECTRIC_COMPANY, Square.Name.WATER_WORKS,
        )
    }
def get_name(self):
    """Display name used by the game engine for this AI."""
    return 'VSSchizoAI'
def start_of_game(self):
    '''
    Reset the per-game money tracking.  Called once when a game
    starts; no response is required.
    '''
    self.needed_money = 0
    self.money_to_be_taken = 0
def start_of_turn(self, game_state, player):
    '''
    Called when any player's turn starts; no response is required.

    Re-draws the chance/jail policies and the per-square valuation
    table from fresh random numbers (the "schizo" behaviour), while
    keeping the longer-lived policies and memories from __init__.
    '''
    self.needed_money = 0
    self.money_to_be_taken = 0
    self.chance_policy = ChancePolicy(random.random())
    self.jail_policy = JailPolicy(random.random(), random.random() * 10, random.random() * 10 + 12)
    # Seven random slots per square, drawn in board order exactly as
    # the original hand-written literal did.  Slot semantics are
    # defined by the policy classes — TODO confirm against Policy.py.
    self.properties_information = {
        square: [300 - (random.random() * 300),
                 random.random() + 0.25,
                 random.random() + 0.25,
                 random.random() + 0.25,
                 250 - (random.random() * 250),
                 random.random() * 10,
                 0.75]
        for square in (
            # brown
            Square.Name.OLD_KENT_ROAD, Square.Name.WHITECHAPEL_ROAD,
            # light blue
            Square.Name.THE_ANGEL_ISLINGTON, Square.Name.EUSTON_ROAD, Square.Name.PENTONVILLE_ROAD,
            # rose
            Square.Name.PALL_MALL, Square.Name.WHITEHALL, Square.Name.NORTHUMBERLAND_AVENUE,
            # orange
            Square.Name.BOW_STREET, Square.Name.MARLBOROUGH_STREET, Square.Name.VINE_STREET,
            # red
            Square.Name.STRAND, Square.Name.FLEET_STREET, Square.Name.TRAFALGAR_SQUARE,
            # yellow
            Square.Name.LEICESTER_SQUARE, Square.Name.COVENTRY_STREET, Square.Name.PICCADILLY,
            # green
            Square.Name.REGENT_STREET, Square.Name.OXFORD_STREET, Square.Name.BOND_STREET,
            # dark blue
            Square.Name.PARK_LANE, Square.Name.MAYFAIR,
            # stations
            Square.Name.KINGS_CROSS_STATION, Square.Name.MARYLEBONE_STATION,
            Square.Name.FENCHURCH_STREET_STATION, Square.Name.LIVERPOOL_STREET_STATION,
            # utilities
            Square.Name.ELECTRIC_COMPANY, Square.Name.WATER_WORKS,
        )
    }
def landed_on_unowned_property(self, game_state, player, property):
    '''
    Called when the active player lands on an unowned property.

    Must return PlayerAIBase.Action.BUY or DO_NOT_BUY; the choice is
    delegated to the acquiring policy.
    '''
    return self.property_policy.acquire_through_landing(game_state, player, property)
def money_given(self, player, amount):
    '''
    Notification that money was received; nothing is tracked here.
    '''
    pass
def money_will_be_taken(self, player, amount):
    '''
    Warning that `amount` is about to be taken (deals/mortgaging can
    still happen before payment).  Records the amount, and how much of
    it is not covered by cash, so the selling and mortgaging policies
    know what they must raise.
    '''
    self.money_to_be_taken = amount
    if amount > player.state.cash:
        self.needed_money = amount - player.state.cash
def money_taken(self, player, amount):
    '''
    Notification that money was taken; nothing is tracked here.
    '''
    pass
def players_birthday(self):
    '''
    Answer to the 'It is your birthday...' Community Chest card.

    The engine requires exactly this string (casing and exclamation
    mark included); anything else incurs a £100 fine instead of £10.
    '''
    return "Happy Birthday!"
def pay_ten_pounds_or_take_a_chance(self, game_state, player):
    '''
    Called for the "Pay a £10 fine or take a Chance" card.

    Returns PAY_TEN_POUND_FINE or TAKE_A_CHANCE, as decided by the
    chance policy drawn for this turn.
    '''
    return self.chance_policy.compute()
def property_offered_for_auction(self, game_state, player, property):
    '''
    Return our bid for an auctioned property; 0 means no bid.

    The winner pays £1 more than the second-highest bid (eBay rule).
    The bid amount itself comes from the acquiring policy.
    '''
    return self.property_policy.acquire_through_auction(game_state, player, property)
def auction_result(self, status, property, player, amount_paid):
    '''
    Broadcast of an auction's outcome.

    Successful sales are stored in the auction memory so realised
    prices can inform later bids; failed auctions are ignored.
    '''
    if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
        self.auction_memory.add_auction(property, player, amount_paid)
def build_houses(self, game_state, player):
    '''
    Return a list of (Property, additional_houses) tuples to build
    this turn, or an empty list to build nothing.

    The engine enforces the rules (whole unmortgaged sets only,
    balanced housing, rollback when funds are insufficient); the
    actual choice is delegated to the house policy.
    '''
    return self.house_policy.compute(game_state, player)
def sell_houses(self, game_state, player):
    '''
    Option to sell houses before paying a debt, fine or rent.

    Only sells when money_will_be_taken recorded a cash shortfall;
    the (property, count) tuples come from the selling policy.
    '''
    if self.needed_money <= 0:
        return []
    return self.selling_policy.computeHouse(game_state, player)
def mortgage_properties(self, game_state, player):
    '''
    Option to mortgage properties before paying a debt.

    Only mortgages when money_will_be_taken recorded a cash
    shortfall; the properties to mortgage come from the selling
    policy.  Mortgaging yields half of each property's face value.
    '''
    if self.needed_money <= 0:
        return []
    return self.selling_policy.computeMortgage(game_state, player)
def unmortgage_properties(self, game_state, player):
    '''
    Start-of-turn option to unmortgage properties (cost: half the
    face value plus 10%).  Returns the Property objects chosen by the
    acquiring policy, or an empty list.
    '''
    return self.property_policy.acquire_through_unmortgage(game_state, player)
def get_out_of_jail(self, game_state, player):
    '''
    Choose how to leave jail: BUY_WAY_OUT_OF_JAIL (costs 50),
    PLAY_GET_OUT_OF_JAIL_FREE_CARD, or STAY_IN_JAIL.
    '''
    # The jail policy also needs the AI object itself as context,
    # hence the explicit extra `self` argument.
    return self.jail_policy.compute(self, game_state, player)
def propose_deal(self, game_state, player):
    '''
    Propose a deal to another player.

    Returns a DealProposal object, or None for no deal. When we are
    short of cash the selling policy drives the proposal (we try to
    sell); otherwise the acquiring policy proposes a purchase, limited
    to the cash left after any payment already pending against us.
    '''
    if self.needed_money > 0:
        # We owe money: try to raise cash by dealing something away.
        return self.selling_policy.propose_deal(game_state, player)
    # Otherwise try to buy; do not spend cash that is already
    # earmarked to be taken from us.
    spendable = player.state.cash - self.money_to_be_taken
    return self.property_policy.acquire_through_deal_proposal(game_state, player, spendable)
def deal_proposed(self, game_state, player, deal_proposal):
    '''
    Respond to a deal proposed to us by another player.

    Returns a DealResponse (ACCEPT with a cash limit, or REJECT);
    the acquiring policy values the offered properties.
    '''
    return self.property_policy.acquire_through_deal_being_proposed(
        game_state, player, deal_proposal)
def deal_result(self, deal_info):
    '''
    Notification that a deal we took part in has finished.

    deal_info is a PlayerAIBase.DealInfo value indicating whether the
    deal succeeded and, if not, why. Nothing to do here.
    '''
    return None
def deal_completed(self, deal_result):
    '''
    Notification (sent to all players) of a successfully completed deal.

    The DealResult is recorded so later valuations can take other
    players' trading behaviour into account. Note the cash transferred
    can be negative (proposee paid the proposer).
    '''
    self.deal_memory.add_deal(deal_result)
def player_went_bankrupt(self, player):
    '''
    Notification that a player went bankrupt (sent to all survivors).

    When the bankrupt player is us and we still held value (net worth
    plus cash above zero), log a post-mortem of the assets we failed
    to liquidate — useful for tuning the selling policies.
    No response is required.
    '''
    # Only analyse our own bankruptcy, and only when assets remained.
    if player.name != self.get_name():
        return
    if player.net_worth + player.state.cash <= 0:
        return
    # Tally what we still owned when we went under.
    # NOTE: loop variable renamed from 'property' to avoid shadowing
    # the builtin.
    property_net_worth = 0
    property_with_house = 0
    property_unmortgaged = 0
    houses = 0
    for prop in player.state.properties:
        if not prop.is_mortgaged:
            property_unmortgaged += 1
            # Mortgage value is cash we could still have raised.
            property_net_worth += prop.mortgage_value
        if isinstance(prop, Street) and prop.number_of_houses > 0:
            property_with_house += 1
            houses += prop.number_of_houses
            # Houses resell for half their purchase price.
            property_net_worth += int(prop.house_price / 2 * prop.number_of_houses)
    if property_unmortgaged > 0 or property_with_house > 0 or houses > 0:
        Logger.log(player.name + " went bankrupt with a cash of " + format(player.state.cash) + " and a net of " + format(player.net_worth) + "/" + format(property_net_worth), Logger.ERROR)
        Logger.log(player.name + " went bankrupt with " + format(property_unmortgaged) + " properties unmortgaged", Logger.ERROR)
        Logger.log(player.name + " went bankrupt with " + format(property_with_house) + " properties with house", Logger.ERROR)
        Logger.log(player.name + " went bankrupt with " + format(houses) + " houses", Logger.ERROR)
        # Per-property detail of everything we failed to liquidate.
        for prop in player.state.properties:
            if not prop.is_mortgaged:
                Logger.log(player.name + " unmortgage property: " + prop.name, Logger.ERROR)
            if isinstance(prop, Street) and prop.number_of_houses > 0:
                Logger.log(player.name + " housed property: " + prop.name + " / " + format(prop.number_of_houses), Logger.ERROR)
def player_ran_out_of_time(self, player):
    '''
    Notification that a player was removed from the game for exceeding
    their processing-time budget. Nothing needs doing here.
    '''
    return None
def game_over(self, winner, maximum_rounds_played):
    '''
    Notification that the game has ended. winner is the winning Player
    (or None for a draw); maximum_rounds_played is True when the game
    hit the round limit. Nothing needs doing here.
    '''
    return None
def ai_error(self, message):
    '''
    Notification that one of our callback return values was invalid
    (e.g. wrong type). Nothing needs doing here.
    '''
    return None
| {
"repo_name": "richard-shepherd/monopyly",
"path": "AIs/Cedric Daligny/VSSchizoAI.py",
"copies": "1",
"size": "29822",
"license": "mit",
"hash": 6162684342410625000,
"line_mean": 52.6258992806,
"line_max": 214,
"alpha_frac": 0.5989401664,
"autogenerated": false,
"ratio": 3.5310279488394127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46299681152394123,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Cedric'
# each information will be used to sort the properties for the given policy
import random
from monopyly import *
from .Memory import *
from .Policy import *
class VSSmartBuyerBlueFocusAI(PlayerAIBase):
    '''
    Policy-driven Monopoly AI.

    Every game decision is delegated to small policy objects
    (acquiring, housing, selling, chance, jail); per-property tuning
    lives in the properties_information table. This variant is tuned
    to favour the light-blue and dark-blue sets.
    '''

    def __init__(self):
        '''
        Sets up the memories, the decision policies and the
        per-property tuning table.
        '''
        # Cash we still need to raise to cover a pending payment.
        self.needed_money = 0
        # Memories of past auctions and deals, consulted by policies.
        self.auction_memory = AuctionMemory()
        self.deal_memory = DealMemory()
        # Decision policies.
        self.property_policy = AcquiringPolicy(self)
        self.house_policy = HousePolicy_v2(self, HousePolicy_v2.HousePolicy.ONE_COMPLETE_SET, HousePolicy_v2.RepartitionPolicy.SAME_SIZE)
        self.selling_policy = SellingPolicy(self, self.deal_memory)
        self.chance_policy = ChancePolicy(0.2)
        self.jail_policy = JailPolicy(0.8, 3, 20)
        # Amount about to be taken from us (set in money_will_be_taken).
        self.money_to_be_taken = 0
        # Per-property tuning vector: [buying cash threshold,
        # deal-proposal factor, deal-proposed factor, auction factor,
        # house cash threshold, house sorter (-1: no houses),
        # mortgage/deal threshold].
        self.properties_information = {
            # brown
            Square.Name.OLD_KENT_ROAD: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.WHITECHAPEL_ROAD: [300, 1, 1, 1, 250, 4, 0.75],
            # light blue
            Square.Name.THE_ANGEL_ISLINGTON: [0, 1.25, 1.25, 1.25, 50, 9, 0.75],
            Square.Name.EUSTON_ROAD: [0, 1.25, 1.25, 1.25, 50, 9, 0.75],
            Square.Name.PENTONVILLE_ROAD: [0, 1.25, 1.25, 1.25, 0, 10, 0.75],
            # rose
            Square.Name.PALL_MALL: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.WHITEHALL: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.NORTHUMBERLAND_AVENUE: [300, 1, 1, 1, 250, 5, 0.75],
            # orange
            Square.Name.BOW_STREET: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.MARLBOROUGH_STREET: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.VINE_STREET: [300, 1, 1, 1, 250, 5, 0.75],
            # red
            Square.Name.STRAND: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.FLEET_STREET: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.TRAFALGAR_SQUARE: [300, 1, 1, 1, 250, 5, 0.75],
            # yellow
            Square.Name.LEICESTER_SQUARE: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.COVENTRY_STREET: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.PICCADILLY: [300, 1, 1, 1, 250, 5, 0.75],
            # green
            Square.Name.REGENT_STREET: [300, 1, 1, 1, 250, 5, 0.75],
            Square.Name.OXFORD_STREET: [300, 1, 1, 1, 250, 5, 0.75],
            Square.Name.BOND_STREET: [300, 1, 1, 1, 250, 5, 0.75],
            # dark blue
            Square.Name.PARK_LANE: [0, 1.25, 1.25, 1.25, 50, 9, 0.75],
            Square.Name.MAYFAIR: [0, 1.25, 1.25, 1.25, 0, 10, 0.75],
            # stations
            Square.Name.KINGS_CROSS_STATION: [150, 1.15, 1.15, 1.25, 0, -1, 0],
            Square.Name.MARYLEBONE_STATION: [150, 1.15, 1.15, 1.25, 0, -1, 0],
            Square.Name.FENCHURCH_STREET_STATION: [150, 1.15, 1.15, 1.25, 0, -1, 0],
            Square.Name.LIVERPOOL_STREET_STATION: [150, 1.15, 1.15, 1.25, 0, -1, 0],
            # utilities
            Square.Name.ELECTRIC_COMPANY: [250, 1.15, 1.15, 1, 0, -1, 0.5],
            Square.Name.WATER_WORKS: [250, 1.15, 1.15, 1, 0, -1, 0.5],
        }

    def get_name(self):
        '''Returns the name this AI plays under.'''
        return 'VSSmartBuyerBlueFocusAI'

    def start_of_game(self):
        '''Called at the start of the game; resets cash tracking.'''
        self.needed_money = 0
        self.money_to_be_taken = 0

    def start_of_turn(self, game_state, player):
        '''Called when any AI's turn starts; resets per-turn cash tracking.'''
        self.needed_money = 0
        self.money_to_be_taken = 0

    def landed_on_unowned_property(self, game_state, player, property):
        '''
        Returns Action.BUY or Action.DO_NOT_BUY for the unowned square
        we landed on; delegated to the acquiring policy.
        '''
        return self.property_policy.acquire_through_landing(game_state, player, property)

    def money_given(self, player, amount):
        '''Notification that money was given to us. Nothing to do.'''
        pass

    def money_will_be_taken(self, player, amount):
        '''
        Notification that `amount` is about to be taken from us.
        Records the pending amount and, when our cash cannot cover it,
        the shortfall the selling policies must raise.
        '''
        self.money_to_be_taken = amount
        if amount > player.state.cash:
            self.needed_money = amount - player.state.cash

    def money_taken(self, player, amount):
        '''Notification that money was taken from us. Nothing to do.'''
        pass

    def players_birthday(self):
        '''
        Must return exactly "Happy Birthday!" for the Community Chest
        birthday card, otherwise the fine is 100 instead of 10.
        '''
        return "Happy Birthday!"

    def pay_ten_pounds_or_take_a_chance(self, game_state, player):
        '''
        Returns PAY_TEN_POUND_FINE or TAKE_A_CHANCE for the
        "pay a fine or take a Chance" card; decided by the chance policy.
        '''
        return self.chance_policy.compute()

    def property_offered_for_auction(self, game_state, player, property):
        '''
        Returns our bid for the auctioned property (0 means no bid);
        delegated to the acquiring policy.
        '''
        return self.property_policy.acquire_through_auction(game_state, player, property)

    def auction_result(self, status, property, player, amount_paid):
        '''
        Notification of an auction outcome. Successful auctions are
        recorded so the policies can learn what other players pay.
        '''
        if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
            self.auction_memory.add_auction(property, player, amount_paid)

    def build_houses(self, game_state, player):
        '''
        Returns a list of (Street, additional_houses) tuples to build;
        delegated to the house policy.
        '''
        return self.house_policy.compute(game_state, player)

    def sell_houses(self, game_state, player):
        '''
        Returns a list of (Street, houses) tuples to sell, but only
        when we actually need to raise cash for a pending payment.
        '''
        if self.needed_money > 0:
            return self.selling_policy.computeHouse(game_state, player)
        return []

    def mortgage_properties(self, game_state, player):
        '''
        Returns a list of properties to mortgage (yields half face
        value each), but only when we need to raise cash.
        '''
        if self.needed_money > 0:
            return self.selling_policy.computeMortgage(game_state, player)
        return []

    def unmortgage_properties(self, game_state, player):
        '''
        Returns a list of properties to unmortgage (cost: half face
        value plus 10%); delegated to the acquiring policy.
        '''
        return self.property_policy.acquire_through_unmortgage(game_state, player)

    def get_out_of_jail(self, game_state, player):
        '''
        Returns BUY_WAY_OUT_OF_JAIL, PLAY_GET_OUT_OF_JAIL_FREE_CARD or
        STAY_IN_JAIL; the jail policy gets the AI itself as context.
        '''
        return self.jail_policy.compute(self, game_state, player)

    def propose_deal(self, game_state, player):
        '''
        Returns a DealProposal or None. When short of cash the selling
        policy proposes a sale; otherwise the acquiring policy proposes
        a purchase, limited to the cash left after pending payments.
        '''
        if self.needed_money > 0:
            return self.selling_policy.propose_deal(game_state, player)
        spendable = player.state.cash - self.money_to_be_taken
        return self.property_policy.acquire_through_deal_proposal(game_state, player, spendable)

    def deal_proposed(self, game_state, player, deal_proposal):
        '''
        Returns a DealResponse for a deal another player proposed to
        us; delegated to the acquiring policy.
        '''
        return self.property_policy.acquire_through_deal_being_proposed(game_state, player, deal_proposal)

    def deal_result(self, deal_info):
        '''Notification that a deal we took part in finished. Nothing to do.'''
        pass

    def deal_completed(self, deal_result):
        '''Records every completed deal so the policies can learn from it.'''
        self.deal_memory.add_deal(deal_result)

    def player_went_bankrupt(self, player):
        '''
        Notification that a player went bankrupt.

        When the bankrupt player is us and we still held value, log a
        post-mortem of the assets we failed to liquidate — useful for
        tuning the selling policies. No response is required.
        '''
        if player.name != self.get_name():
            return
        if player.net_worth + player.state.cash <= 0:
            return
        # Tally what we still owned when we went under. Loop variable
        # is 'prop' to avoid shadowing the builtin 'property'.
        property_net_worth = 0
        property_with_house = 0
        property_unmortgaged = 0
        houses = 0
        for prop in player.state.properties:
            if not prop.is_mortgaged:
                property_unmortgaged += 1
                # Mortgage value is cash we could still have raised.
                property_net_worth += prop.mortgage_value
            if isinstance(prop, Street) and prop.number_of_houses > 0:
                property_with_house += 1
                houses += prop.number_of_houses
                # Houses resell for half their purchase price.
                property_net_worth += int(prop.house_price / 2 * prop.number_of_houses)
        if property_unmortgaged > 0 or property_with_house > 0 or houses > 0:
            Logger.log(player.name + " went bankrupt with a cash of " + format(player.state.cash) + " and a net of " + format(player.net_worth) + "/" + format(property_net_worth), Logger.ERROR)
            Logger.log(player.name + " went bankrupt with " + format(property_unmortgaged) + " properties unmortgaged", Logger.ERROR)
            Logger.log(player.name + " went bankrupt with " + format(property_with_house) + " properties with house", Logger.ERROR)
            Logger.log(player.name + " went bankrupt with " + format(houses) + " houses", Logger.ERROR)
            # Per-property detail of everything we failed to liquidate.
            for prop in player.state.properties:
                if not prop.is_mortgaged:
                    Logger.log(player.name + " unmortgage property: " + prop.name, Logger.ERROR)
                if isinstance(prop, Street) and prop.number_of_houses > 0:
                    Logger.log(player.name + " housed property: " + prop.name + " / " + format(prop.number_of_houses), Logger.ERROR)

    def player_ran_out_of_time(self, player):
        '''Notification that a player was removed for slowness. Nothing to do.'''
        pass

    def game_over(self, winner, maximum_rounds_played):
        '''Notification that the game ended (winner may be None). Nothing to do.'''
        pass

    def ai_error(self, message):
        '''Notification that one of our return values was invalid. Nothing to do.'''
        pass
| {
"repo_name": "richard-shepherd/monopyly",
"path": "AIs/Cedric Daligny/VSSmartBuyerBlueFocusAI.py",
"copies": "1",
"size": "19739",
"license": "mit",
"hash": -3457856910518529500,
"line_mean": 37.3910505837,
"line_max": 197,
"alpha_frac": 0.6022399027,
"autogenerated": false,
"ratio": 3.82644948613535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9907167233710801,
"avg_score": 0.004304431024909746,
"num_lines": 514
} |
__author__ = 'Cedric'
# each information will be used to sort the properties for the given policy
#import random
from monopyly import *
from .Memory import *
from .Policy import *
class VSSmartBuyerNeutral(PlayerAIBase):
'''
'''
def __init__(self):
    '''
    Sets up the memories, the decision policies and the per-property
    tuning table for this AI.
    '''
    # memory information
    # Cash still needed to cover a pending payment (see money_will_be_taken).
    self.needed_money = 0
    # Memories of past auctions and deals, consulted by the policies.
    self.auction_memory = AuctionMemory()
    self.deal_memory = DealMemory()
    # Decision policies: acquisition, house building, selling, chance
    # cards and jail behaviour.
    self.property_policy = AcquiringPolicy(self)
    self.house_policy = HousePolicy_v2(self,HousePolicy_v2.HousePolicy.ONE_COMPLETE_SET, HousePolicy_v2.RepartitionPolicy.SAME_SIZE)
    self.selling_policy = SellingPolicy(self,self.deal_memory)
    #self.selling_policy = SellingPolicy_v2(self,SellingPolicy_v2.SellingPolicy.MORTGAGE_DEAL_STATION_UTILITY_FIRST, SellingPolicy_v2.HousePolicy.ONE_ON_EACH_SET)
    self.chance_policy = ChancePolicy(0.25)
    self.jail_policy = JailPolicy(0.8, 4, 20)
    # Turn counter, incremented in start_of_turn.
    self.turn = 0
    # Amount about to be taken from us, and whether a payment
    # sequence is currently in progress.
    self.money_to_be_taken = 0
    self.in_state_of_taking_money = False
    '''
    PropertyInformation
    # information used to know is a property will be bought (or unmortgaged)
    buying_property_cash_threshold # this threshold indicate the remaining cash wanted after buying the property (negative means that even if the cash if not available, we want to buy)
    buying_property_deal_proposal_factor # the factor will be applied to the price of the property during the deal_proposal pahse (the buyingThreshold should also be taken into acount as we wanted some cash left if needed)
    buying_property_deal_proposed_factor # the factor will be applied to the price of the property during the deal_proposed alert (the buyingThreshold should also be taken into acount as we wanted some cash left if needed)
    buying_property_auctionFactor # the factor will be applied to the price of the property during the auction alert (the buyingThreshold should also be taken into acount as we wanted some cash left if needed)
    # information used to know if house will be build
    buying_house_cash_threshold # similar to the property cash threshold, the remaining cash wanted after transaction occurs
    buying_house_sorter # value use to sort the property in terms of house building preferences (-1 means that housing is not available)
    buying_house_policy # ONE_COMPLETE_SET, ONE_AVAILABLE_PROPERTY, ALL_AVAIALABLE_PROPERTY, ALL_COMPLETE_SET
    buying_house_repartition_policy # MAXIMIZE_HOTEL, SAME_SIZE
    # information used to know how to retrieve money
    # - the inverted buying_house_sorter will be used to know which house's property/set need to sold first
    # - the inverted buying_house_cash_threshold will be used to know which property to mortgage first
    selling_policy # MORTGAGE_DEAL_STREET_FIRST, SELL_HOUSE_FIRST, MORTGAGE_DEAL_STATION_UTILTY_FIRST,
    selling_mortgage_deal_threshold # the threshold to know if we mortgage or try to deal a propery (tested against a random number)
    selling_house_policy # ONE_ON_EACH_SET, WHOLE_SET_LESS_HOUSE_FIRST, WHOLE_SET_MAX_HOUSE_FIRST, WHOLE_SET_SMOOTH_HOUSE_ON_BOARD
    '''
    # Per-property tuning vectors, in the order documented above:
    # [buy cash threshold, deal-proposal factor, deal-proposed factor,
    #  auction factor, house cash threshold, house sorter, mortgage/deal
    #  threshold].
    self.properties_information = {
        #brown
        Square.Name.OLD_KENT_ROAD: [200, 1.1, 1, 1.25, 150, 5, 0.75],
        Square.Name.WHITECHAPEL_ROAD: [200, 1.1, 1, 1.25, 125, 6, 0.75],
        #light blue
        Square.Name.THE_ANGEL_ISLINGTON: [300, 1.1, 1, 1, 250, 5, 0.75],
        Square.Name.EUSTON_ROAD: [300, 1.1, 1, 1, 250, 5, 0.75],
        Square.Name.PENTONVILLE_ROAD: [300, 1.1, 1, 1, 225, 6, 0.75],
        #rose
        Square.Name.PALL_MALL: [150, 1.1, 1.1, 1.2, 250, 7, 0.75],
        Square.Name.WHITEHALL: [150, 1.1, 1.1, 1.2, 250, 7, 0.75],
        Square.Name.NORTHUMBERLAND_AVENUE: [150, 1.1, 1.1, 1.2, 225, 8, 0.75],
        #orange
        Square.Name.BOW_STREET: [300, 1, 1, 1, 250, 4, 0.75],
        Square.Name.MARLBOROUGH_STREET: [300, 1, 1, 1, 250, 4, 0.75],
        Square.Name.VINE_STREET: [300, 1, 1, 1, 250, 5, 0.75],
        #red
        Square.Name.STRAND: [300, 1, 1, 1, 250, 4, 0.75],
        Square.Name.FLEET_STREET: [300, 1, 1, 1, 250, 4, 0.75],
        Square.Name.TRAFALGAR_SQUARE: [300, 1, 1, 1, 250, 5, 0.75],
        #yellow
        Square.Name.LEICESTER_SQUARE: [300, 1, 1, 1, 250, 4, 0.75],
        Square.Name.COVENTRY_STREET: [300, 1, 1, 1, 250, 4, 0.75],
        Square.Name.PICCADILLY: [300, 1, 1, 1, 250, 5, 0.75],
        #green
        Square.Name.REGENT_STREET: [300, 1, 1, 1, 250, 5, 0.75],
        Square.Name.OXFORD_STREET: [300, 1, 1, 1, 250, 5, 0.75],
        Square.Name.BOND_STREET: [300, 1, 1, 1, 250, 5, 0.75],
        #dark blue
        Square.Name.PARK_LANE: [100, 1.1, 1.1, 1.2, 250, 5, 0.75],
        Square.Name.MAYFAIR: [100, 1.1, 1.1, 1.2, 250, 5, 0.75],
        #station
        Square.Name.KINGS_CROSS_STATION: [0, 1.25, 1.25, 1.25, 0, -1, 0],
        Square.Name.MARYLEBONE_STATION: [0, 1.25, 1.25, 1.25, 0, -1, 0],
        Square.Name.FENCHURCH_STREET_STATION: [0, 1.25, 1.25, 1.25, 0, -1, 0],
        Square.Name.LIVERPOOL_STREET_STATION: [0, 1.25, 1.25, 1.25, 0, -1, 0],
        #company
        Square.Name.ELECTRIC_COMPANY: [50, 1, 1, 1.1, 0, -1, 0.5],
        Square.Name.WATER_WORKS: [50, 1, 1, 1.1, 0, -1, 0.5],
    }
def get_name(self):
    '''Returns the name this AI plays under.'''
    return 'VSSmartBuyerNeutral'
def start_of_game(self):
    '''
    Called at the start of the game. Resets all per-game state:
    turn counter, cash tracking and the payment-in-progress flag.
    '''
    self.turn = 0
    self.needed_money = 0
    self.in_state_of_taking_money = False
    self.money_to_be_taken = 0
def start_of_turn(self, game_state, player):
    '''
    Called when any AI's turn starts. Advances the turn counter and
    clears the per-turn cash tracking.
    '''
    self.turn = self.turn + 1
    self.needed_money = 0
    self.in_state_of_taking_money = False
    self.money_to_be_taken = 0
def landed_on_unowned_property(self, game_state, player, property):
    '''
    Decide whether to buy the unowned square we landed on.
    Returns Action.BUY or Action.DO_NOT_BUY, as chosen by the
    acquiring policy.
    '''
    return self.property_policy.acquire_through_landing(
        game_state, player, property)
def money_given(self, player, amount):
    '''
    Notification that money has been given to us. Any pending
    payment sequence is considered finished at this point.
    '''
    self.in_state_of_taking_money = False
def money_will_be_taken(self, player, amount):
    '''
    Notification that `amount` is about to be taken from us.
    Records the pending amount and, when our cash cannot cover it,
    the shortfall the selling policies must raise. Deals/mortgaging
    happen via later callbacks before the money is actually taken.
    '''
    self.in_state_of_taking_money = True
    self.money_to_be_taken = amount
    shortfall = amount - player.state.cash
    if shortfall > 0:
        self.needed_money = shortfall
def money_taken(self, player, amount):
    '''
    Notification that money has been taken from us.
    Nothing needs doing here.
    '''
    return None
def players_birthday(self):
    '''
    Response to the Community Chest birthday card. The exact string
    "Happy Birthday!" must be returned, otherwise the fine is 100
    instead of 10.
    '''
    return "Happy Birthday!"
def pay_ten_pounds_or_take_a_chance(self, game_state, player):
    '''
    Choose between PAY_TEN_POUND_FINE and TAKE_A_CHANCE when the
    "pay a fine or take a Chance" card is drawn; the chance policy
    decides.
    '''
    return self.chance_policy.compute()
def property_offered_for_auction(self, game_state, player, property):
    '''
    Return our bid for the auctioned property. Zero means we are not
    bidding; the winner pays one more than the second-highest bid.
    Delegated to the acquiring policy.
    '''
    return self.property_policy.acquire_through_auction(
        game_state, player, property)
def auction_result(self, status, property, player, amount_paid):
    '''
    Notification of an auction outcome (sent to all players).
    Successful auctions are recorded so the policies can learn what
    other players are willing to pay; failed auctions are ignored.
    '''
    if status != PlayerAIBase.Action.AUCTION_SUCCEEDED:
        return
    self.auction_memory.add_auction(property, player, amount_paid)
def build_houses(self, game_state, player):
    '''
    Called near the start of our turn to choose house purchases.

    Returns a list of (Street, additional_houses) tuples, or an empty
    list to build nothing.  The house policy makes the choice; note
    the engine rolls back unbalanced or unaffordable builds in full.
    '''
    plan = self.house_policy.compute(game_state, player)
    return plan
def sell_houses(self, game_state, player):
    '''
    Offer houses for sale when a debt, fine or rent must be paid.

    Returns a list of (Street, houses_to_sell) tuples; each house
    sold refunds half its purchase price.  We only sell while the AI
    records an outstanding shortfall (self.needed_money); the selling
    policy decides what to sacrifice.
    '''
    if self.needed_money <= 0:
        return []
    return self.selling_policy.computeHouse(game_state, player)
def mortgage_properties(self, game_state, player):
    '''
    Offer properties to mortgage before a debt is collected.

    Mortgaging pays out half the face value; properties carrying
    houses cannot be mortgaged (the engine offers house sales first).
    Returns a list of Property objects, empty when we owe nothing
    (self.needed_money <= 0); otherwise the selling policy chooses.
    '''
    if self.needed_money <= 0:
        return []
    return self.selling_policy.computeMortgage(game_state, player)
def unmortgage_properties(self, game_state, player):
    '''
    Called near the start of our turn to pick properties to unmortgage.

    Unmortgaging costs half the face value plus 10%; the engine aborts
    the whole transaction if we end up unable to pay.  Returns a
    (possibly empty) list of Property objects chosen by the property
    acquisition policy.
    '''
    choices = self.property_policy.acquire_through_unmortgage(game_state, player)
    return choices
def get_out_of_jail(self, game_state, player):
    '''
    Called before the dice roll while we sit in jail.

    Returns BUY_WAY_OUT_OF_JAIL (costs £50),
    PLAY_GET_OUT_OF_JAIL_FREE_CARD, or STAY_IN_JAIL.  The jail policy
    decides; it also receives this AI instance so it can inspect our
    wider state.
    '''
    return self.jail_policy.compute(self, game_state, player)
def propose_deal(self, game_state, player):
    '''
    Called to let us propose a deal; return a DealProposal or None.

    Two modes:
    - while we still owe money (self.needed_money > 0) the selling
      policy proposes deals aimed at raising cash;
    - otherwise the acquiring policy may offer to buy a property,
      limited to the cash left after pending charges
      (cash - money_to_be_taken).
    '''
    if self.needed_money > 0:
        return self.selling_policy.propose_deal(game_state, player)
    spare_cash = player.state.cash - self.money_to_be_taken
    return self.property_policy.acquire_through_deal_proposal(
        game_state, player, spare_cash)
def deal_proposed(self, game_state, player, deal_proposal):
    '''
    Called when another player proposes a deal to us.

    Returns a DealResponse (ACCEPT with a cash limit, or REJECT).
    Evaluation is delegated to the property acquisition policy.
    '''
    return self.property_policy.acquire_through_deal_being_proposed(
        game_state, player, deal_proposal)
def deal_result(self, deal_info):
    '''
    Notification that a proposed deal has finished.

    deal_info is a PlayerAIBase.DealInfo value indicating whether the
    deal succeeded and, if not, why.  No response is required; this AI
    takes no action on it.
    '''
    return None
def deal_completed(self, deal_result):
    '''
    Notification (to all players) of a successfully completed deal.

    deal_result is a DealResult; its proposer-to-proposee cash figure
    may be negative when cash flowed the other way.  The deal is
    archived in the deal memory.  No response is required.
    '''
    self.deal_memory.add_deal(deal_result)
def player_went_bankrupt(self, player):
    '''
    Called when a player goes bankrupt.
    All non-bankrupt players receive this notification.
    player is a Player object.
    No response is required.

    Debug aid: if *we* are the bankrupt player and still appear to
    hold value (net worth + cash > 0), log a breakdown of what was
    left unmortgaged or unsold, so the selling policies can be
    diagnosed.
    '''
    # Only our own bankruptcies with residual value are interesting here.
    if player.name == self.get_name() and player.net_worth + player.state.cash > 0:
        property_net_worth = 0      # recoverable value still on the board
        property_with_house = 0     # streets still carrying houses
        property_unmortgaged = 0    # properties never mortgaged
        houses = 0                  # total houses left standing
        for property in player.state.properties:
            # We add the mortgage value of properties...
            if property.is_mortgaged == False:
                property_unmortgaged += 1
                property_net_worth += property.mortgage_value
            # We add the resale value of houses...
            if type(property) == Street:
                if property.number_of_houses > 0:
                    property_with_house += 1
                    houses += property.number_of_houses
                    property_net_worth += int(property.house_price/2 * property.number_of_houses)
        # Anything recoverable left behind means money was left on the table.
        if property_unmortgaged > 0 or property_with_house > 0 or houses > 0:
            Logger.log(player.name + " went bankrupt with a cash of " + format(player.state.cash) + " and a net of " + format(player.net_worth) + "/" + format(property_net_worth), Logger.ERROR)
            Logger.log(player.name + " went bankrupt with " + format(property_unmortgaged) + " properties unmortgaged", Logger.ERROR)
            Logger.log(player.name + " went bankrupt with " + format(property_with_house) + " properties with house", Logger.ERROR)
            Logger.log(player.name + " went bankrupt with " + format(houses) + " houses", Logger.ERROR)
            # Second pass: name each offending property individually.
            for property in player.state.properties:
                if property.is_mortgaged == False:
                    Logger.log(player.name + " unmortgage property: " + property.name, Logger.ERROR)
                if type(property) == Street:
                    if property.number_of_houses > 0:
                        Logger.log(player.name + " housed property: " + property.name + " / " + format(property.number_of_houses), Logger.ERROR)
            #exit(-1)
    pass
def player_ran_out_of_time(self, player):
    '''
    Notification that a player was removed for exceeding their
    processing-time budget.  Sent to all non-bankrupt players; no
    response is required and this AI takes no action.
    '''
    return None
def game_over(self, winner, maximum_rounds_played):
    '''
    Notification that the game has ended (sent to all players).

    winner is the winning Player, or None for a draw;
    maximum_rounds_played is True when the round limit was hit.
    No response is required and this AI takes no action.
    '''
    return None
def ai_error(self, message):
    '''
    Notification that one of our callbacks returned an invalid value
    (for example, the wrong type).  No response is required; this AI
    simply ignores the message.
    '''
    return None
| {
"repo_name": "richard-shepherd/monopyly",
"path": "AIs/Cedric Daligny/VSSmartBuyerNeutral.py",
"copies": "1",
"size": "22886",
"license": "mit",
"hash": -1725063134238901000,
"line_mean": 40.6757741348,
"line_max": 228,
"alpha_frac": 0.6216783217,
"autogenerated": false,
"ratio": 3.7495902982628646,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48712686199628646,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Cedric'
from ..Memory import *
from monopyly import *
class AuctionPolicy(object):
    '''
    Bidding strategy for property auctions.

    If the auctioned square's weight (looked up in
    ai.properties_weight) exceeds the buy-at-any-cost threshold we bid
    our entire cash; otherwise we bid whatever we hold above the cash
    reserve we want to keep.  A return of 0 means "not bidding".
    '''

    def __init__(self, memory, threshold_buy_at_any_cost, keep_cash):
        '''
        memory: shared game memory object.
        threshold_buy_at_any_cost: weight above which we bid everything.
        keep_cash: reserve we refuse to dip into for ordinary bids.
        '''
        self.memory = memory
        self.threshold = threshold_buy_at_any_cost
        self.keep_cash = keep_cash

    def compute(self, ai, game_state, player, property):
        '''
        Return our bid for the auctioned property (0 = no bid).

        The property is sold to the highest bidder for £1 more than
        the second-highest bid, so bidding all our cash is safe when
        we really want the square.
        '''
        cash = player.state.cash
        for entry in ai.properties_weight:
            if entry[0] != property.name:
                continue
            if entry[1] > self.threshold:
                # We want this square at any price: bid everything.
                return cash
            if cash > self.keep_cash:
                # Bid only what we hold above our reserve.
                return cash - self.keep_cash
        return 0
| {
"repo_name": "richard-shepherd/monopyly",
"path": "AIs/Cedric Daligny/Policy/AuctionPolicy.py",
"copies": "1",
"size": "1347",
"license": "mit",
"hash": -1182312042872396500,
"line_mean": 34.4210526316,
"line_max": 83,
"alpha_frac": 0.6114413076,
"autogenerated": false,
"ratio": 4.078787878787879,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0030331585650159297,
"num_lines": 38
} |
__author__ = 'Cedric'
from ..Memory import *
from monopyly import *
class DealPolicy(object):
    '''
    Deal-making policy: rejects every incoming proposal, and proposes
    to buy high-priority squares (weight > 55 in properties_we_like)
    from their owners at 20% over face value.
    '''

    def __init__(self, memory):
        '''
        memory: shared game memory (kept for parity with the other
        policy classes).

        properties_we_like pairs each square name with a priority
        weight; only entries with weight > 55 are targeted by
        propose().
        '''
        self.memory = memory
        self.properties_we_like = [
            [Square.Name.OLD_KENT_ROAD, 80],
            [Square.Name.WHITECHAPEL_ROAD, 80],
            [Square.Name.KINGS_CROSS_STATION, 100],
            [Square.Name.THE_ANGEL_ISLINGTON, 80],
            [Square.Name.EUSTON_ROAD, 80],
            [Square.Name.PENTONVILLE_ROAD, 80],
            [Square.Name.PALL_MALL, 1],
            [Square.Name.ELECTRIC_COMPANY, 50],
            [Square.Name.WHITEHALL, 1],
            [Square.Name.NORTHUMBERLAND_AVENUE, 1],
            [Square.Name.MARYLEBONE_STATION, 10],
            [Square.Name.BOW_STREET, 1],
            [Square.Name.MARLBOROUGH_STREET, 1],
            [Square.Name.VINE_STREET, 1],
            [Square.Name.STRAND, 1],
            [Square.Name.FLEET_STREET, 1],
            [Square.Name.TRAFALGAR_SQUARE, 1],
            [Square.Name.FENCHURCH_STREET_STATION, 10],
            [Square.Name.LEICESTER_SQUARE, 1],
            [Square.Name.COVENTRY_STREET, 1],
            [Square.Name.WATER_WORKS, 50],
            [Square.Name.PICCADILLY, 1],
            [Square.Name.REGENT_STREET, 1],
            [Square.Name.OXFORD_STREET, 1],
            [Square.Name.BOND_STREET, 1],
            [Square.Name.LIVERPOOL_STREET_STATION, 10],
            [Square.Name.PARK_LANE, 80],
            [Square.Name.MAYFAIR, 80] ]

    def compute(self, game_state, player, deal_proposal):
        '''
        Called when another player proposes a deal to us.
        This policy never accepts incoming deals.
        '''
        # BUG FIX: the DealResponse action must come from the
        # DealResponse.Action enum; the original passed
        # PlayerAIBase.DealInfo.DEAL_REJECTED (a result code, not an
        # action).  The engine treats anything != ACCEPT as a
        # rejection, so the observable outcome is unchanged, but the
        # enum is now the one this method's own contract documents.
        return DealResponse(DealResponse.Action.REJECT)

    def propose(self, ia, game_state, player):
        '''
        Propose buying one coveted square from another player.

        Scans properties_we_like in order; for the first square with
        weight > 55 that is owned by an opponent, offers 1.2x its face
        value if we hold more cash than that.  Returns a DealProposal
        or None when nothing suitable was found.
        '''
        for property_name in self.properties_we_like:
            if property_name[1] > 55:
                property = game_state.board.get_square_by_name(property_name[0])
                if(property.owner is player or property.owner is None):
                    # The property is either not owned, or owned by us...
                    continue
                # The property is owned by another player, so we make them an
                # offer for it...
                price_offered = property.price * 1.2
                if player.state.cash > price_offered:
                    return DealProposal(
                        properties_wanted=[property],
                        maximum_cash_offered=price_offered,
                        propose_to_player=property.owner)
        return None
| {
"repo_name": "richard-shepherd/monopyly",
"path": "AIs/Cedric Daligny/Policy/DealPolicy.py",
"copies": "1",
"size": "5448",
"license": "mit",
"hash": -8399262462012154000,
"line_mean": 40.2651515152,
"line_max": 85,
"alpha_frac": 0.560492014,
"autogenerated": false,
"ratio": 4.071001494768311,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012142775758569215,
"num_lines": 132
} |
__author__ = 'Cedric'
from ..Memory import *
from monopyly import *
class SellingPolicy(object):
    '''
    Chooses what to sell or mortgage when the owning AI must raise
    cash (tracked as self.ai.needed_money).
    '''

    def __init__(self, ai, memory):
        '''
        ai: the owning player-AI (read for needed_money, which
            computeHouse also decrements as sales are planned).
        memory: shared game memory object.
        '''
        self.ai = ai
        self.memory = memory

    def computeMortgage(self, game_state, player):
        '''
        Pick properties to mortgage to cover the current shortfall.

        Mortgaging pays half the face value.  Non-street properties
        are offered first, then houseless streets; we stop as soon as
        the accumulated mortgage value exceeds the shortfall.  Returns
        a list of Property objects (possibly all eligible ones if the
        shortfall cannot be fully covered), or [] when nothing is owed.
        '''
        if self.ai.needed_money > 0:
            result = []
            money_from_mortgage = 0
            for property in player.state.properties:
                if type(property) != Street:
                    # Stations/utilities: mortgageable whenever unmortgaged.
                    if property.is_mortgaged == False:
                        money_from_mortgage += property.price / 2
                        result.append(property)
                        if money_from_mortgage > self.ai.needed_money:
                            #Logger.log("SellingPolicy::mortgage case 1- mortgage value: " + format(money_from_mortgage) + " vs needed: " + format(self.ai.needed_money),Logger.WARNING)
                            return result
                else:
                    # Streets can only be mortgaged once houseless.
                    if property.is_mortgaged == False and property.number_of_houses == 0:
                        money_from_mortgage += property.price / 2
                        result.append(property)
                        if money_from_mortgage > self.ai.needed_money:
                            #Logger.log("SellingPolicy::mortgage case 2 - mortgage value: " + format(money_from_mortgage) + " vs needed: " + format(self.ai.needed_money),Logger.WARNING)
                            return result
            #Logger.log("SellingPolicy::mortgage case 3 - mortgage value: " + format(money_from_mortgage) + " vs needed: " + format(self.ai.needed_money),Logger.WARNING)
            return result
        return []

    def computeHouse(self, game_state, player):
        '''
        Pick houses to sell when mortgaging alone cannot cover the
        shortfall.  Each house sold refunds half its purchase price.

        Strategy, in escalating order:
        1. estimate what mortgaging could raise; if that already
           covers needed_money, sell nothing;
        2. otherwise try selling ONE house per built street;
        3. if still short, try selling ALL houses street by street;
        4. as a last resort, sell every house we own.
        NOTE(review): steps 2-4 decrement self.ai.needed_money by the
        expected proceeds as a side effect — callers appear to rely on
        this running total; confirm before refactoring.
        Returns a list of (Street, houses_to_sell) tuples.
        '''
        if self.ai.needed_money > 0:
            money_from_mortgage = 0
            # Step 1: what could mortgaging alone raise?
            for property in player.state.properties:
                if type(property) != Street:
                    if property.is_mortgaged == False:
                        money_from_mortgage += property.price / 2
                else:
                    if property.is_mortgaged == False and property.number_of_houses == 0:
                        money_from_mortgage += property.price / 2
            #Logger.log("SellingPolicy::sell_house - mortgage value: " + format(money_from_mortgage) + " vs needed: " + format(self.ai.needed_money),Logger.WARNING)
            if money_from_mortgage < self.ai.needed_money:
                still_needed = self.ai.needed_money - money_from_mortgage
                #Logger.log("SellingPolicy::sell_house BY ONE - still needed: " + format(still_needed),Logger.WARNING)
                result = []
                money_got_by_house = 0
                # try one on each
                for owned_set in player.state.owned_sets:
                    for street in owned_set.properties:
                        if type(street) == Street:
                            if street.number_of_houses > 0:
                                result.append((street,1))
                                still_needed -= street.house_price / 2
                                money_got_by_house += street.house_price / 2
                                if still_needed <= 0:
                                    #Logger.log("SP: needed money left case 1: " + format(self.ai.needed_money) + " money expected from house: " + format(money_got_by_house),Logger.WARNING)
                                    self.ai.needed_money -= money_got_by_house
                                    self.display_list(player,result)
                                    return result
                #reinit before trying selling max of houses
                result = []
                money_got_by_house = 0
                still_needed = self.ai.needed_money - money_from_mortgage
                #Logger.log("SellingPolicy::sell_house ALL - still needed: " + format(still_needed),Logger.WARNING)
                #try all
                for owned_set in player.state.owned_sets:
                    for street in owned_set.properties:
                        if type(street) == Street:
                            if street.number_of_houses > 0:
                                result.append((street,street.number_of_houses))
                                still_needed -= (street.house_price / 2) * street.number_of_houses
                                money_got_by_house += (street.house_price / 2) * street.number_of_houses
                                if still_needed <= 0:
                                    #Logger.log("SP: needed money left case 2: " + format(self.ai.needed_money) + " money expected from house: " + format(money_got_by_house),Logger.WARNING)
                                    self.ai.needed_money -= money_got_by_house
                                    self.display_list(player,result)
                                    return result
                # if we need really money, sell all
                result = []
                for street in player.state.properties:
                    if type(street) == Street:
                        if street.number_of_houses > 0:
                            result.append((street,street.number_of_houses))
                #Logger.log("SP: needed money left case 3: " + format(self.ai.needed_money) + " money expected from house: " + format(money_got_by_house),Logger.WARNING)
                # NOTE(review): money_got_by_house here is the step-3
                # running total, not the value of this final list.
                self.ai.needed_money -= money_got_by_house
                self.display_list(player,result)
                return result
        #Logger.log("SP: needed money left case 5: " + format(self.ai.needed_money) + " money expected from house: 0",Logger.WARNING)
        return []

    def display_list(self,player,list):
        # Debug hook: the logging body is commented out; kept so the
        # call sites in computeHouse remain valid.
        #Logger.log("SELLING HOUSE BEGIN for " + player.name,Logger.WARNING)
        #for property in player.state.properties:
        #    if type(property) == Street:
        #        Logger.log("    property: " + property.name + " - " + format(property.number_of_houses),Logger.WARNING)
        #Logger.log("List length: " + format(len(list)),Logger.WARNING)
        #for elem in list:
        #    Logger.log("    elem: " + elem[0].name + " - " + format(elem[1]),Logger.WARNING)
        #Logger.log("SELLING HOUSE END for " + player.name,Logger.WARNING)
        pass

    def propose_deal(self,game_state,player):
        '''
        Called to let us propose a cash-raising deal.
        This policy never proposes one (always returns None).
        '''
        return None
| {
"repo_name": "richard-shepherd/monopyly",
"path": "AIs/Cedric Daligny/Policy/SellingPolicy.py",
"copies": "1",
"size": "9934",
"license": "mit",
"hash": 754281529506986800,
"line_mean": 46.3,
"line_max": 185,
"alpha_frac": 0.5707238498,
"autogenerated": false,
"ratio": 4.125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003165950714750616,
"num_lines": 210
} |
__author__ = 'Cedric'
from monopyly import *
class AcquiringPolicy(object):
    '''
    Decides when to acquire properties: on landing, at auction, by
    unmortgaging, and through deals (proposed by or to us).

    All decisions consult ai.properties_information[property.name],
    an indexable record per square.  From the usages below: index 0 is
    compared against remaining cash (a cash reserve to keep after the
    purchase), index 1 scales the price for deals we propose, index 2
    for deals proposed to us, and index 3 for auction bids —
    TODO confirm these semantics against where the table is built.
    '''

    def __init__(self,ai):
        # ai: the owning player-AI (read for properties_information).
        self.ai = ai
        # Properties we recently offered to buy, so repeated proposals
        # rotate through different targets.
        self.last_offers = []

    def acquire_through_landing(self,game_state,player,property):
        '''
        Decide BUY vs DO_NOT_BUY when we land on an unowned square.
        Buys at face value if the cash left afterwards stays above the
        square's reserve (properties_information[...][0]).
        '''
        if type(property) == Street:
            if property.number_of_houses > 0:
                # Unowned squares should never carry houses; log loudly.
                Logger.log("Trying to acquire a property (through landing) with houses on it " + property.name,Logger.ERROR)
        remaining_after_buying = player.state.cash - property.price
        if self.ai.properties_information[property.name][0] <= remaining_after_buying:
            return PlayerAIBase.Action.BUY
        return PlayerAIBase.Action.DO_NOT_BUY

    def acquire_through_auction(self,game_state,player,property):
        '''
        Return our auction bid (0 = not bidding).
        Bids price * factor (properties_information[...][3]) when the
        cash left afterwards stays above the reserve scaled by the
        same factor.
        '''
        if type(property) == Street:
            if property.number_of_houses > 0:
                # Auctioned squares should never carry houses; log loudly.
                Logger.log("Trying to acquire a property (through auction) with houses on it " + property.name,Logger.ERROR)
        remaining_after_buying = player.state.cash - (property.price * self.ai.properties_information[property.name][3])
        if (self.ai.properties_information[property.name][0] / self.ai.properties_information[property.name][3]) <= remaining_after_buying:
            return property.price * self.ai.properties_information[property.name][3]
        return 0

    def acquire_through_unmortgage(self,game_state,player):
        '''
        Pick mortgaged properties to unmortgage this turn.
        Each costs half the face value plus 10% (price * 0.55); we
        greedily take properties while the running cash remainder
        stays above each property's reserve.  Returns Property objects.
        '''
        unmortgaged_properties = []
        remaining_after_buying = player.state.cash
        for property in player.state.properties:
            if property.is_mortgaged == True:
                unmortgage_price = (property.price * 0.55)
                if self.ai.properties_information[property.name][0] <= (remaining_after_buying - unmortgage_price):
                    remaining_after_buying -= unmortgage_price
                    unmortgaged_properties.append(property)
        return unmortgaged_properties

    def acquire_through_deal_proposal(self,game_state,player,cash_available):
        '''
        Propose buying one opponent-owned property, or return None.

        Ranks every property held by other players by its reserve
        value (properties_information[...][0], descending), skips ones
        we offered for recently (last_offers, reset after 5 tries or
        when we run out of candidates), and offers
        price * factor (properties_information[...][1]) for the first
        affordable one.  Streets with houses are never targeted.
        '''
        # create the list of property owned by a player not being us
        listed_owned_by_other_properties = []
        for player_inside in game_state.players:
            if player_inside != player:
                for property in player_inside.state.properties:
                    listed_owned_by_other_properties.append([property,self.ai.properties_information[property.name][0]])
        sorted_properties = sorted(listed_owned_by_other_properties,key=lambda elem:elem[1],reverse=True)
        # Reset the rotation once we have cycled through enough targets.
        if len(self.last_offers) > 5 or len(self.last_offers) >= len(sorted_properties) - 2:
            self.last_offers = []
        for prop_elem in sorted_properties:
            remaining_after_buying = cash_available - (prop_elem[0].price * self.ai.properties_information[prop_elem[0].name][1])
            if (self.ai.properties_information[prop_elem[0].name][0] / self.ai.properties_information[prop_elem[0].name][1]) <= remaining_after_buying and self.last_offers.count(prop_elem[0]) == 0:
                if type(prop_elem[0]) != Street:
                    self.last_offers.append(prop_elem[0])
                    return DealProposal(
                        propose_to_player=prop_elem[0].owner,
                        properties_offered=[],
                        properties_wanted=[prop_elem[0]],
                        maximum_cash_offered=(prop_elem[0].price * self.ai.properties_information[prop_elem[0].name][1]) )
                elif prop_elem[0].number_of_houses == 0:
                    self.last_offers.append(prop_elem[0])
                    return DealProposal(
                        propose_to_player=prop_elem[0].owner,
                        properties_offered=[],
                        properties_wanted=[prop_elem[0]],
                        maximum_cash_offered=(prop_elem[0].price * self.ai.properties_information[prop_elem[0].name][1]) )
        return None

    def acquire_through_deal_being_proposed(self,game_state,player,deal_proposal):
        '''
        Respond to a deal proposed to us: ACCEPT with a cash cap of
        price * factor (properties_information[...][2]) when we can
        afford it while keeping the scaled reserve, otherwise REJECT.
        '''
        # NOTE(review): only deals that BOTH request properties from us
        # and offer exactly one property proceed past this guard —
        # the properties_wanted condition may be inverted; confirm intent.
        if len(deal_proposal.properties_wanted) == 0 or len(deal_proposal.properties_offered) != 1:
            return DealResponse(DealResponse.Action.REJECT)
        property = deal_proposal.properties_offered[0]
        if type(property) == Street:
            if property.number_of_houses > 0:
                # Deals may not include housed streets; log loudly.
                Logger.log("Trying to acquire a property (through being proposed) with houses on it " + property.name,Logger.ERROR)
        remaining_after_buying = player.state.cash - (property.price * self.ai.properties_information[property.name][2])
        if (self.ai.properties_information[property.name][0] / self.ai.properties_information[property.name][2]) <= remaining_after_buying:
            return DealResponse(DealResponse.Action.ACCEPT, maximum_cash_offered=property.price * self.ai.properties_information[property.name][2])
        # Default to rejecting the deal...
        return DealResponse(DealResponse.Action.REJECT)
"repo_name": "richard-shepherd/monopyly",
"path": "AIs/Cedric Daligny/Policy/AcquiringPolicy.py",
"copies": "1",
"size": "9220",
"license": "mit",
"hash": 8580615338033832000,
"line_mean": 45.095,
"line_max": 197,
"alpha_frac": 0.6404860056,
"autogenerated": false,
"ratio": 4.084182543198937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0038204309112416,
"num_lines": 200
} |
__author__ = 'Cedric'
from monopyly import *
class UnmortgagePolicy(object):
    """Decides which mortgaged properties to unmortgage each turn.

    Greedily unmortgages properties (in player.state.properties order) for as
    long as doing so keeps a cash reserve in hand.
    """

    def __init__(self):
        '''
        ctor
        '''

    def compute(self, game_state, player, cash_reserve=150):
        '''
        Called near the start of the player's turn to give them the
        opportunity to unmortgage properties.
        Unmortgaging costs half the face value plus 10%. Between deciding
        to unmortgage and money being taken the player will be given the
        opportunity to make deals or sell other properties. If after this
        they do not have enough money, the whole transaction will be aborted,
        and no properties will be unmortgaged and no money taken.

        Returns a list of property names to unmortgage.
        NOTE(review): the framework docs mention Property objects, but this
        policy has always returned name strings — confirm callers accept names.

        cash_reserve: minimum cash to keep in hand (default 150 preserves the
        previous hard-coded behaviour).
        '''
        result = []
        # Spend only what is left above the safety reserve.
        remaining_cash = player.state.cash - cash_reserve
        for prop in player.state.properties:
            if prop.is_mortgaged:
                # Unmortgaging costs half the face value plus 10%.
                cost = (prop.price / 2) * 1.1
                if cost < remaining_cash:
                    remaining_cash -= cost
                    result.append(prop.name)
        return result
| {
"repo_name": "richard-shepherd/monopyly",
"path": "AIs/Cedric Daligny/Policy/UnmortgagePolicy.py",
"copies": "1",
"size": "1259",
"license": "mit",
"hash": 6533988398404749000,
"line_mean": 33.9722222222,
"line_max": 77,
"alpha_frac": 0.6115965052,
"autogenerated": false,
"ratio": 4.100977198697068,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0029812732074744708,
"num_lines": 36
} |
__author__ = 'Cedric'
from monopyly import *
'''
buying_house_policy # ONE_COMPLETE_SET, ONE_AVAILABLE_PROPERTY, ALL_AVAILABLE_PROPERTY, ALL_COMPLETE_SET
buying_house_repartition_policy # MAXIMIZE_HOTEL, SAME_SIZE
# information used to know if house will be build
buying_house_cash_threshold # similar to the property cash threshold, the remaining cash wanted after transaction occurs
buying_house_sorter # value use to sort the property in terms of house building preferences (-1 means that housing is not available)
'''
# House-building policy. The nested classes act as enums; only
# ONE_COMPLETE_SET and ONE_AVAILABLE_PROPERTY are implemented below.
class HousePolicy_v2(object):
class HousePolicy(object):
ONE_COMPLETE_SET = 0
ONE_AVAILABLE_PROPERTY = 1
ALL_AVAILABLE_PROPERTY = 2
ALL_COMPLETE_SET = 3
class RepartitionPolicy(object):
MAXIMIZE_HOTEL = 0
SAME_SIZE = 1
def __init__(self,ai,policy,repartition):
# ai: the owning AI (provides properties_information lookup table);
# policy: a HousePolicy value; repartition: a RepartitionPolicy value.
self.ai = ai
self.policy = policy
self.repartition = repartition
def compute(self,game_state, player):
'''
Called near the start of the player's turn to give the option of building houses.
Return a list of tuples indicating which properties you want to build houses
on and how many houses to build on each. For example:
[(park_lane, 3), (mayfair, 4)]
The properties should be Property objects.
Return an empty list if you do not want to build.
Notes:
- You must own a whole set of unmortgaged properties before you can
build houses on it.
- You can build on multiple sets in one turn. Just specify all the streets
and houses you want to build.
- Build five houses on a property to have a "hotel".
- You specify the _additional_ houses you will be building, not the
total after building. For example, if Park Lane already has 3 houses
and you specify (park_lane, 2) you will end up with 5
houses (ie, a hotel).
- Sets must end up with 'balanced' housing. No square in a set can
have more than one more house than any other. If you request an
unbalanced build, the whole transaction will be rolled back, even
if it includes balanced building on other sets as well.
- If you do not have (or cannot raise) enough money to build all the
houses specified, the whole transaction will be rolled back. Between
this function call and money being taken, you will have an opportunity
to mortgage properties or make deals.
The default behaviour is not to build.
'''
set_available_to_build = []
for owned_set in player.state.owned_unmortgaged_sets:
# We can't build on stations or utilities, or if the
# set already has hotels on all the properties...
if not owned_set.can_build_houses:
continue
set_available_to_build.append(owned_set)
if len(set_available_to_build) > 0:
# NOTE(review): the two repartition branches below are near-duplicates
# (SAME_SIZE adds only a max-size cap on the best-set choice) — candidates
# for a shared helper. 'set' and 'property' shadow builtins throughout.
if self.repartition == self.RepartitionPolicy.MAXIMIZE_HOTEL:
# Pick the set whose last property scores highest in column [5] of
# properties_information — presumably a house-building preference; TODO confirm.
best_set = set_available_to_build[0]
for set in set_available_to_build:
property = set.properties[len(set.properties) - 1]
best_property = best_set.properties[len(best_set.properties) - 1]
if self.ai.properties_information[property.name][5] > self.ai.properties_information[best_property.name][5]:
best_set = set
if self.policy == self.HousePolicy.ONE_COMPLETE_SET:
# Column [4] is presumably the cash threshold to keep after building — TODO confirm.
cost = best_set.house_price * best_set.number_of_properties
if (player.state.cash - cost) >= self.ai.properties_information[best_set.properties[0].name][4]:
# We build one house on each property...
result = [(p, 1) for p in best_set.properties]
self.display_list(player,result)
return result
elif self.policy == self.HousePolicy.ONE_AVAILABLE_PROPERTY:
# Find the property in the set with the fewest houses (keeps the set balanced).
valid_property = best_set.properties[len(best_set.properties) - 1]
nb_house = valid_property.number_of_houses
for property in best_set.properties:
if property.number_of_houses < nb_house:
valid_property = property
nb_house = valid_property.number_of_houses
if (player.state.cash - best_set.house_price) >= self.ai.properties_information[valid_property.name][4]:
# We build one house on property...
result = [(valid_property, 1)]
self.display_list(player,result)
return result
elif self.repartition == self.RepartitionPolicy.SAME_SIZE:
# Find the tallest existing build across all owned streets; the chosen
# set must not already exceed it.
max_size = 0
for property in player.state.properties:
if type(property) == Street:
if property.number_of_houses > max_size:
max_size = property.number_of_houses
best_set = set_available_to_build[0]
for set in set_available_to_build:
property = set.properties[len(set.properties) - 1]
best_property = best_set.properties[len(best_set.properties) - 1]
if self.ai.properties_information[property.name][5] > self.ai.properties_information[best_property.name][5] and best_property.number_of_houses <= max_size:
best_set = set
if self.policy == self.HousePolicy.ONE_COMPLETE_SET:
cost = best_set.house_price * best_set.number_of_properties
if (player.state.cash - cost) >= self.ai.properties_information[best_set.properties[0].name][4]:
# We build one house on each property...
result = [(p, 1) for p in best_set.properties]
self.display_list(player,result)
return result
elif self.policy == self.HousePolicy.ONE_AVAILABLE_PROPERTY:
valid_property = best_set.properties[len(best_set.properties) - 1]
nb_house = valid_property.number_of_houses
for property in best_set.properties:
if property.number_of_houses < nb_house:
valid_property = property
nb_house = valid_property.number_of_houses
if (player.state.cash - best_set.house_price) >= self.ai.properties_information[valid_property.name][4]:
# We build one house on property...
result = [(valid_property, 1)]
self.display_list(player,result)
return result
return []
def display_list(self,player,list):
# Debug helper, currently disabled (logging commented out).
#Logger.log("BUYING HOUSE BEGIN for " + player.name,Logger.WARNING)
#for property in player.state.properties:
# if type(property) == Street:
# Logger.log(" property: " + property.name + " - " + format(property.number_of_houses),Logger.WARNING)
#Logger.log("List length: " + format(len(list)),Logger.WARNING)
#for elem in list:
# Logger.log(" elem: " + elem[0].name + " - " + format(elem[1]),Logger.WARNING)
#Logger.log("BUYING HOUSE END for " + player.name,Logger.WARNING)
pass
| {
"repo_name": "richard-shepherd/monopyly",
"path": "AIs/Cedric Daligny/Policy/HousePolicy_v2.py",
"copies": "1",
"size": "7612",
"license": "mit",
"hash": -6089262619517751000,
"line_mean": 50.7823129252,
"line_max": 175,
"alpha_frac": 0.5779033106,
"autogenerated": false,
"ratio": 4.233592880978866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5311496191578866,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Cedric'
import random
from monopyly import *
class JailPolicy(object):
'''
OutOfJailInformation
threshold_random_exit
max_round_in_jail # no limit is 500
threshold_free_square
'''
def __init__(self,threshold,max_round,free_square):
'''
ctor
threshold: probability in [0, 1] of randomly leaving jail each turn.
max_round: leave jail after this many turns regardless.
free_square: leave jail when at least this many board squares are unowned.
'''
self.threshold_random_exit = threshold
self.max_round = max_round
self.min_free_square = free_square
def compute(self, ia, game_state, player):
'''
Called in the player's turn, before the dice are rolled, if the player
is in jail.
There are three possible return values:
PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
PlayerAIBase.Action.STAY_IN_JAIL
Buying your way out of jail will cost £50.
The default action is STAY_IN_JAIL.
'''
# NOTE(review): 'ia' (presumably the owning AI) is unused here — confirm.
# Leave jail if: random chance fires, we've been in too long, or there are
# still enough unowned squares worth landing on.
if random.random() < self.threshold_random_exit or \
player.state.number_of_turns_in_jail > self.max_round or \
self.free_square_from(game_state) >= self.min_free_square:
# Prefer a free card over paying the £50 fee.
if player.state.number_of_get_out_of_jail_free_cards > 0:
return PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
return PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
return PlayerAIBase.Action.STAY_IN_JAIL
def free_square_from(self,game_state):
# Count of board squares not owned by any player (board has 40 squares;
# non-ownable squares are implicitly counted as free).
nb_occupied_square = 0
for player in game_state.players:
nb_occupied_square += len(player.state.properties)
return 40 - nb_occupied_square | {
"repo_name": "richard-shepherd/monopyly",
"path": "AIs/Cedric Daligny/Policy/JailPolicy.py",
"copies": "1",
"size": "1606",
"license": "mit",
"hash": 9173316395307077000,
"line_mean": 33.170212766,
"line_max": 78,
"alpha_frac": 0.614953271,
"autogenerated": false,
"ratio": 3.512035010940919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46269882819409186,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Celery'
import os
import cPickle as pickle
import rtmidi
from rtmidi import midiconstants as rt_const
from midi.objects.pot import Pot
from midi.objects.key import Key
import midi.defaults.defaults as d
class Engine():
    """Model layer of the MIDI controller: owns the pot/key object grids,
    (de)serializes them to a pickle file, and sends outgoing MIDI messages."""

    def __init__(self):
        self.dir = None
        self.file = ''
        self.raw_data = ''
        # 2D grids of control objects, indexed [row][column].
        self.pots = [[None]*d.NUM_OF_POTS_H for i in range(d.NUM_OF_POTS_V)]
        self.keys = [[None]*d.NUM_OF_KEYS_H for i in range(d.NUM_OF_KEYS_V)]
        self.midiout = rtmidi.MidiOut()
        # NOTE(review): the port list is queried but unused; port 2 is
        # hard-coded — confirm it exists on the target machine.
        midi_ports = self.midiout.get_ports()
        self.midiout.open_port(2)

    def open(self, filename):
        """Load all pot/key objects from *filename* (pickle stream: pots in
        row-major order, then keys)."""
        self.file = filename
        self.dir = os.path.split(self.file)[0]  # get the directory of the file
        # os.path.join(self.dir, self.file) collapses to self.file when the
        # stored path is absolute, so this matches the original lookup.
        # 'with' closes the handle even if unpickling raises (was leaked before).
        with open(os.path.join(self.dir, self.file), 'rb') as f:
            for i in range(d.NUM_OF_POTS_V):
                for j in range(d.NUM_OF_POTS_H):
                    self.pots[i][j] = pickle.load(f)
            for i in range(d.NUM_OF_KEYS_V):
                for j in range(d.NUM_OF_KEYS_H):
                    self.keys[i][j] = pickle.load(f)
                    self.keys[i][j].post_load()

    def save_as(self, filename, project_dir):
        """Save the current grids under a new name in *project_dir*."""
        self.dir = project_dir
        self.file = filename + '.txt'
        self.save()

    def new_file(self, filename, project_dir):
        """Create a fresh project with default pot/key objects and save it."""
        self.dir = project_dir
        self.file = filename + '.txt'
        #fill the file out with default things
        for i in range(d.NUM_OF_POTS_V):
            for j in range(d.NUM_OF_POTS_H):
                self.pots[i][j] = Pot(d.POT_NAMES[i][j], d.POT_CCS[i][j], 'forward')
        for i in range(d.NUM_OF_KEYS_V):
            for j in range(d.NUM_OF_KEYS_H):
                self.keys[i][j] = Key(d.KEY_NAMES[i][j], d.KEY_CCS[i][j], 'toggle')
        self.save()

    def save(self):
        """Pickle every pot, then every key (after its pre_save hook), to the
        current project file; creates the project directory if needed."""
        if not os.path.isdir(self.dir):
            os.mkdir(self.dir)
        # 'with' guarantees the file is flushed/closed even if pickling fails.
        with open(os.path.join(self.dir, self.file), 'wb') as f:
            for row in self.pots:
                for pot in row:
                    pickle.dump(pot, f)
            for row in self.keys:
                for key in row:
                    key.pre_save()
                    pickle.dump(key, f)

    def midi_out(self, midi_channel, midi_signal, midi_vel):
        """Send a raw 3-byte MIDI message on the open output port."""
        self.midiout.send_message([midi_channel, midi_signal, midi_vel])
if __name__ == '__main__':
main = Engine()
main.new_file('default', 'C:\\Users\\Celery\\Documents\\TopHat-MIDI') | {
"repo_name": "S1M1S/TopHat-MIDI",
"path": "midi/main.py",
"copies": "1",
"size": "2447",
"license": "mit",
"hash": 3018517683264948000,
"line_mean": 31.2105263158,
"line_max": 84,
"alpha_frac": 0.5508786269,
"autogenerated": false,
"ratio": 3.0209876543209875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4071866281220987,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Celery'
import os
# Project root: two directory levels above this file.
# NOTE(review): splitting on '\\' assumes Windows paths — breaks on POSIX; confirm.
DIRECTORY = os.sep.join(os.path.dirname(os.path.realpath(__file__)).split('\\')[:-2])
# Grid dimensions: pots (rotary controls) and keys (pads), horizontal x vertical.
NUM_OF_POTS_H = 4
NUM_OF_POTS_V = 2
NUM_OF_KEYS_H = 4
NUM_OF_KEYS_V = 4
# Per-widget drawing-area geometry (pixels).
DRAWING_AREA_WIDTH = DRAWING_AREA_HEIGHT = 100
DRAWING_AREA_OUTLINE_THICKNESS = 6
DRAWING_AREA_INDENT = 10
DRAWING_AREA_CENTRE = 50
POT_LINE_THICKNESS = 5
POT_CIRCLE_RADIUS = 40
KEY_SQUARE_WIDTH = KEY_SQUARE_HEIGHT = 80
DEFAULT_WIDGET_WIDTH = 100
DEFAULT_WIDGET_HEIGHT = 121
# Layout of the per-widget option menu (rows/columns and sizes).
OPTION_MENU_X = 2
OPTION_MENU_Y = 4
OPTION_WIDGET_WIDTH = 130
OPTION_PADDING = 3
LABEL_HEIGHT = 21
MAX_NAME_LENGTH = 10
# Default MIDI CC assignments: pots start at CC 0, keys at CC 64, row-major.
POT_CCS = [[0+i, 1+i, 2+i, 3+i] for i in range(0, NUM_OF_POTS_V*NUM_OF_POTS_H, NUM_OF_POTS_H)]
POT_NAMES = [['CC#'+str(col) for col in row] for row in POT_CCS]
KEY_CCS = [[64+i, 65+i, 66+i, 67+i] for i in range(0, 16, 4)] # default key params, 2D list from 0 to 15
KEY_NAMES = [['CC#'+str(col) for col in row] for row in KEY_CCS] # convert params to names
# Named colour table, populated below from colours.tsv (name -> (r, g, b)).
rgb_cols = {}
for line in open(os.path.join(DIRECTORY, 'midi', 'defaults', 'colours.tsv'), 'rU'):
key, r, g, b = line.rstrip().split('\t')
r, g, b = [int(x) for x in [r, g, b]]
rgb_cols[key] = (r, g, b) | {
"repo_name": "S1M1S/TopHat-MIDI",
"path": "midi/defaults/defaults.py",
"copies": "1",
"size": "1171",
"license": "mit",
"hash": 6358410344773691000,
"line_mean": 25.0444444444,
"line_max": 105,
"alpha_frac": 0.6413321947,
"autogenerated": false,
"ratio": 2.3051181102362204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8436578608183641,
"avg_score": 0.0019743393505158146,
"num_lines": 45
} |
__author__ = 'Celery'
from base import Base
import rtmidi.midiconstants as m
def clamp(n, min_n, max_n):
    """Constrain *n* to the inclusive range [min_n, max_n].

    Equivalent to max(min(max_n, n), min_n): the upper bound is applied
    first, so min_n wins if the bounds are ever inverted.
    """
    if n > max_n:
        n = max_n
    if n < min_n:
        n = min_n
    return n
class Pot(Base):
    """A rotary-pot control: converts vertical mouse drags into a MIDI
    velocity (0-127), with 'forward', 'reverse' and quantized 'step' modes."""

    def __init__(self, name, midi_loc, func, colour=None, state=False):
        # Bug fix: the caller-supplied colour/state were silently discarded —
        # Base.__init__ was always called with the literals colour=None,
        # state=False, so every pot came out red/inactive.
        Base.__init__(self, name, midi_loc, func, colour=colour, state=state)
        self.available_funcs = ('forward', 'reverse', 'step')
        # vel/old_vel are in [0, 127]; scaled_vel is in [0, num_steps] and is
        # only meaningful in 'step' mode.
        self.vel = self.old_vel = self.scaled_vel = 0.0
        self.num_steps = 5.0

    def set_vel(self, mouse_y):
        """Apply a mouse drag delta to the velocity.

        Returns True when the (possibly quantized) velocity actually changed,
        i.e. a MIDI message should be sent.
        """
        has_changed = False
        new_vel = 0
        if self.func == 'forward':
            new_vel = self.vel + mouse_y
        elif self.func == 'reverse':
            new_vel = self.vel - mouse_y
        if self.func == 'step':
            # Quantize: accumulate in step units, clamp, then snap the
            # velocity back onto the 0-127 scale.
            new_vel = self.scaled_vel + mouse_y * self.num_steps / 127.0
            self.scaled_vel = clamp(new_vel, 0, self.num_steps)
            self.vel = int(self.scaled_vel) * 127.0 / self.num_steps
        else:
            self.vel = clamp(new_vel, 0.0, 127.0)
        if self.vel != self.old_vel:
            has_changed = True
        self.old_vel = self.vel
        return has_changed

    def set_num_steps(self, new_num):
        """Set the 'step' mode resolution; returns False on non-numeric input."""
        success = True
        try:
            self.num_steps = float(new_num)
        except ValueError:
            success = False
        return success

    def get_midi_vel(self):
        return self.vel

    def get_num_steps(self):
        return self.num_steps
def get_channel(self):
return 0xB0 | {
"repo_name": "S1M1S/TopHat-MIDI",
"path": "midi/objects/pot.py",
"copies": "1",
"size": "1518",
"license": "mit",
"hash": 5712815940102341000,
"line_mean": 27.1296296296,
"line_max": 75,
"alpha_frac": 0.5559947299,
"autogenerated": false,
"ratio": 3.3289473684210527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43849420983210524,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Celery'
import gtk
from base_widg import BaseWidg
from option_widg import OptionWidg
import midi.defaults.defaults as d
class KeyWidg(BaseWidg):
    """Widget for one key pad: draws the pad and translates mouse clicks into
    state changes and outgoing MIDI messages via the parent Key object."""

    def __init__(self, parent, engine):
        BaseWidg.__init__(self, parent, engine)
        self.option_widg = OptionWidg(self)
        self.set_label_text(parent.get_name())
        self.drawing_area.set_events(gtk.gdk.EXPOSURE_MASK  # drawing areas
                                     | gtk.gdk.BUTTON_PRESS_MASK  # do not receive mouse clicks
                                     | gtk.gdk.BUTTON_RELEASE_MASK)  # by default
        self.h_box.pack_end(self.option_widg.alignment)
        self.connect()
        self.show_self()
        # Bug fix: options_visible was never initialised here, so the first
        # right-click raised AttributeError in button_pressed (unless BaseWidg
        # happens to set it — the options start hidden either way).
        self.options_visible = False
        self.option_widg.set_visibility(False)

    def connect(self):
        """Wire the drawing area's expose and mouse-button signals."""
        self.drawing_area.connect('expose_event', self.draw)
        self.drawing_area.connect('button_release_event', self.button_pressed)
        self.drawing_area.connect('button_press_event', self.button_pressed)

    def draw(self, widget, event, data=None):
        """Expose handler: paint the white background, the grey rounded
        outline, and the (de)saturated colour square via fill()."""
        self.surface = self.drawing_area.window
        self.context = self.surface.new_gc()
        self.colour_map = self.drawing_area.get_colormap()
        self.surface.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
        self.context.set_values(foreground=self.colour_map.alloc('white'))
        self.surface.draw_rectangle(self.context,
                                    True,
                                    0,
                                    0,
                                    d.DRAWING_AREA_WIDTH,
                                    d.DRAWING_AREA_HEIGHT)  # fill the frame with white
        self.context.set_values(foreground=self.colour_map.alloc('grey'),
                                line_width=d.DRAWING_AREA_OUTLINE_THICKNESS,
                                cap_style=gtk.gdk.CAP_ROUND,
                                join_style=gtk.gdk.JOIN_ROUND)
        self.surface.draw_rectangle(self.context,
                                    False,  # only draw outline
                                    d.DRAWING_AREA_INDENT,  # x
                                    d.DRAWING_AREA_INDENT,  # y
                                    d.DRAWING_AREA_WIDTH - d.DRAWING_AREA_INDENT*2,  # width
                                    d.DRAWING_AREA_WIDTH - d.DRAWING_AREA_INDENT*2)  # height (WIDTH reused; the area is square)
        self.fill()
        return True

    def fill(self):  # whether or not to draw the coloured square in the middle
        r, g, b = self.parent.get_gtk_colour()
        if self.parent.state:  # it should be filled
            self.context.set_values(foreground=self.colour_map.alloc(r, g, b))
        else:
            # Inactive keys get a washed-out version of their colour
            # (pushed ~96% of the way towards white).
            self.context.set_values(foreground=self.colour_map.alloc(int(r + (65535 - r) / 1.04),
                                                                     int(g + (65535 - g) / 1.04),
                                                                     int(b + (65535 - b) / 1.04)))
        self.surface.draw_rectangle(self.context,
                                    True,
                                    d.DRAWING_AREA_INDENT + d.DRAWING_AREA_OUTLINE_THICKNESS / 2,
                                    d.DRAWING_AREA_INDENT + d.DRAWING_AREA_OUTLINE_THICKNESS / 2,
                                    d.KEY_SQUARE_WIDTH - d.DRAWING_AREA_OUTLINE_THICKNESS,
                                    d.KEY_SQUARE_HEIGHT - d.DRAWING_AREA_OUTLINE_THICKNESS)

    def button_pressed(self, widget, event, data=None):
        """Left press/release toggles the key (sending MIDI when its state
        changes); right press toggles the options panel."""
        if event.button == 1:  # left click
            if event.type == gtk.gdk.BUTTON_PRESS:
                if self.parent.set_state('press'):  # if state has changed
                    self.send_midi_msg(self.parent.get_channel(),
                                       self.parent.get_midi_loc(),
                                       self.parent.get_midi_vel())
                    self.fill()
            elif event.type == gtk.gdk.BUTTON_RELEASE:
                if self.parent.set_state('release'):
                    self.send_midi_msg(self.parent.get_channel(),
                                       self.parent.get_midi_loc(),
                                       self.parent.get_midi_vel())
                    self.fill()
        elif event.button == 3 and event.type == gtk.gdk.BUTTON_PRESS:  # right click
            self.options_visible = not self.options_visible  # toggle boolean
            self.option_widg.set_visibility(self.options_visible)
return True | {
"repo_name": "S1M1S/TopHat-MIDI",
"path": "gui/widgets/key_widg.py",
"copies": "1",
"size": "4494",
"license": "mit",
"hash": -9115532625421537000,
"line_mean": 51.8823529412,
"line_max": 101,
"alpha_frac": 0.505117935,
"autogenerated": false,
"ratio": 4.100364963503649,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5105482898503649,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Celery'
import gtk
from base_widg import BaseWidg
import midi.defaults.defaults as d
# Per-widget options panel: a small table of labelled entries (name, CC
# number, colour, optionally step count) plus a function combo box, shown
# next to the pot/key widget it configures.
class OptionWidg(BaseWidg):
def __init__(self, linked_widg):
self.lnkd_widg = linked_widg
# lwp: the model object (Pot or Key) behind the linked widget.
lwp = self.lnkd_widg.get_parent()
self.alignment = gtk.Alignment()
self.frame = gtk.Frame()
# Pots get one extra row for the 'Steps:' option.
if lwp.__class__.__name__ == 'Pot':
option_menu_y = d.OPTION_MENU_Y + 1
else:
option_menu_y = d.OPTION_MENU_Y
self.table = gtk.Table(d.OPTION_MENU_X, option_menu_y)
self.labels = [gtk.Label() for i in range(option_menu_y)]
# One entry per row except the last, which holds the combo box.
self.entries = [gtk.Entry() for i in range(option_menu_y - 1)]
self.combo_box = gtk.combo_box_new_text()
self.frame.set_size_request(d.OPTION_WIDGET_WIDTH, d.DEFAULT_WIDGET_HEIGHT)
self.alignment.set(0.5, 0.5, 0, 0) # centrally align everything inside
label_data = ['Name:',
'CC no:',
'Colour:',
'Func:']
if lwp.__class__.__name__ == 'Pot':
label_data.insert(3, 'Steps:')
for i, label in enumerate(self.labels):
label.set_text(label_data[i])
label.set_alignment(xalign=1.0, yalign=0.5)
self.table.attach(label, 0, 1, i, i + 1, xpadding=d.OPTION_PADDING)
self.update_text()
for i, entry in enumerate(self.entries):
self.table.attach(entry, 1, 2, i, i + 1, gtk.SHRINK | gtk.FILL, xpadding=d.OPTION_PADDING)
self.combo_box.connect('changed', self.combo_callback, lwp.set_func)
# The combo box sits below the entries; one row lower for pots.
if lwp.__class__.__name__ == 'Pot':
combo_y_pos = [4, 5]
else:
combo_y_pos = [3, 4]
self.table.attach(self.combo_box, 1, 2, combo_y_pos[0], combo_y_pos[1], gtk.SHRINK | gtk.FILL, gtk.SHRINK, d.OPTION_PADDING)
for func in lwp.get_available_funcs():
self.combo_box.append_text(func)
self.combo_box.set_active(lwp.get_available_funcs().index(lwp.func))
self.frame.add(self.table)
self.alignment.add(self.frame)
self.show_self()
self.set_visibility(False)
def update_text(self):
# Refresh the entry texts from the model and (re)wire their callbacks.
# NOTE(review): ('cc no') and ('steps') are parenthesised strings, not
# tuples — entry_callback's args[0] sees the character 'c'/'s', which only
# works because it never equals 'name'/'colour'. Also, each call connects
# 'activate' again, stacking duplicate handlers — confirm intended.
lwp = self.lnkd_widg.get_parent()
entry_data = [(lwp.get_name(), lwp.set_name, ('name', self.lnkd_widg.label)),
(lwp.get_midi_loc(), lwp.set_midi_loc, ('cc no')),
(lwp.get_colour(), lwp.set_colour, ('colour', self.lnkd_widg.fill))]
if lwp.__class__.__name__ == 'Pot':
entry_data.append((lwp.get_num_steps(), lwp.set_num_steps, ('steps')))
for i, entry in enumerate(self.entries):
entry.set_text(str(entry_data[i][0]))
entry.connect('activate', self.entry_callback, entry_data[i][1], entry_data[i][2])
@staticmethod
def entry_callback(widget, function, args):
# Push the entry's text into the model setter; some attributes need an
# extra UI refresh carried in args[1].
text = widget.get_text()
function(text)
if args[0] == 'name': # dynamically setting functions results in things like this:
args[1].set_text(text) # in order for set_name to work, it needs extra bits of data no other callback needs
elif args[0] == 'colour': # so args[1] is out of range when args[0] is 'midi' as it doesn't need anything extra
args[1]() # set the new colour immediately, don't wait for redraw
@staticmethod
def combo_callback(widget, function):
# Forward the newly selected function name to the model's set_func.
text = widget.get_active_text()
function(text) | {
"repo_name": "S1M1S/TopHat-MIDI",
"path": "gui/widgets/option_widg.py",
"copies": "1",
"size": "3427",
"license": "mit",
"hash": -5711975441967206000,
"line_mean": 43.5194805195,
"line_max": 132,
"alpha_frac": 0.5789320105,
"autogenerated": false,
"ratio": 3.211808809746954,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9280116108769935,
"avg_score": 0.0021249422954038202,
"num_lines": 77
} |
__author__ = 'Celery'
import midi.defaults.defaults as d
class Base:
    """Shared state and behaviour for MIDI controls (pots and keys):
    name, MIDI CC location, function mode, colour and on/off state."""

    def __init__(self, name, midi_loc, func, colour=None, state=False):
        self.name = name
        self.midi_loc = midi_loc   # MIDI CC number
        self.func = func           # behaviour mode; validated against available_funcs
        self.state = state
        self.linked_mod = None
        # Subclasses must set this to a tuple of accepted mode names.
        self.available_funcs = None
        if colour is not None:
            self.colour = colour
        else:
            self.colour = d.rgb_cols['red']  # defaults to red

    def pre_save(self):  # called just before saving
        # 'hold' is momentary, so never persist an active state.
        if self.func == 'hold':
            self.state = False

    def post_load(self):  # called just after loading
        if self.func == 'hold':
            self.state = False

    def set_name(self, new_name):
        # NOTE(review): raises NameError (kept for backward compatibility with
        # existing callers); ValueError would be the conventional choice.
        if len(new_name) > 10:
            raise NameError('Name was too long')
        self.name = new_name

    def set_midi_loc(self, new_midi_loc):
        """Parse a decimal or 0x-prefixed hex CC number string.
        Returns True on success, False on a malformed number."""
        success = True
        if new_midi_loc[:2] == "0x":
            base = 16
            new_midi_loc = new_midi_loc[2:]
        else:
            base = 10
        try:
            self.midi_loc = int(new_midi_loc, base)
        except ValueError:
            success = False
        return success

    def set_func(self, new_func):
        """Accept the new mode only if it is one of available_funcs."""
        success = False
        if new_func in self.available_funcs:  # if it is an acceptable function
            self.func = new_func
            success = True
        return success

    def set_colour(self, new_colour):
        """Accept '(r, g, b)' / '[r, g, b]' or a colour name from the defaults
        table. Returns True on success, False otherwise."""
        success = True
        if new_colour.startswith(('(', '[')):  # it must be an rgb tuple or list
            try:
                new_colour = [int(x) for x in new_colour[1:-1].split(",")]  # make each item an integer
            except ValueError:
                success = False
            # Bug fix: range-check only when parsing succeeded. The old code
            # fell through and iterated the original *string*, comparing
            # str > int — a TypeError on Python 3 (silently False on Python 2).
            if success:
                for val in new_colour:
                    if val > 255:
                        success = False
                if success:
                    self.colour = new_colour
        elif new_colour in d.rgb_cols:  # it must be a named colour
            self.colour = d.rgb_cols[new_colour]
        else:
            success = False
        return success

    def set_linked_mod(self, new_mod):
        self.linked_mod = new_mod

    def get_name(self):
        return self.name

    def get_midi_loc(self):
        return self.midi_loc

    def get_func(self):
        return self.func

    def get_colour(self):
        return self.colour

    def get_gtk_colour(self):
        # gtk colormaps use 16-bit channels, so scale 0-255 up to 0-65535.
        gtk_colour = [int((x/255.0)*65535) for x in self.colour]  # gtk does not accept regular rgb values
        return gtk_colour
def get_available_funcs(self):
return self.available_funcs | {
"repo_name": "S1M1S/TopHat-MIDI",
"path": "midi/objects/base.py",
"copies": "1",
"size": "2606",
"license": "mit",
"hash": 2674738319640532500,
"line_mean": 27.9666666667,
"line_max": 106,
"alpha_frac": 0.5402916347,
"autogenerated": false,
"ratio": 3.9070464767616193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4947338111461619,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Celery'
import pygtk
pygtk.require('2.0')
import gtk
from widgets.key_widg import KeyWidg
from widgets.pot_widg import PotWidg
from midi.main import Engine
import midi.defaults.defaults as d
class Gui:
    """Top-level PyGTK window: menu bar plus the pot and key widget grids.

    Relies on the module-level ``engine`` created in the __main__ block.
    """

    def new_menu_item(self, name, img=None, accel=None, func=None, *args):
        """Build one menu item: a stock image item with accelerator (img set),
        a separator (name == 'sep'), or a plain text item. Wires *func* to
        'activate', passing the item's name so one handler can serve several
        entries (see new_file / edit_key_attrs)."""
        if img is not None:
            menu_item = gtk.ImageMenuItem(img, name)
            key, mod = gtk.accelerator_parse(accel)
            menu_item.add_accelerator('activate', self.shortcuts, key, mod, gtk.ACCEL_VISIBLE)
        elif name == 'sep':
            menu_item = gtk.SeparatorMenuItem()
        else:
            menu_item = gtk.MenuItem(name)
        if func is not None:
            menu_item.connect('activate', func, name)
        menu_item.show()
        return menu_item

    def create_menu(self):
        """Build the menu bar, the scrolled main area, and the key context menu."""
        self.shortcuts = gtk.AccelGroup()
        self.window.add_accel_group(self.shortcuts)
        file_root = gtk.MenuItem('File')
        file_items = [['New', gtk.STOCK_NEW, '<Control>N', self.new_file],
                      ['Open', gtk.STOCK_OPEN, '<Control>O', self.open_file],
                      ['Save As', gtk.STOCK_SAVE_AS, '<Control><Shift>S', self.new_file],
                      ['Save', gtk.STOCK_SAVE, '<Control>S', self.save_file],
                      ['sep', None, None, None],
                      ['Quit', gtk.STOCK_QUIT, '<Control>Q', self.delete_event]]
        file_menu = gtk.Menu()
        file_root.set_submenu(file_menu)
        for name, img, accel, func in file_items:
            menu_item = self.new_menu_item(name, img, accel, func)
            file_menu.append(menu_item)
        file_root.show()
        file_menu.show()
        newobj_root = gtk.MenuItem('Objects')
        newobj_items = [['Create new modulator', self.new_modulator]]
        newobj_menu = gtk.Menu()
        newobj_root.set_submenu(newobj_menu)
        for name, func in newobj_items:
            menu_item = self.new_menu_item(name, func=func)
            newobj_menu.append(menu_item)
        newobj_root.show()
        newobj_menu.show()
        self.menu_separator = gtk.VBox()
        self.scrolled_window = gtk.ScrolledWindow()
        self.scrolled_window.add_with_viewport(self.menu_separator)
        self.window.add(self.scrolled_window)
        self.scrolled_window.set_policy(gtk.POLICY_AUTOMATIC,
                                        gtk.POLICY_AUTOMATIC)  # make the scrollbars show only when needed
        self.scrolled_window.show()
        self.menu_separator.show()
        self.menu_bar = gtk.MenuBar()
        self.menu_separator.pack_start(self.menu_bar, False, False, 2)
        self.menu_bar.append(file_root)
        self.menu_bar.append(newobj_root)
        self.menu_bar.show()
        key_items = [['Edit CC number...', self.edit_key_attrs],
                     ['Edit key function...', self.edit_key_attrs],
                     ['Edit key colour...', self.edit_key_attrs],
                     ['Link to modulator...', self.edit_key_attrs]]
        self.key_menu = gtk.Menu()
        for name, func in key_items:
            menu_item = self.new_menu_item(name, func=func)
            self.key_menu.append(menu_item)

    def create_pot_widgs(self):
        """Create one PotWidg per engine pot and place it in the pot grid."""
        for y in range(d.NUM_OF_POTS_V):
            self.pot_widgs.append([])
            for x in range(d.NUM_OF_POTS_H):
                parent = engine.pots[y][x]
                self.pot_widgs[y].append(PotWidg(parent, engine))
                cur_widg = self.pot_widgs[y][x]
                self.pot_grid.attach(cur_widg.alignment, x, x+1, y, y+1)

    def create_key_widgs(self):
        """Create one KeyWidg per engine key and place it in the key grid."""
        for y in range(d.NUM_OF_KEYS_V):
            self.key_widgs.append([])
            # Bug fix: the inner (x) loop must use the horizontal count, as in
            # create_pot_widgs. It previously used NUM_OF_KEYS_V — harmless
            # today only because both constants happen to be 4.
            for x in range(d.NUM_OF_KEYS_H):
                parent = engine.keys[y][x]
                self.key_widgs[y].append(KeyWidg(parent, engine))
                cur_widg = self.key_widgs[y][x]
                self.key_grid.attach(cur_widg.alignment, x, x+1, y, y+1)

    def refresh_widgets(self):
        """Re-point every key widget at the engine's (re)loaded key objects.
        NOTE(review): pot widgets are not refreshed here — confirm intended."""
        for i, row in enumerate(self.key_widgs):
            for j, key_widg in enumerate(row):
                key_widg.set_parent(engine.keys[i][j])

    def __init__(self):
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.set_title('TopHat MIDI')
        self.window.set_default_size(800, 600)
        self.window.connect('delete_event', self.delete_event)
        self.create_menu()
        self.widget_separator = gtk.HBox()
        self.pot_widgs = []
        self.key_widgs = []
        self.pot_grid = gtk.Table(d.NUM_OF_POTS_H, d.NUM_OF_POTS_V)
        self.key_grid = gtk.Table(d.NUM_OF_KEYS_H, d.NUM_OF_KEYS_V)
        self.create_pot_widgs()
        self.create_key_widgs()
        self.widget_separator.pack_start(self.pot_grid)
        self.widget_separator.pack_start(self.key_grid)
        self.menu_separator.pack_start(self.widget_separator)
        self.pot_grid.show()
        self.key_grid.show()
        self.widget_separator.show()
        self.menu_separator.show()
        self.window.show()

    def delete_event(self, widget, event, data=None):
        gtk.main_quit()
        return False

    def new_file(self, widget, event, data=None):
        """Menu handler shared by 'New' and 'Save As' (*event* is the label).
        NOTE(review): title/func stay unbound for any other label; only those
        two menu items are wired to this handler."""
        if event == 'New':
            title = 'Create a new file'
            func = engine.new_file
        elif event == 'Save As':
            title = 'Save as'
            func = engine.save_as
        new_file_window = gtk.FileChooserDialog(title=title,
                                                parent=self.window,
                                                action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
                                                buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
        filename_label = gtk.Label('Enter a filename:')
        filename_input = gtk.Entry()
        filename_bar = gtk.HBox(spacing=10)
        filename_bar.pack_start(filename_label, expand=False)
        filename_bar.pack_start(filename_input)
        filename_label.show()
        filename_input.show()
        new_file_window.set_extra_widget(filename_bar)
        event = new_file_window.run()  # 'event' reused for the dialog response
        if event == gtk.RESPONSE_OK:
            func(filename_input.get_text(), new_file_window.get_filename())
            self.refresh_widgets()
        new_file_window.destroy()
        self.window.queue_draw()

    def open_file(self, widget, event, data=None):
        """Prompt for a project file and load it into the engine."""
        open_file_window = gtk.FileChooserDialog(title='Select a file to open',
                                                 parent=self.window,
                                                 buttons=(gtk.STOCK_CANCEL,
                                                          gtk.RESPONSE_CANCEL,
                                                          gtk.STOCK_OPEN,
                                                          gtk.RESPONSE_OK))
        event = open_file_window.run()
        if event == gtk.RESPONSE_OK:
            engine.open(open_file_window.get_filename())
            self.refresh_widgets()
        open_file_window.destroy()

    def save_file(self, widget, event, data=None):
        engine.save()

    def new_modulator(self, **kwargs):  # TODO: actually make this function. Should create a new mod_widg on the screen.
        pass

    def entry_control(self, entry, event, key_widg):
        """Make a label-like gtk.Entry editable on double-click and commit the
        new name back to the key's model when focus leaves it."""
        if event.type == gtk.gdk._2BUTTON_PRESS:
            entry.set_editable(True)
            entry.set_has_frame(True)
            entry.grab_focus()
            entry.select_region(0, -1)
        elif event.type == gtk.gdk.BUTTON_PRESS:
            entry.set_position(0)
        else:
            normalise = False
            if event.type == gtk.gdk.FOCUS_CHANGE:
                normalise = True
            # elif event.type == gtk.gdk.KEY_PRESS:
            #     if event.keyval == 65293: # enter was pressed
            #         normalise = True
            if normalise:
                entry.set_editable(False)
                entry.set_has_frame(False)
                key_widg.parent.set_name(entry.get_text())
                entry.select_region(0, 0)
        return True

    def edit_key_attrs(self, menu, event, data=None):
        """Context-menu handler: prompt for and apply a new key attribute
        (*event* is the menu item's label).
        NOTE(review): self.selected_key_widg is never assigned in this file —
        confirm the key widgets set it on selection. EntryDialog is likewise
        not defined or imported here — confirm it exists elsewhere."""
        active_key = self.selected_key_widg.parent
        if event == 'Edit CC number...':
            prev_val = active_key.midi_loc
            func = active_key.set_midi_loc
        elif event == 'Edit key function...':
            prev_val = active_key.func
            func = active_key.set_func
        elif event == 'Edit key colour...':
            prev_val = active_key.colour
            func = active_key.set_colour
        else:  # event == 'Link to modulator...':
            prev_val = active_key.linked_mod
            func = active_key.set_linked_mod
        entry = EntryDialog(parent=None,
                            flags=gtk.DIALOG_DESTROY_WITH_PARENT,
                            message_format=event,  # the title of the entry dialog
                            buttons=gtk.BUTTONS_OK,
                            default_value=prev_val)
        entry.format_secondary_text('Please enter a new value')
        new_val = entry.run()
        if new_val is not None:
            func(new_val)  # call the function specified above with the new value to be set
        entry.destroy()
if __name__ == "__main__":
engine = Engine()
engine.new_file('default', 'C:\\Users\\Celery\\Documents\\TopHat-MIDI')
engine.open('C:\\Users\\Celery\\Documents\\TopHat-MIDI\\default.txt')
gui = Gui()
gtk.main() | {
"repo_name": "S1M1S/TopHat-MIDI",
"path": "gui/gui.py",
"copies": "1",
"size": "9591",
"license": "mit",
"hash": 1165059341581017300,
"line_mean": 40.5238095238,
"line_max": 129,
"alpha_frac": 0.5435303931,
"autogenerated": false,
"ratio": 3.67612111920276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.471965151230276,
"avg_score": null,
"num_lines": null
} |
# Example of using matplotlib to create boxplot:
# http://matplotlib.org/examples/pylab_examples/boxplot_demo2.html
import argparse
import sys
import os
import re
import pylab
import numpy
usage = """ %s [options] -i INFILE
Use matplotlib to create boxplot
""" % (__file__)
def append_element_to_list(element, list):
    """Evaluate *element* (a Python expression string) and append the result to *list*.

    The expression is evaluated in this function's scope, so it can reference
    module-level names such as ``T``, ``N`` and ``R``.  If evaluation fails
    (e.g. the index does not exist for this data line), the element is
    skipped silently -- intentional: not every line carries every field.

    Note: ``eval`` is only acceptable here because the expressions are
    hard-coded by this script, never taken from untrusted input.
    """
    try:
        list.append(eval(element))
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        pass
# Command-line interface: the only (required) option is the input data file.
parser = argparse.ArgumentParser(description=usage, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', '--infile', dest='infile',
                    required=True,
                    help='path to input file')
##get at the arguments
args = parser.parse_args()
##do something...
with open(args.infile, 'r') as infile:
    # Pre-compiled patterns: comment_line skips "#" lines; find_tuple grabs
    # the trailing tuple-like field from each data line; the add_comma*
    # substitutions insert the commas the field is missing so the string
    # can be eval()'d into real Python tuples.
    comment_line = re.compile("^#")
    find_tuple = re.compile(r'^[0-9xXyY]{1,2} \d+ .*? (.*)')
    add_comma1 = re.compile('e \(')
    add_comma2 = re.compile('\) \(')
    add_comma3 = re.compile('\) N')
    add_comma4 = re.compile('e N')
    ##init list
    # One accumulator per plotted series (T/N/R tuples, selected indices).
    T_5_0 = []
    T_5_1 = []
    T_5_2 = []
    T_5_3 = []
    T_5_4 = []
    T_8 = []
    T_9 = []
    T_10 = []
    T_11 = []
    N_5_0 = []
    N_5_1 = []
    N_5_2 = []
    N_5_3 = []
    N_5_4 = []
    N_8 = []
    N_9 = []
    N_10 = []
    N_11 = []
    R_1 = []
    R_2 = []
    R_3 = []
    R_4 = []
    data_line_counter = 0
    for line in infile:
        if (not comment_line.match(line)):
            data_line_counter += 1
            # NOTE(review): eval() of file content -- acceptable only when
            # the input file is trusted; confirm the data source.
            t = find_tuple.findall(line)[0]
            t = add_comma1.sub('e, (', t)
            t = add_comma2.sub('), (', t)
            t = add_comma3.sub('), N', t)
            t = add_comma4.sub('e, N', t)
            tuples = eval(t)
            T = tuples[0]
            N = tuples[1]
            R = tuples[2]
            ##plot 1
            # Missing indices are skipped silently by append_element_to_list.
            append_element_to_list('T[5][0]', T_5_0)
            append_element_to_list('T[5][1]', T_5_1)
            append_element_to_list('T[5][2]', T_5_2)
            append_element_to_list('T[5][3]', T_5_3)
            append_element_to_list('T[5][4]', T_5_4)
            append_element_to_list('T[8]', T_8)
            append_element_to_list('T[9]', T_9)
            append_element_to_list('T[10]', T_10)
            append_element_to_list('T[11]', T_11)
            ##plot 2
            append_element_to_list('N[5][0]', N_5_0)
            append_element_to_list('N[5][1]', N_5_1)
            append_element_to_list('N[5][2]', N_5_2)
            append_element_to_list('N[5][3]', N_5_3)
            append_element_to_list('N[5][4]', N_5_4)
            append_element_to_list('N[8]', N_8)
            append_element_to_list('N[9]', N_9)
            append_element_to_list('N[10]', N_10)
            append_element_to_list('N[11]', N_11)
            ##plot 3
            append_element_to_list('R[1]', R_1)
            append_element_to_list('R[2]', R_2)
            append_element_to_list('R[3]', R_3)
            append_element_to_list('R[4]', R_4)
## data == [[data], [level], [label], [filename-prefix]] ==================================
# Each entry bundles: the raw series, a per-series "level" used to split the
# series across separate figures, the series labels, and the filename prefix.
data_plot1 = [[T_5_0, T_5_1, T_5_2, T_5_3, T_5_4, T_8, T_9, T_10, T_11], [0, 1, 1, 1, 0, 0, 0, 0, 0],
              ['T_5_0', 'T_5_1', 'T_5_2', 'T_5_3', 'T_5_4', 'T_8', 'T_9', 'T_10', 'T_11'], "T"]
data_plot2 = [[N_5_0, N_5_1, N_5_2, N_5_3, N_5_4, N_8, N_9, N_10, N_11], [0, 1, 1, 1, 0, 0, 0, 0, 0],
              ['N_5_0', 'N_5_1', 'N_5_2', 'N_5_3', 'N_5_4', 'N_8', 'N_9', 'N_10', 'N_11'], "N"]
data_plot3 = [[R_1, R_2, R_3, R_4], [0, 0, 0, 0], ['R_1', 'R_2', 'R_3', 'R_4'], "R"]
## Example data
# b = [[[8, 17, 23, 31, 55, 54, 13, 4],
# [153, 529, 1152, 1110, 2010, 1983, 433, 78],
# [380, 772, 1776, 1829, 3114, 3054, 718, 240],
# [243, 643, 1618, 1422, 1945, 1867, 458, 128],
# [7, 23, 16, 20, 28, 28, 10, 4],
# [0, 0, 0, 0, 0, 0, 0, 0],
# [0, 1, 0, 0, 0, 0, 0, 0],
# [0.6411148186324177, 0.2912814467821452, 0.335456212549906, 0.6524452376496032, 0.3664983435521037, 0.366581046925974, 0.3224848765251726, 0.2703107720721096],
# [0, 2, 0, 0, 0, 0, 0, 0]],
# [0, 1, 1, 1, 0, 0, 0, 0, 0],
# ['label1', 'label2', 'label3', 'label4', 'label5', 'label6', 'label7', 'label8', 'label9'],
# 'Filename']
for b in [data_plot1, data_plot2, data_plot3]:
    data = b[0]
    levels = b[1]
    labels = b[2]
    # NOTE(review): data_mean/data_var are initialized but never used; the
    # per-figure d_mean/d_var below are what feed the labels.
    data_mean = []
    data_var = []
    # One figure per distinct level; series sharing a level share a figure.
    for level in list(set(levels)):
        index = [i for i,v in enumerate(levels) if v == level]
        sys.stderr.write("working on %s_%s\n" % (b[3], level))
        d = [data[x] for x in index]
        l = [labels[x] for x in index]
        d_mean = [numpy.mean(x) for x in d]
        d_var = [numpy.var(x) for x in d]
        # Fold each series' mean and variance into its x-axis label.
        l = ["%s\n%.2f\n%.2f" % (l[i], d_mean[i], d_var[i]) for i, v in enumerate(l)]
        filename = "%s_%s.png" % (b[3], level)
        pylab.figure()
        pylab.title("Number of points: %s / %s" % (len(d[0]), data_line_counter))
        pylab.boxplot(d)
        pylab.xticks(rotation=45)
        # NOTE(review): this second xticks() call replaces the ticks set just
        # above and passes no rotation -- the 45-degree rotation is likely
        # lost; confirm whether rotation=45 should move into this call.
        pylab.xticks(range(1, len(index) + 1), l)
        pylab.xlabel("(Feature/Mean/Variance)")
        # ymax = max([max(d[i]) for i, v in enumerate(d)])
        # top = ymax+(ymax*0.05) ## not a very good formula for describing info
        # for i,v in enumerate(d):
        #     pylab.text(i+1, top, len(d[i]), horizontalalignment='center')
        pylab.tight_layout()
        # pylab.show()
        pylab.savefig(filename)
| {
"repo_name": "csiu/tokens",
"path": "python/relic/matplotlib_boxplot.py",
"copies": "1",
"size": "5492",
"license": "mit",
"hash": -690917945004816100,
"line_mean": 31.4970414201,
"line_max": 169,
"alpha_frac": 0.4794246176,
"autogenerated": false,
"ratio": 2.5820404325340856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35614650501340855,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cenk'
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import Permission
class BottomUpRoleAuthenticateBackend(ModelBackend):
    """
    Authenticates against settings.AUTH_USER_MODEL.
    """

    def get_role_permissions(self, user_obj, obj=None):
        """
        Return the set of permission strings the user gains through role
        groups.  A parent has a permission whenever one of its children has
        it according to the role hierarchy (bottom-up propagation).

        :param user_obj: current user
        :param obj: object the user wants to access; object-level checks are
            not supported, so a non-None value yields no permissions
        :return: set of "app_label.codename" strings
        """
        if user_obj.is_anonymous() or obj is not None:
            return set()
        if not hasattr(user_obj, '_role_perm_cache'):
            # Superusers implicitly hold every permission.
            if user_obj.is_superuser:
                qs = Permission.objects.all()
            else:
                qs = Permission.objects.filter(
                    **{"group__in": user_obj.get_role_group_ids()})
            pairs = qs.values_list('content_type__app_label', 'codename').order_by()
            user_obj._role_perm_cache = {
                "%s.%s" % (app_label, codename) for app_label, codename in pairs
            }
        return user_obj._role_perm_cache

    def get_all_permissions(self, user_obj, obj=None):
        """
        :param user_obj: current user
        :param obj: which object user want to access
        :return: set of every permission string the user holds (direct,
            group-based, and role-based), cached on the user object
        """
        if user_obj.is_anonymous() or obj is not None:
            return set()
        if not hasattr(user_obj, '_perm_cache'):
            user_obj._perm_cache = {
                "%s.%s" % (p.content_type.app_label, p.codename)
                for p in user_obj.user_permissions.select_related()
            }
            user_obj._perm_cache.update(self.get_group_permissions(user_obj))
            user_obj._perm_cache.update(self.get_role_permissions(user_obj))
        return user_obj._perm_cache
| {
"repo_name": "cenkbircanoglu/django-roles",
"path": "django_roles/backends/bottomup_role_authenticate_backend.py",
"copies": "1",
"size": "1941",
"license": "mit",
"hash": -6810606050448815000,
"line_mean": 39.4375,
"line_max": 117,
"alpha_frac": 0.6130860381,
"autogenerated": false,
"ratio": 3.897590361445783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9899036697639336,
"avg_score": 0.022327940381289105,
"num_lines": 48
} |
__author__ = 'cenk'
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import Permission
class RoleAuthenticateBackend(ModelBackend):
    """
    Authenticates against settings.AUTH_USER_MODEL.
    """

    def get_role_permissions(self, user_obj, obj=None):
        """
        Return the set of permission strings the user has through role
        groups.  Permissions do NOT propagate up the hierarchy here: only
        the groups directly attached via ``user_obj.roles`` count (contrast
        with the bottom-up backend, where a parent inherits from children).

        :param user_obj: current user
        :param obj: which object user wants to access; object-level checks
            are not supported, so a non-None value yields no permissions
        :return: set of "app_label.codename" permission strings
        """
        if user_obj.is_anonymous() or obj is not None:
            return set()
        if not hasattr(user_obj, '_role_perm_cache'):
            # Superusers implicitly hold every permission.
            if user_obj.is_superuser:
                perms = Permission.objects.all()
            else:
                user_group_ids = user_obj.roles.all().values_list('id', flat=True)
                perms = Permission.objects.filter(**{"group__in": user_group_ids})
            perms = perms.values_list('content_type__app_label', 'codename').order_by()
            # Cache on the user object so repeated checks hit the DB once.
            user_obj._role_perm_cache = set("%s.%s" % (ct, name) for ct, name in perms)
        return user_obj._role_perm_cache

    def get_all_permissions(self, user_obj, obj=None):
        """
        :param user_obj: current user
        :param obj: which object user wants to access
        :return: set of every permission string the user holds (direct,
            group-based, and role-based), cached on the user object
        """
        if user_obj.is_anonymous() or obj is not None:
            return set()
        if not hasattr(user_obj, '_perm_cache'):
            user_obj._perm_cache = set(
                "%s.%s" % (p.content_type.app_label, p.codename) for p in user_obj.user_permissions.select_related())
            user_obj._perm_cache.update(self.get_group_permissions(user_obj))
            user_obj._perm_cache.update(self.get_role_permissions(user_obj))
        return user_obj._perm_cache
| {
"repo_name": "cenkbircanoglu/django-roles",
"path": "django_roles/backends/role_authenticate_backend.py",
"copies": "1",
"size": "1956",
"license": "mit",
"hash": -3153177723753045000,
"line_mean": 38.9183673469,
"line_max": 117,
"alpha_frac": 0.6119631902,
"autogenerated": false,
"ratio": 3.904191616766467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5016154806966466,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cenk'
import json
class Config():
    """Typed accessor over a JSON configuration file.

    Loads the JSON document at *path* on construction and exposes the
    "spark", "cassandra", "elasticsearch" and "rollups" sections through
    simple getter methods.
    """

    def __init__(self, path=None):
        self.path = path
        self.data = {}
        self.cassandra_conf = None
        self.elastic_conf = None
        self.spark_conf = None
        self.rollups = None
        self.load_config_from_json()
        self.parse()

    def load_config_from_json(self):
        """Read and decode the JSON file at ``self.path``.

        IOError / ValueError propagate unchanged to the caller (the old
        ``try: ... except: raise`` wrapper was a no-op and was removed).
        """
        # The context manager closes the file even on error, so the manual
        # close() the old code used is no longer needed.
        with open(self.path) as data_file:
            self.data = json.load(data_file)

    def parse(self):
        """Split the raw document into per-subsystem config sections."""
        self.spark_conf = self.data.get("spark")
        self.cassandra_conf = self.data.get("cassandra")
        # BUG FIX: this used to be assigned to ``self.elastic`` while every
        # elastic getter reads ``self.elastic_conf`` (which stayed None), so
        # get_elastic_host()/index()/port() crashed with AttributeError.
        self.elastic_conf = self.data.get("elasticsearch")
        self.elastic = self.elastic_conf  # kept for backward compatibility
        self.rollups = self.data.get("rollups")

    def get_spark_master(self):
        return self.spark_conf.get("master")

    def get_cassandra_host(self):
        return self.cassandra_conf.get("host")

    def get_cassandra_keyspace(self):
        return self.cassandra_conf.get("keyspace")

    def get_cassandra_table(self):
        return self.cassandra_conf.get("table")

    def get_elastic_host(self):
        return self.elastic_conf.get("host")

    def get_elastic_index(self):
        return self.elastic_conf.get("index")

    def get_elastic_port(self):
        return self.elastic_conf.get("port")
| {
"repo_name": "egemsoft/cassandra-spark-rollup",
"path": "cronjob/app/config.py",
"copies": "2",
"size": "1333",
"license": "mit",
"hash": 7490275946030369000,
"line_mean": 24.1509433962,
"line_max": 56,
"alpha_frac": 0.5881470368,
"autogenerated": false,
"ratio": 3.6222826086956523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5210429645495652,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cenk'
import numpy as np
import scipy.optimize as sop
class SparseAutoencoder(object):
    """Trains a linear sparse autoencoder on ZCA-whitened STL image patches."""

    def process(self, epsilon=0.1):
        """ Load the dataset and preprocess using ZCA Whitening.

        :param epsilon: regularization added to the singular values before
            taking the inverse square root, so tiny components do not blow up.
        :return: (opt_W1, zca_white, mean_patch) -- the learned first-layer
            weights, the whitening matrix, and the subtracted mean (needed to
            preprocess new data identically).
        """
        from reader.stl import STL
        import scipy  # NOTE(review): appears unused in this method -- confirm before removing
        image_channels = 3 # number of channels in the image patches
        vis_patch_side = 8 # side length of sampled image patches
        hid_patch_side = 20 # side length of representative image patches
        rho = 0.035 # desired average activation of hidden units
        lamda = 0.003 # weight decay parameter
        beta = 5 # weight of sparsity penalty term
        max_iterations = 100 # number of optimization iterations
        visible_size = vis_patch_side * vis_patch_side * image_channels # number of input units
        hidden_size = hid_patch_side * hid_patch_side # number of hidden units
        data = STL().sampled_patches
        # Subtract the mean along axis 1 (assumes data is laid out
        # (features, patches) -- TODO confirm against reader.stl.STL).
        mean_patch = np.mean(data, axis=1, keepdims=True)
        data = data - mean_patch
        """ Compute the ZCA Whitening matrix """
        # Covariance estimate, then SVD; whitening = U * diag(1/sqrt(s+eps)) * U^T.
        sigma = np.dot(data, np.transpose(data)) / data.shape[1]
        [u, s, v] = np.linalg.svd(sigma)
        rescale_factors = np.diag(1 / np.sqrt(s + epsilon))
        zca_white = np.dot(np.dot(u, rescale_factors), np.transpose(u))
        """ Apply ZCA Whitening to the data """
        data = np.dot(zca_white, data)
        from model_error.cost import LinearCost
        encoder = LinearCost(visible_size, hidden_size, rho, lamda, beta)
        """ Run the L-BFGS algorithm to get the optimal parameter values """
        opt_solution = sop.minimize(encoder.calculate_cost, encoder.theta,
                                    args=(data,), method='L-BFGS-B',
                                    jac=True, options={'maxiter': max_iterations})
        # First-layer weights occupy theta[limit0:limit1]; reshape to (hidden, visible).
        opt_W1 = opt_solution.x[encoder.limit0: encoder.limit1].reshape(hidden_size, visible_size)
        return opt_W1, zca_white, mean_patch
| {
"repo_name": "cenkbircanoglu/cnn-example",
"path": "sparsity/base.py",
"copies": "1",
"size": "1992",
"license": "mit",
"hash": 4377410912981769000,
"line_mean": 38.0588235294,
"line_max": 98,
"alpha_frac": 0.6114457831,
"autogenerated": false,
"ratio": 3.852998065764023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4964443848864023,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cenk'
def check_vertical(cells, free_cells):
    """Find a vertical finishing/blocking move.

    Scan every occupied cell for a column triple in which two cells are
    occupied and the third is free; return that free cell, or None.
    """
    # (occupied offset, free offset) pairs along the column, checked in the
    # same precedence order as before.
    probes = ((1, 2), (2, 1), (-1, -2), (-2, -1))
    for (col, row) in cells:
        for taken_off, free_off in probes:
            if (col, row + taken_off) in cells and (col, row + free_off) in free_cells:
                return (col, row + free_off)
    return None
def check_horizontal(cells, free_cells):
    """Find a horizontal finishing/blocking move.

    Scan every occupied cell for a row triple in which two cells are
    occupied and the third is free; return that free cell, or None.
    """
    # (occupied offset, free offset) pairs along the row, checked in the
    # same precedence order as before.
    probes = ((1, 2), (2, 1), (-1, -2), (-2, -1))
    for (col, row) in cells:
        for taken_off, free_off in probes:
            if (col + taken_off, row) in cells and (col + free_off, row) in free_cells:
                return (col + free_off, row)
    return None
def check_cross(cells, free_cells):
    """Find a diagonal finishing/blocking move.

    Scan every occupied cell for a diagonal triple in which two cells are
    occupied and the third is free; return that free cell, or None.
    """
    # ((occupied dx, dy), (free dx, dy)) probe pairs, listed in the exact
    # precedence order of the original chained conditions.
    probes = (
        ((1, 1), (2, 2)),
        ((-1, 1), (-2, 2)),
        ((2, 2), (1, 1)),
        ((-2, 2), (-1, 1)),
        ((-1, -1), (-2, -2)),
        ((1, -1), (2, -2)),
        ((-2, -2), (-1, -1)),
        ((2, -2), (1, -1)),
    )
    for (col, row) in cells:
        for (tx, ty), (fx, fy) in probes:
            if (col + tx, row + ty) in cells and (col + fx, row + fy) in free_cells:
                return (col + fx, row + fy)
    return None
class Finisher():
    """Chooses a line-completing (winning or blocking) move, if one exists."""

    def do_sockdolager(self, cells, free_cells):
        """Return the free cell that completes a line of three, or fall through.

        Tries vertical, then horizontal, then diagonal triples, in that
        order, returning the first free cell that would complete a line.

        :param cells: cells already occupied by the player being checked
        :param free_cells: cells still available to play
        """
        move_vertical = check_vertical(cells, free_cells)
        if move_vertical:
            return move_vertical
        move_horizontal = check_horizontal(cells, free_cells)
        if move_horizontal:
            return move_horizontal
        move_cross = check_cross(cells, free_cells)
        if move_cross:
            return move_cross
# vertical_incr = Operations.increment_vertical(cell)
# vertical_decr = Operations.decrement_vertical(cell)
# horizontal_incr = Operations.increment_horizontal(cell)
# horizontal_decr = Operations.decrement_horizontal(cell)
# cross_incr = Operations.increment_cross(cell)
# cross_decr = Operations.decrement_cross(cell)
#
# if vertical_incr in cells:
# if vertical_decr in free_cells:
# return vertical_decr
#
# if vertical_decr in cells:
# if vertical_incr in free_cells:
# return vertical_incr
#
# if horizontal_incr in cells:
# if horizontal_decr in free_cells:
# return horizontal_decr
#
# if horizontal_decr in cells:
# if horizontal_incr in free_cells:
# return horizontal_incr
#
# if cross_incr in cells:
# if cross_decr in free_cells:
# return cross_decr
#
# if cross_decr in cells:
# if cross_incr in free_cells:
# return cross_incr
return None | {
"repo_name": "cenkbircanoglu/tic-tac-toe",
"path": "game/algorithm/finisher.py",
"copies": "1",
"size": "3530",
"license": "mit",
"hash": 3342080256166419500,
"line_mean": 35.78125,
"line_max": 69,
"alpha_frac": 0.4801699717,
"autogenerated": false,
"ratio": 3.374760994263862,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9352189737893687,
"avg_score": 0.0005482456140350877,
"num_lines": 96
} |
__author__ = 'cephalopodblue'
import json
import os
class JsonSerializer:
    """Serialize release/track/artist objects into compact JSON strings."""

    @staticmethod
    def release_json(release):
        """
        Get a dictionary that we like & json also likes
        """
        payload = {
            "glossary_title": release.glossary_title,
            "item_code": release.item_code,
            "name": release.title,
            "artist": release.artist,
        }
        return json.dumps(payload)

    @staticmethod
    def track_json(track):
        """Serialize a track's number, name and item code."""
        payload = {
            "track_num": track.track_num,
            "track_name": track.title,
            "item_code": track.item_code,
        }
        return json.dumps(payload)

    @staticmethod
    def artist_json(artist):
        """Serialize an artist's name and item code."""
        payload = {"name": artist.name, "item_code": artist.item_code}
        return json.dumps(payload)
class JsonLogger:
    """Append-only JSON logger: one shared releases file, one track file per release."""

    def __init__(self, output_dir):
        # releases.json accumulates one JSON object per line (JSON-lines style).
        self.releases = os.path.join(output_dir, "releases.json")
        # Track records are grouped into <track_dir>/<release_id>.json files.
        self.track_dir = os.path.join(output_dir, "tracks")
        if not os.path.exists(self.track_dir):
            os.makedirs(self.track_dir)

    def log_release(self, release):
        """Append one release record to the shared releases file."""
        with open(self.releases, "a+") as f:
            f.write(JsonSerializer.release_json(release))
            f.write("\n")

    def log_track(self, track):
        """Append one track record to the file named after its release.

        NOTE(review): assumes ``track.release_id`` is a filesystem-safe
        string -- confirm against the callers.
        """
        track_file = os.path.join(self.track_dir, (track.release_id + ".json"))
        with open(track_file, "a+") as f:
            f.write(JsonSerializer.track_json(track))
f.write("\n") | {
"repo_name": "hidat/audio_pipeline",
"path": "audio_pipeline/serializers/JsonSerializer.py",
"copies": "1",
"size": "1525",
"license": "mit",
"hash": -7916651079056881000,
"line_mean": 31.4680851064,
"line_max": 99,
"alpha_frac": 0.5704918033,
"autogenerated": false,
"ratio": 3.7195121951219514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9686613482974697,
"avg_score": 0.02067810308945061,
"num_lines": 47
} |
__author__ = 'cephalopodblue'
import musicbrainzngs as ngs
from . import Util
import time
RETRY = 5
class MBInfo:
    """Thin wrapper around musicbrainzngs with retry and backup-server support."""

    # Hostname musicbrainzngs ships with; used to detect a custom server.
    default_server = ngs.hostname

    def __init__(self, server=None, backup_server=None, useragent=("hidat_audio_pipeline", "0.1")):
        # Only switch hosts when the caller asked for a non-default server.
        if server is not None and server != self.default_server:
            ngs.set_hostname(server)
        self.backup_server = backup_server
        ngs.set_useragent(useragent[0], useragent[1])

    @classmethod
    def set_mbhost(cls, server=None):
        # Overrides the class-wide default server name (does not call ngs).
        cls.default_server = server

    @classmethod
    def set_backup(cls, server=None):
        # Sets the fallback server on the class rather than on an instance.
        cls.backup_server = server
def __do_mb_request(self, request, *args, **kwargs):
    """
    Perform the actual MB request, retrying on network failure.

    Tries up to RETRY times; on a NetworkError it sleeps 0.2s, retries
    once immediately, and falls back to the backup server (permanently
    switching the hostname) when one is configured.  ResponseErrors are
    propagated to the caller immediately.

    :param request: musicbrainzngs function to call with *args/**kwargs
    :return: the decoded response, or the result of one final attempt when
        every retry produced nothing
    """
    mb_meta = None
    for i in range(RETRY):
        try:
            mb_meta = request(*args, **kwargs)
            break
        except ngs.ResponseError as e:
            raise e
        except ngs.NetworkError:
            # can't reach the musicbrainz server - wait 0.2 seconds and try
            # again (the old comment claimed 10 seconds; the code sleeps 0.2)
            time.sleep(.2)
            try:
                mb_meta = request(*args, **kwargs)
            except ngs.NetworkError as e:
                # if we still can't reach it, try the backup server (if there is one)
                if self.backup_server:
                    try:
                        ngs.set_hostname(self.backup_server)
                        mb_meta = request(*args, **kwargs)
                    except ngs.NetworkError as e:
                        # swallow and loop again after a short pause
                        time.sleep(.2)
                else:
                    time.sleep(.2)
    if not mb_meta:
        # Last-ditch attempt so the caller gets a real exception rather
        # than a silent None when every retry failed.
        try:
            mb_meta = request(*args, **kwargs)
        except ngs.ResponseError as e:
            raise e
        except ngs.NetworkError as e:
            # still unreachable -- propagate to the caller
            raise e
    return mb_meta
def get_group_releases(self, release_group_id):
    """Return the release list for a release group, or None when the id is
    not a valid MBID or the lookup returned nothing."""
    include = ["artist-credits", "recordings", "isrcs", "media", "release-groups", "labels"]
    if Util.is_mbid(release_group_id):
        mb_meta = self.__do_mb_request(ngs.browse_releases, release_group=release_group_id, includes=include)
        if mb_meta:
            return mb_meta['release-list']

#####
# == Get Release
# Retrieves a raw release from MusicBrainz using their API
#####
def get_release(self, release_id):
    """Return the raw MusicBrainz release dict, or None on an invalid MBID
    or empty response."""
    include=["artist-credits", "recordings", "isrcs", "media", "release-groups", "labels", "artists"]
    mb_meta = None
    if Util.is_mbid(release_id):
        mb_meta = self.__do_mb_request(ngs.get_release_by_id, release_id, includes=include)
    if mb_meta:
        return mb_meta['release']
#####
# == Get artist
# Retrieves raw artist metadata from MusicBrainz using their API
#####
def get_artist(self, artist_id):
    """Return the raw MusicBrainz artist dict (with aliases and relations),
    or None on an invalid MBID or empty response."""
    include=["aliases", "url-rels", "annotation", "artist-rels"]
    if Util.is_mbid(artist_id):
        mb_meta = self.__do_mb_request(ngs.get_artist_by_id, artist_id, includes=include)
        if mb_meta:
return mb_meta['artist'] | {
"repo_name": "hidat/audio_pipeline",
"path": "audio_pipeline/util/MBInfo.py",
"copies": "1",
"size": "3437",
"license": "mit",
"hash": 6161031231740158000,
"line_mean": 34.0816326531,
"line_max": 113,
"alpha_frac": 0.5341867908,
"autogenerated": false,
"ratio": 4.043529411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5077716202564706,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cephalopodblue'
import os
import argparse
import mutagen
import shutil
import csv
import hashlib
import uuid as UUID
import sys
import xml.etree.ElementTree as ET
import datetime
import MBInfo
import MetaProcessor
import DaletSerializer
import musicbrainzngs.musicbrainz as musicbrainz
import unicodedata
_file_types = set([".xml"])
def process_directory(source_dir, output_dir, glossary_list_file, input_release_meta, serializer):
glossary_ids = []
with open(glossary_list_file, 'r') as f:
for line in f:
line = unicodedata.normalize('NFKD', line).encode('ascii', 'ignore').decode()
glossary_ids.append(line.rstrip())
glossary_ids = set(glossary_ids)
path_start = len(source_dir) + 1
track_meta_dir = os.path.join(output_dir, 'track_meta')
if not os.path.exists(track_meta_dir):
os.makedirs(track_meta_dir)
artist_meta_dir = os.path.join(output_dir, 'artist_meta')
if not os.path.exists(artist_meta_dir):
os.makedirs(artist_meta_dir)
release_meta_dir = os.path.join(output_dir, 'release_meta')
if not os.path.exists(release_meta_dir):
os.makedirs(release_meta_dir)
fail_dir = os.path.join(output_dir, 'failed')
if not os.path.exists(fail_dir):
os.makedirs(fail_dir)
print("Lookup Fail: ", fail_dir)
log_dir = os.path.join(output_dir, 'session_logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
print("Logs: ", log_dir)
# A bunch of counts for dumb housekeeping / sanity checks
list_total_count = len(glossary_ids)
list_release_count = 0
list_artist_count = 0
list_unknown_count = 0
unknown_artists = 0
unknown_releases = 0
total_releases = 0
total_artists = 0
release_ids = set([])
artist_ids = set([])
# set up current log file
date_time = datetime.datetime
ts = date_time.now()
unique_log_id = ts.strftime("%d-%m-%y-%H%M%S%f.txt")
# create standard log file
log_file_name = os.path.join(log_dir, "filewalker_log_" + unique_log_id)
# create log of failed glossary IDs
failed_log_name = os.path.join(fail_dir, "fail_log_" + unique_log_id)
# log of releases that need new metadata b/c they need to be associated with an artist
artist_release_log_name = os.path.join(log_dir, "releases_from_artists_log_" + unique_log_id)
# Make a 'log' that's just a copy of the glossary list
glossary_list_log_name = os.path.join(log_dir, "glossary_list_log_" + unique_log_id)
shutil.copy(glossary_list_file, glossary_list_log_name)
# open standard log file; we'll just open fail and artist log as necessary
log_file = open(log_file_name, 'ab')
if os.path.basename(source_dir) == '':
# probably have an extra / in the name; check back one
source_dir = os.path.dirname(source_dir)
if not os.path.basename(source_dir) == 'track_meta':
# If we're not in track_meta already, move in there
source_dir = os.path.join(source_dir, 'track_meta')
if not os.path.exists(source_dir):
print("No track_meta folder around here!", file=sys.stderr)
for root, dir, files in os.walk(source_dir):
if len(root) > path_start:
path = root[path_start:]
else:
path = ''
for src_name in files:
file_name = os.path.join(root, src_name)
ext = os.path.splitext(file_name)[1].lower()
if ext in _file_types:
# Get the MusicBrainz Release ID from the file
track_xml = ET.parse(file_name).getroot()[0]
release_id = track_xml.find('KEXPRelease').text
artist_id = track_xml.find('KEXPArtist').text
if release_id in glossary_ids:
# Keep track of how many glossaries were releases:
if release_id not in release_ids:
list_release_count += 1
# Put this GUID in the release id set so we know to get release XML for items
release_ids.add(release_id)
# copy this track metadata
target = os.path.join(track_meta_dir, src_name)
shutil.copy(file_name, target)
if artist_id in glossary_ids:
# Keep track of how many glossaries were artists:
if artist_id not in artist_ids:
list_artist_count += 1
# Put this GUID in the artist id set so we know to get artist XML for items
artist_ids.add(artist_id)
if release_id not in release_ids:
# make a note of releases re-metadataed b/c of artist in the artist_release_log
with open(artist_release_log_name, 'ab') as f:
log_text = "artist\t" + artist_id + "\trelease\t" + release_id + "\r\n"
f.write(log_text.encode("UTF-8"))
# Put release ID of track associated with this artist in release id set
# so we get new XML for releases associated with this artist
release_ids.add(release_id)
# copy this track metadata
target = os.path.join(track_meta_dir, src_name)
shutil.copy(file_name, target)
# Completed track XML processing; get release & artist metadata
glossary_ids = glossary_ids.difference(set(release_ids))
glossary_ids = glossary_ids.difference(set(artist_ids))
list_unknown_count = len(glossary_ids)
# All glossary ids in release_ids are definitely releases, so just do a standard serialization
# There should not be duplicates, and frankly if there are I'm not going to bother filtering them out right now.
print("\nBEGINNING RELEASE GLOSSARY PROCESSING")
for release_id in release_ids:
    print("Processing release " + str(release_id))
    try:
        mb_release = MBInfo.get_release(release_id)
        total_releases += 1
        release = MetaProcessor.process_release(mb_release)
        # save release meta
        serializer.save_release(release, input_release_meta, release_meta_dir)
        # save release to log
        log_file.write(release["log_text"].encode("UTF-8"))
        for label in release["labels"]:
            if 'label' in label:
                label_log = "label\t" + str(label['label']['id']) + "\t" + str(label['label']['name']) + "\r\n"
                log_file.write(label_log.encode("UTF-8"))
    except musicbrainz.ResponseError:
        # A non-valid release MBID slipped into release_ids; push it back
        # into glossary_ids so the "unknown" pass at the end retries it.
        # BUG FIX: ``System.out.println`` is Java (raised NameError here),
        # and ``glossary_ids`` is a set, which has no ``append`` -- use
        # print() and set.add() instead.
        print("MUSICBRAINZ RESPONSE ERROR ON RELEASE " + release_id, file=sys.stderr)
        glossary_ids.add(release_id)
# All glossary IDs in artist_ids are definitely artists. Standard artist meta serialization.
print("\nBEGINNING ARTIST GLOSSARY PROCESSING")
for artist_id in artist_ids:
    try:
        print("Processing artist " + str(artist_id))
        mb_artist = MBInfo.get_artist(artist_id)
        total_artists += 1
        artist_members = []
        # Collect band members via backward 'member of band' relations.
        if "artist-relation-list" in mb_artist:
            for member in mb_artist["artist-relation-list"]:
                member_id = member['artist']['id']
                if member['type'] == 'member of band' and "direction" in member \
                        and member["direction"] == "backward":
                    artist_members.append(MBInfo.get_artist(member_id))
        # add artist (and each member) to log
        log = mb_artist['log_text']
        log_file.write(log.encode('UTF-8'))
        for member in artist_members:
            log = member['log_text']
            log_file.write(log.encode('UTF-8'))
        serializer.save_artist(mb_artist, artist_members, artist_meta_dir)
    except musicbrainz.ResponseError:
        # A non-valid artist MBID slipped into artist_ids; push it back into
        # glossary_ids so the "unknown" pass at the end retries it.
        # BUG FIX (as in the release loop above): replace the Java-ism
        # ``System.out.println`` with print(), and use set.add() because
        # ``glossary_ids`` is a set, not a list.
        print("MUSICBRAINZ RESPONSE ERROR ON ARTIST " + artist_id, file=sys.stderr)
        glossary_ids.add(artist_id)
# Hopefully all glossary IDs have been properly sorted, but if they haven't:
# First try w/ glossary ID as artist ID, then w/ glossary ID as release ID
# For release IDs, we'll just pretend that the disc number is 1 (for now)
print("\nPROCESSING UNKNOWN GLOSSARIES")
for glossary_id in glossary_ids:
try:
print("Processing " + str(glossary_id) + " as artist")
mb_artist = MBInfo.get_artist(glossary_id)
# successfully retrieved artist info, so this is an artist. increment counter.
unknown_artists += 1
total_artists += 1
artist_members = []
if "artist-relation-list" in mb_artist:
for member in mb_artist["artist-relation-list"]:
member_id = member['artist']['id']
if member['type'] == 'member of band' and "direction" in member \
and member["direction"] == "backward":
artist_members.append(MBInfo.get_artist(member_id))
# add artist to log
log = mb_artist['log_text']
log_file.write(log.encode('UTF-8'))
for member in artist_members:
log = member['log_text']
log_file.write(log.encode('UTF-8'))
serializer.save_artist(mb_artist, artist_members, artist_meta_dir)
except musicbrainz.ResponseError:
try:
# Glossary ID was not an artist ID, so we'll try again as release ID
print("Processing " + str(glossary_id) + " as release")
mb_release = MBInfo.get_release(glossary_id)
# successfully retrieved release info, so this is a release. increment counter.
unknown_releases += 1
total_releases += 1
release = MetaProcessor.process_release(mb_release)
# save release meta
serializer.save_release(release, input_release_meta, release_meta_dir)
# save release to log
log_file.write(release["log_text"].encode("UTF-8"))
for label in release["labels"]:
if 'label' in label:
label_log = "label\t" + str(label['label']['id']) + "\t" + str(label['label']['name']) + "\r\n"
log_file.write(label_log.encode("UTF-8"))
# Glossary ID was neither artist nor release ID; print error message and move ID to failed log
except musicbrainz.ResponseError:
print("ERROR: " + str(glossary_id) + " is not a valid artist or release MBID!")
with open(failed_log_name, 'a') as f:
f.write(glossary_id)
print("Total number of glossaries passed in: " + str(list_total_count))
print("Number of glossaries in list that are releases: " + str(list_release_count))
print("Number of glossaries in list that are artists: " + str(list_artist_count))
print("Number of glossaries in list that were unknown: " + str(list_unknown_count))
print("Number of unknown that are releases: " + str(unknown_releases))
print("Number of unknown that are artists: " + str(unknown_artists))
log_file.close()
def main():
    """
    Crawls the given directory and makes a copy of all releases with MBIDs matching the passed list
    """
    # Maps the (case-folded) CLI values and abbreviations to the canonical
    # category / rotation display values.
    # BUG FIX: "live on kexp" previously mapped to "Live on Kexp", which was
    # inconsistent with the "liv" abbreviation's "Live on KEXP".
    options = {"acq": "Recent Acquisitions", "recent acquisitions": "Recent Acquisitions", "electronic": "Electronic",
               "ele": "Electronic", "exp": "Experimental", "experimental": "Experimental", "hip": "Hip Hop",
               "hip hop": "Hip Hop", "jaz": "Jazz", "jazz": "Jazz", "liv": "Live on KEXP", "live on kexp": "Live on KEXP",
               "loc": "Local", "local": "Local", "reg": "Reggae", "reggae": "Reggae", "roc": "Rock/Pop", "rock": "Rock/Pop",
               "pop": "Rock/Pop", "rock/pop": "Rock/Pop", "roo": "Roots", "roots": "Roots",
               "rot": "Rotation", "rotation": "Rotation", "sho": "Shows Around Town", "shows around town": "Shows Around Town",
               "sou": "Soundtracks", "soundtracks": "Soundtracks", "wor": "World", "world": "World",
               "heavy": "Heavy", "library": "Library", "light": "Light", "medium": "Medium", "r/n": "R/N"}
    parser = argparse.ArgumentParser(description='Get metadata of a list of release & artist GUIDs, as well as all associated tracks and releases. \
To be used with track metadata XML files that are associated with the specified release and artist GUIDs.')
    parser.add_argument('input_directory', help="Directory of previous batch.")
    parser.add_argument('output_directory', help="Output metadata directory.")
    parser.add_argument('glossary_list_file', help="File containing a list of glossary ids")
    # BUG FIX: the choices list contained the typo "electronc", so the full
    # word "electronic" was rejected even though the options map accepts it.
    parser.add_argument('-c', '--category', type=str.casefold, choices=["recent acquisitions", "acq", "electronic", "ele", "experimental", "exp", "hip hop", "hip", "jaz", "jazz", "live on kexp", "liv", "local", "reggae", "reg", "rock", "pop", "rock/pop", "roc", "roots", "roo", "rotation", "rot", "shows around town", "sho", "soundtracks", "sou", "world", "wor"], help="Category or genre of releases being filewalked")
    parser.add_argument('-r', '--rotation', type=str.casefold, choices=["heavy", "library", "light", "medium", "r/n"], help="Rotation workflow value")
    args = parser.parse_args()
    input_release_meta = {}
    # Normalize missing options to empty strings rather than None.
    input_release_meta["category"] = options[args.category] if args.category is not None else ""
    input_release_meta["rotation"] = options[args.rotation] if args.rotation is not None else ""
    process_directory(args.input_directory, args.output_directory, args.glossary_list_file, input_release_meta, DaletSerializer)
main()
| {
"repo_name": "hidat/audio_pipeline",
"path": "audio_pipeline/file_walker/TrackXMLWalker.py",
"copies": "1",
"size": "14631",
"license": "mit",
"hash": -8080738823980865000,
"line_mean": 48.4290540541,
"line_max": 417,
"alpha_frac": 0.580001367,
"autogenerated": false,
"ratio": 3.8472258743097556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4927227241309755,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ceposta'
'''
BIG NOTE: We dont use this script yet...
it's experimental..
woudd like to get to use it soon...
'''
import sys, urllib, urllib2, json, re;
if len(sys.argv) < 4:
print "invalid parameters"
print "args: AppName VersionNumber OSEBrokerUrl OSEDomain"
SOURCE_APP_NAME = sys.argv[1]
VERSION_NUMBER = sys.argv[2]
OPENSHIFT_BROKER = sys.argv[3]
OPENSHIFT_DOMAIN = sys.argv[4]
print "app_name=%s version=%s broker=%s domain=%s" % (SOURCE_APP_NAME, VERSION_NUMBER, OPENSHIFT_BROKER, OPENSHIFT_DOMAIN)
# filter out chars OSE can't deal with
OPENSHIFT_APP_NAME = filter(str.isalnum,SOURCE_APP_NAME+VERSION_NUMBER)
OPENSHIFT_CARTRIDGE_FUSE = "fuse-6.1.1"
HEADER_ACCEPT = "Accept: application/json"
OPENSHIFT_API="/broker/rest/"
OPENSHIFT_USER = "christian"
OPENSHIFT_PASSWORD = "christian"
OPENSHIFT_GEAR_PROFILE = "xpaas"
def encodeUserData(username, passwd):
    """Return an HTTP Basic ``Authorization`` header value for the credentials.

    Uses ``base64.b64encode`` instead of the Python-2-only
    ``str.encode('base64')`` codec (removed in Python 3); the codec appended
    a trailing newline that the old code had to rstrip — b64encode does not,
    so the returned value is identical.
    """
    import base64  # local import keeps this script self-contained
    token = base64.b64encode(("%s:%s" % (username, passwd)).encode("utf-8")).decode("ascii")
    return "Basic %s" % token
def get_fuse_attr(text):
    """Extract the Fuse console and Zookeeper credentials from the
    cartridge's free-text output, in a fixed order:
    [console user, console password, zookeeper url, zookeeper password].
    """
    labels = ['Console User:', 'Console Password:', 'Zookeeper URL:', 'Zookeeper Password:']
    # For each label, take the remainder of its line and drop leading spaces.
    return [re.search('(?<=' + label + ')[^\n]+', text).group(0).lstrip()
            for label in labels]
def check_app_exists():
try:
url = "{0}{1}domain/{2}/application/{3}?nolinks=true".format(OPENSHIFT_BROKER, OPENSHIFT_API, OPENSHIFT_DOMAIN, OPENSHIFT_APP_NAME)
req = urllib2.Request(url)
print "check app exists url " + url
req.add_header('Accept', HEADER_ACCEPT)
req.add_header('Authorization', encodeUserData(OPENSHIFT_USER, OPENSHIFT_PASSWORD))
res = urllib2.urlopen(req)
print "response " + res.read()
result = json.loads(res.read())
if "ok" == result["status"]:
return True
else:
return False
except :
return False
def create_app():
try:
dict = (
("name",OPENSHIFT_APP_NAME),
("gear_size", OPENSHIFT_GEAR_PROFILE),
("cartridges[]",OPENSHIFT_CARTRIDGE_FUSE)
)
dict_encode = urllib.urlencode(dict)
url = "{0}{1}domain/{2}/applications".format(OPENSHIFT_BROKER, OPENSHIFT_API, OPENSHIFT_DOMAIN)
print "URL to create app: " + url
req = urllib2.Request(url)
req.add_header('Accept', HEADER_ACCEPT)
req.add_header('Authorization', encodeUserData(OPENSHIFT_USER, OPENSHIFT_PASSWORD))
print "Please wait..."
res = urllib2.urlopen(req, dict_encode)
result = json.loads(res.read())
fuse_text = result["messages"][2]["text"]
print fuse_text
print '("{0}" "{1}" "{2}" "{3}" "{4}" "{5}" "{6}")'.format("0", result["data"]["app_url"], result["data"]["ssh_url"],
* get_fuse_attr(fuse_text))
except urllib2.URLError as e:
print e
result = json.loads(e.read())
print '("{0}" "{1}")'.format("1", result["messages"][0]["text"])
except RuntimeError as e:
print e
if __name__ == "__main__":
if check_app_exists():
print "This OSE app already exists!!"
else:
print "Creating Application..."
create_app() | {
"repo_name": "finiteloopme/cd-jboss-fuse",
"path": "ose-scripts/create_ose.py",
"copies": "1",
"size": "3264",
"license": "apache-2.0",
"hash": -2048429047051906800,
"line_mean": 29.8018867925,
"line_max": 139,
"alpha_frac": 0.5968137255,
"autogenerated": false,
"ratio": 3.392931392931393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4489745118431393,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ceposta'
#
#
# Example how to call this:
# $ python check_app_exists.py https://broker.hosts.pocteam.com /broker/rest/ dev christian christian fuse10 fusesource-fuse-1.0.0
# expects these params:
# 1 -- OSE broker
# 2 -- path to rest API, eg, /broker/rest/ <-- note the trailing slash
# 3 -- domain
# 4 -- user
# 5 -- password
# 6 -- app name
# 7 -- cartridge to use
# Returns
# RESULT[0] = Status Code 0
#
# Application Does Exist
# RESULT[0] = Status Code 2
# RESULT[1] = Application URL
# RESULT[2] = Git URL of Application
#
# Error Occurred
# RESULT[0] = Status Code 1
# RESULT[1] = Error Message
import sys, urllib2, json;
def encodeUserData(username, passwd):
    """Return an HTTP Basic ``Authorization`` header value for the credentials.

    Uses ``base64.b64encode`` instead of the Python-2-only
    ``str.encode('base64')`` codec (removed in Python 3); output is
    identical for the same input.
    """
    import base64  # local import keeps this script self-contained
    token = base64.b64encode(("%s:%s" % (username, passwd)).encode("utf-8")).decode("ascii")
    return "Basic %s" % token
# Query the broker for the named application and emit a lisp-style result
# tuple that the calling shell script parses (see header comment for codes).
try:
    url = "{0}{1}domain/{2}/application/{3}?nolinks=true".format(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[6])
    req = urllib2.Request(url)
    req.add_header('Accept','application/json')
    req.add_header('Authorization',encodeUserData(sys.argv[4],sys.argv[5]))
    res = urllib2.urlopen(req)
    result = json.loads(res.read())
    if "ok" == result["status"]:
        # Application exists: status 2 plus its app and git URLs.
        print '("{0}" "{1}" "{2}")'.format("2", result["data"]["app_url"], result["data"]["git_url"])
    else:
        print "1"
except urllib2.URLError, e:  # Python 2 except syntax; e is presumably an HTTPError with a JSON body — confirm
    result = json.loads(e.read())
    if "not_found" == result["status"]:
        # "Not found" is the success case for an existence check: status 0.
        print ("0")
    else:
        print '("{0}" "{1}")'.format("1", result["messages"][0]["text"])
except:
    # Anything else (bad argv, network failure): generic error status.
    print ("1")
| {
"repo_name": "finiteloopme/cd-jboss-fuse",
"path": "ose-scripts/check_app_exists.py",
"copies": "1",
"size": "1499",
"license": "apache-2.0",
"hash": -9130067839594099000,
"line_mean": 28.98,
"line_max": 130,
"alpha_frac": 0.6190793863,
"autogenerated": false,
"ratio": 2.9624505928853755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40815299791853754,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ceposta'
#
# python create_new_app.py https://broker.hosts.pocteam.com /broker/rest/ dev christian christian fuse10 fuse-1.0.0
# expects these params:
# 1 -- OSE broker
# 2 -- path to rest API, eg, /broker/rest/ <-- note the trailing slash
# 3 -- domain
# 4 -- user
# 5 -- password
# 6 -- app name
# 7 -- cartridge to use
# Application Created Successfully
# RESULT[0] = Status Code 0
# RESULT[1] = Application URL
# RESULT[2] = SSH url
# RESULT[3] = Console User Name
# RESULT[4] = Console Password
# RESULT[5] = ZK URL
# RESULT[6] = ZK Password
#
# Error Occurred
# RESULT[0] = Status Code 1
# RESULT[1] = Error Message
import sys, urllib, urllib2, json, re;
def encodeUserData(username, passwd):
    """Return an HTTP Basic ``Authorization`` header value for the credentials.

    Uses ``base64.b64encode`` instead of the Python-2-only
    ``str.encode('base64')`` codec (removed in Python 3); output is
    identical for the same input.
    """
    import base64  # local import keeps this script self-contained
    token = base64.b64encode(("%s:%s" % (username, passwd)).encode("utf-8")).decode("ascii")
    return "Basic %s" % token
def get_fuse_attr(text):
    """Extract the Fuse console and Zookeeper credentials from the
    cartridge's free-text output, in a fixed order:
    [console user, console password, zookeeper url, zookeeper password].
    """
    labels = ['Console User:', 'Console Password:', 'Zookeeper URL:', 'Zookeeper Password:']
    # For each label, take the remainder of its line and drop leading spaces.
    return [re.search('(?<=' + label + ')[^\n]+', text).group(0).lstrip()
            for label in labels]
# Create the application via the broker REST API and print a lisp-style
# result tuple for the calling shell script (see header comment for codes).
try:
    dict = (("name",sys.argv[6]),("gear_size", "xpaas"),("cartridges[][name]",sys.argv[7]))  # NOTE(review): shadows the builtin 'dict'
    dict_encode = urllib.urlencode(dict)
    url = "{0}{1}domain/{2}/applications".format(sys.argv[1],sys.argv[2],sys.argv[3])
    req = urllib2.Request(url)
    req.add_header('Accept','application/json')
    req.add_header('Authorization',encodeUserData(sys.argv[4],sys.argv[5]))
    # POSTing the form body triggers application creation.
    res = urllib2.urlopen(req, dict_encode)
    result = json.loads(res.read())
    # messages[2] carries the cartridge's free-text output with the Fuse credentials.
    fuse_text = result["messages"][2]["text"]
    print '("{0}" "{1}" "{2}" "{3}" "{4}" "{5}" "{6}")'.format("0", result["data"]["app_url"], result["data"]["ssh_url"],
        * get_fuse_attr(fuse_text))
except urllib2.URLError, e:  # Python 2 except syntax; e is presumably an HTTPError with a JSON body — confirm
    result = json.loads(e.read())
    print '("{0}" "{1}")'.format("1", result["messages"][0]["text"])
except RuntimeError, e:
    print ("1")
"repo_name": "finiteloopme/cd-jboss-fuse",
"path": "ose-scripts/create_new_app.py",
"copies": "1",
"size": "1966",
"license": "apache-2.0",
"hash": -8547086767828930000,
"line_mean": 32.9137931034,
"line_max": 121,
"alpha_frac": 0.5981688708,
"autogenerated": false,
"ratio": 3.076682316118936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9115424501504941,
"avg_score": 0.011885337082799064,
"num_lines": 58
} |
__author__ = 'cerias'
from bottle import Bottle, ServerAdapter
# copied from bottle. Only changes are to import ssl and wrap the socket
# copied from bottle. Only changes are to import ssl and wrap the socket
class SSLWSGIRefServer(ServerAdapter):
    """Bottle server adapter: wsgiref's simple server, with the listening
    socket wrapped for TLS using the certificate at ssl/server.pem.
    """
    def run(self, handler):
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        import ssl
        if self.quiet:
            # Swap in a request handler that suppresses per-request logging.
            class QuietHandler(WSGIRequestHandler):
                def log_request(*args, **kw): pass
            self.options['handler_class'] = QuietHandler
        srv = make_server(self.host, self.port, handler, **self.options)
        # NOTE(review): ssl.wrap_socket is deprecated and removed in
        # Python 3.12 — migrate to ssl.SSLContext.wrap_socket when the
        # supported interpreter range allows it.
        srv.socket = ssl.wrap_socket (
            srv.socket,
            certfile='ssl/server.pem',  # path to certificate (PEM with cert + key)
            server_side=True)
        # Blocks forever serving requests.
        srv.serve_forever()
class WebServer:
    """Minimal Bottle application served over the TLS-wrapped wsgiref adapter."""

    def __init__(self, host, port):
        # Remember connection settings, build the app, and wire its routes.
        self._host = host
        self._port = port
        self._app = Bottle()
        self._route()

    def _route(self):
        # Register all URL handlers on the Bottle app.
        self._app.route('/', method='GET', callback=self._index)

    def start(self):
        # Serve the app over TLS via the custom adapter (blocks).
        server = SSLWSGIRefServer(host=self._host, port=self._port)
        self._app.run(server=server)

    def _index(self):
        # Handler for GET /.
        return 'Welcome'
| {
"repo_name": "cerias/ptMonitor",
"path": "webserver.py",
"copies": "1",
"size": "1144",
"license": "apache-2.0",
"hash": 4460558262590159000,
"line_mean": 29.1052631579,
"line_max": 73,
"alpha_frac": 0.6092657343,
"autogenerated": false,
"ratio": 3.8133333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9749794323542448,
"avg_score": 0.03456094881817713,
"num_lines": 38
} |
__author__ = 'cerias'
from logger import log
from subprocess import call
from ConfigManagement import ConfigPlugin
class manager:
    """Wrapper around the Tomcat manager text API: deploy/undeploy the WAR
    named in the 'tomcat' plugin config, shelling out to curl.
    """
    def __init__(self):
        self._c = ConfigPlugin("tomcat")
        # Manager base URL with basic-auth credentials embedded in it.
        self._url = "http://{}:{}@127.0.0.1/manager/text/".format(self._c.getVar("auth","username"),self._c.getVar("auth","password"))
        pass
    def deploy(self):
        # Deploy (or update in place) the configured WAR at its context path.
        url="{}deploy?path={}&update=true&war=file:{}".format(self._url,self._c.getVar("default","warName"),self._c.getVar("default","warName"))
        call(["curl",url])
        pass
    def undeploy(self):
        # Remove the configured WAR's context.
        url="{}undeploy?path={}".format(self._url,self._c.getVar("default","warName"))
        call(["curl",url])
        pass
    def start(self):
        # NOTE(review): stub — builds an 'undeploy' URL (copy/paste from above)
        # and never issues the request; should be '{}start?path={}' + curl call.
        url="{}undeploy?path={}".format(self._url,self._c.getVar("default","warName"))
        pass
    def stop(self):
        # NOTE(review): same copy/paste stub; should be '{}stop?path={}' + curl call.
        url="{}undeploy?path={}".format(self._url,self._c.getVar("default","warName"))
        pass
# NOTE(review): leftover debug constant and commented-out urllib2 experiment;
# nothing below is referenced by the manager class.
url="http://console:console@127.0.0.1:8080/manager/text/deploy?path=/test&update=true&war=file:/home/cerias/IdeaProjects/testProjekt/testProjekt/test.war"
#
# response = urllib2.urlopen(url)
# dat = response.read()
# response.close()
# logging.debug(dat)
| {
"repo_name": "cerias/ptMonitor",
"path": "plugins/tomcat.py",
"copies": "1",
"size": "1220",
"license": "apache-2.0",
"hash": -3694108177011415000,
"line_mean": 22.4615384615,
"line_max": 154,
"alpha_frac": 0.6196721311,
"autogenerated": false,
"ratio": 3.2620320855614975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9328456542272276,
"avg_score": 0.01064953487784411,
"num_lines": 52
} |
__author__ = 'cfiloteo'
from django.dispatch import receiver
from django_cas_ng.signals import cas_user_authenticated
from home import models as hmod
###########################################################
### Signal handler for when users authenticate via CAS
@receiver(cas_user_authenticated)
def cas_authentication_handler(sender, **kwargs):
    """Populate the local user record from the BYU CAS attributes and save it."""
    user = kwargs['user']
    attributes = kwargs['attributes']

    # Copy the BYU profile attributes onto the user, blanking any attribute
    # that CAS did not supply (or supplied empty).
    field_map = (
        ('first_name', 'preferredFirstName'),
        ('last_name',  'preferredSurname'),
        ('email',      'emailAddress'),
        ('fullname',   'fullName'),
    )
    for fieldname, attrname in field_map:
        setattr(user, fieldname, attributes.get(attrname) or '')

    # Collapse the employment/enrollment flags whose value is the string
    # 'true' into a single comma-separated status string.
    status_attrs = (
        'activeParttimeEmployee',
        'activeFulltimeEmployee',
        'activeFulltimeInstructor',
        'inactiveFulltimeInstructor',
        'activeParttimeNonBYUEmployee',
        'inactiveParttimeNonBYUEmployee',
        'activeEligibletoRegisterStudent',
        'inactiveFulltimeNonBYUEmployee',
        'inactiveParttimeInstructor',
        'inactiveParttimeEmployee',
        'activeFulltimeNonBYUEmployee',
        'inactiveFulltimeEmployee',
        'activeParttimeInstructor',
        'alumni',
    )
    user.byu_status = ','.join(a for a in status_attrs if attributes.get(a) == 'true')

    # Persist everything set above.
    user.save()
| {
"repo_name": "AIS-BYU/ais-site",
"path": "website/home/__init__.py",
"copies": "1",
"size": "1594",
"license": "mit",
"hash": -8193064842254987000,
"line_mean": 31.5306122449,
"line_max": 62,
"alpha_frac": 0.6141781681,
"autogenerated": false,
"ratio": 4.1947368421052635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5308915010205263,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CFPB Labs'
import time
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from nose.tools import *
class TransitSubsidyApp():
    """
    Abstraction of the Transit Subsidy application. This extends the
    WebDriver Page model pattern (http://code.google.com/p/selenium/wiki/PageObjects)
    and applies to the application as a whole rather than a single page state.

    @note_to_self: one or more page objects could comprise a application object, which,
    more or less, could serve as a testing facade.
    """
    def __init__(self, driver, base_url):
        # driver: a selenium WebDriver; base_url: root URL of the deployment under test.
        self.driver = driver
        self.base_url = base_url

    def reset(self):
        """Click the 'Reset Form' link to clear the request form."""
        self.driver.find_element_by_link_text('Reset Form').click()

    def login(self, username="ted", password="ted"):
        """Log in via the intranet login page; asserts we land on the request form."""
        self.driver.get(self.base_url + "/login/")
        eq_("Your Intranet >", self.driver.title)
        self.driver.find_element_by_id("id_username").clear()
        self.driver.find_element_by_id("id_username").send_keys(username)
        self.driver.find_element_by_id("id_password").clear()
        self.driver.find_element_by_id("id_password").send_keys(password)
        self.driver.find_element_by_id("btn_login").click()
        eq_("Your Intranet > Transit Subsidy Request", self.driver.title)

    def logout(self):
        """Log the current user out.

        BUG FIX: the original body read the module globals 'driver' and
        'base_url', which do not exist in this module (NameError at runtime);
        it now uses the instance attributes like every other method.
        """
        self.driver.get(self.base_url + "/logout/")

    def commute_from(self, street='123 Main St', city='Anytown', state="VA", zip="62312"):
        """Fill in the commute origin address fields."""
        self.driver.find_element_by_id("id_origin_street").clear()
        self.driver.find_element_by_id("id_origin_street").send_keys(street)
        self.driver.find_element_by_id("id_origin_city").clear()
        self.driver.find_element_by_id("id_origin_city").send_keys(city)
        self.driver.find_element_by_id("id_origin_state").clear()
        self.driver.find_element_by_id("id_origin_state").send_keys(state)
        self.driver.find_element_by_id("id_origin_zip").clear()
        self.driver.find_element_by_id("id_origin_zip").send_keys(zip)

    def commute_to(self, destination_id=2):
        """Choose the destination office by <option> index in the destination select."""
        self.driver.find_element_by_id("id_destination").find_elements_by_tag_name('option')[destination_id].click()

    def add_segment(self, segment_id, mode_id, amount, add_another=False):
        """Fill one commute segment (mode picked by option index, plus cost);
        optionally click the row's 'add' control to append another row."""
        self.driver.find_element_by_id("segment-type_%s" % segment_id).find_elements_by_tag_name('option')[mode_id].click()
        self.driver.find_element_by_id("segment-amount_%s" % segment_id).clear()
        self.driver.find_element_by_id("segment-amount_%s" % segment_id).send_keys(amount)
        if add_another: self.driver.find_element_by_id("add_%s" % segment_id).click()

    def add_other_segment(self, segment_id, other_text, amount, add_another=False):
        """Fill a segment using the 'Other' mode (option index 17) with a free-text description."""
        self.driver.find_element_by_id("segment-type_%s" % segment_id).find_elements_by_tag_name('option')[17].click()
        time.sleep(1)  # give the 'other' text input time to appear
        self.driver.find_element_by_id("segment-other_%s" % segment_id).send_keys(other_text)
        self.driver.find_element_by_id("segment-amount_%s" % segment_id).clear()
        self.driver.find_element_by_id("segment-amount_%s" % segment_id).send_keys(amount)
        if add_another: self.driver.find_element_by_id("add_%s" % segment_id).click()

    def remove_segment(self, segment_id):
        """Remove the given segment row."""
        self.driver.find_element_by_id('rm_%s' % segment_id).click()

    def click_add(self):
        """Click the first row's 'add' control to append a new segment row."""
        self.driver.find_element_by_id('add_1').click()

    def select_workdays(self, id=2, other=None):
        """Pick a work-schedule radio button by position; id==4 is 'other'
        and requires a day count in `other`."""
        self.driver.find_element_by_xpath("(//input[@id='id_work_sked'])[%s]" % id).click()
        if id==4:
            self.driver.find_element_by_id('id_number_of_workdays').clear()
            self.driver.find_element_by_id('id_number_of_workdays').send_keys(other)

    def view_smartriphelp(self):
        """Open and close the SmarTrip help lightbox."""
        self.driver.find_element_by_id("id_help_smartrip").click()
        #Keys.ESCAPE should work, too
        self.driver.find_element_by_id("cboxClose").click()

    def add_smartrip(self, num='00020 0001 5644 364 6'):
        """Enter a SmarTrip card number."""
        self.driver.find_element_by_id("id_dc_wmta_smartrip_id").clear()
        self.driver.find_element_by_id("id_dc_wmta_smartrip_id").send_keys(num)

    def enroll(self):
        """Submit the enrollment form (sleeps bracket the asynchronous submit)."""
        time.sleep(.5)
        self.driver.find_element_by_id("btn_enroll_smartrip").click()
        time.sleep(1)

    def sign(self, last_four_ssn='1234', signature='Mick Jagger'):
        """Sign the agreement; asserts we land on the confirmation page."""
        time.sleep(.5)
        self.driver.find_element_by_id("id_last_four_ssn").send_keys(last_four_ssn)
        self.driver.find_element_by_id("id_signature").send_keys(signature)
        self.driver.find_element_by_id("btn_agree").click()
        time.sleep(.5)
        eq_("Your Intranet > Transit Subsidy Confirmation", self.driver.title)

    def dont_sign(self):
        """Decline the agreement; asserts we return to the request page."""
        self.driver.find_element_by_id("btn_no_agree").click()
        eq_("Your Intranet > Transit Subsidy Request", self.driver.title)

    def withdraw_enrollment(self):
        """Cancel an existing enrollment and confirm the withdrawal."""
        # Running out of time this morning. This aint workin!
        # self.driver.find_element_by_link_text('Cancel my enrollment.').click()
        #test no agree (for grins)
        # time.sleep(.5)
        # self.driver.find_element_by_id("btn_withdraw_no_agree").click()
        #Selenium thowing Element is not clickable at point (558, 165). Other element would receive the click: <div id="cboxOverlay" style="cursor: pointer; opacity: 0.22499999403953552; "></div>
        # self.driver.find_element_by_id('id_withdrawl_dialog').send_keys(Keys.ESCAPE) #Just hit Escape instead
        #In theory, this should work, too: self.driver.find_element_by_id("cboxClose").click()
        #OK - now the approach that works: partial link text.
        self.driver.find_element_by_partial_link_text('Cancel my enrollment').click()
        time.sleep(.5)
        self.driver.find_element_by_id("btn_withdraw_agree").click()
        eq_("Your Intranet > Transit Subsidy Withdrawl Confirmation", self.driver.title)
| {
"repo_name": "mjjavaid/cfpb-transit_subsidy",
"path": "tests/selenium/TransitSubsidyApp.py",
"copies": "2",
"size": "6299",
"license": "cc0-1.0",
"hash": 5579742521086228000,
"line_mean": 43.048951049,
"line_max": 195,
"alpha_frac": 0.6543895856,
"autogenerated": false,
"ratio": 3.3222573839662446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4976646969566245,
"avg_score": null,
"num_lines": null
} |
__author__ = "CFPBLabs"
"""
Tests the TransitSubsidyApp which abstract the functionality of the actual application.
"""
from base_test import *
#---------------------- Fixture ----------------------#
# Assumes tests will be run on the same server the app is running on.
# Obviously, this will have to be changed if testing a remote instance.
base_url = "http://localhost:8000"
def setup_module(module):
    """nose module-level fixture: create one WebDriver and app facade for all tests."""
    global driver , transit , base_url
    logger.info('setup_module: %s' % module.__name__)
    driver = new_driver()
    transit = TransitSubsidyApp(driver,base_url)
def teardown_module(module):
    """nose module-level fixture: shut the shared browser down."""
    global driver
    logger.info('teardown_module %s' % module.__name__)
    driver.quit()
#------------------------------------------------------#
def first():
    # Per-test setup (nose with_setup): brief settle delay before each test.
    time.sleep(1)
def last():
    # Per-test teardown (nose with_setup): log the current user out.
    driver.get(base_url + "/logout")
@with_setup(first,last)
def test_that_patti_smith_withdraws_enrollment():
    """An already-enrolled user can withdraw her transit-subsidy enrollment."""
    transit.login('patti','patti')
    transit.withdraw_enrollment()
#Ted (who does not have a claim) tries to register without entering any fields
@with_setup(first,last)
def test_form_validation():
    """Submitting an empty form flags each required field with an error marker."""
    transit.login('ted','ted')
    driver.get(base_url + '/transit')
    driver.find_element_by_id('btn_enroll_smartrip').click()
    ids = [ 'id_origin_zip', 'segment-amount_1', 'work_sked', 'id_number_of_workdays', 'id_amount']
    def valididate_messages(id):
        # Each invalid field renders an <em> whose class is 'error'.
        e = driver.find_element_by_css_selector('em[for="%s"]' % id)
        expected = 'error' # Will be "success" when validation passes
        actual = e.get_attribute('class')
        eq_( expected, actual, "Validation error should be present for : %s" % id)
    for id in ids:
        # nose generator test: one sub-test per field id.
        yield valididate_messages , id
#Patti Smith registers Or Updates
@with_setup(first,last)
def test_end2end_PattiSmith_OnTheBus():
    """End-to-end enrollment: commute info, two segments, SmarTrip number, sign."""
    transit.login('patti','patti')
    transit.commute_from()
    transit.commute_to()
    try:
        transit.add_segment( segment_id='1', mode_id=2, amount='1.5', add_another=True )
        transit.add_segment( segment_id='2', mode_id=15, amount='2.25', add_another=False )
    #If returning user, the first empty segment (id=1) is removed
    except NoSuchElementException as e:
        transit.add_segment( segment_id='2', mode_id=2, amount='1.50', add_another=False )
        transit.add_segment( segment_id='3', mode_id=15, amount='2.25', add_another=False )
    transit.select_workdays(1)
    transit.view_smartriphelp()
    zzz()  # NOTE(review): zzz is not defined in this module — presumably a sleep helper from base_test; confirm
    transit.add_smartrip()
    transit.enroll()
    transit.sign('1234','Patti Smith')
@with_setup(first,last)
def test_end2end_TedNugent_Cancels_at_last_minute():
    """User fills the whole form but declines to sign; stays on the request page."""
    transit.login('ted','ted')
    transit.commute_from("123 Sunset Ave", "Hollywood", "CA", "90029")
    transit.commute_to(7)
    transit.add_other_segment("1", "Limo", "400", False)
    transit.select_workdays(id=4, other='1')
    transit.enroll()
    transit.dont_sign()
@with_setup(first,last)
def test_add_2_segments_eq_6_bucks():
    """After adding 4.25 and 1.75 (with one segment removed in between), total is 6.00."""
    transit.login('patti','patti')
    transit.add_segment( segment_id='2', mode_id=1, amount='4.25', add_another=False )
    transit.add_segment( segment_id='3', mode_id=5, amount='1.75', add_another=True )
    transit.remove_segment( segment_id='3')
    transit.add_segment( segment_id='4', mode_id=3, amount='1.75', add_another=False )
    _total = driver.find_element_by_id('totals').get_attribute('value')
    eq_( '6.00', _total)
@with_setup(first,last)
def test_validate_smartrip_segments():
    """Add a $1 segment for every mode option and check the running total."""
    transit.login()
    # return
    sel = driver.find_element_by_id('segment-type_1')
    options = sel.find_elements_by_tag_name('option')
    def exercise_option(i):
        # Segment ids happen to line up with the option index here.
        id = str(i)
        transit.add_segment( segment_id=id, mode_id=i, amount='1', add_another=True )
    for i in range(1,len(options)):
        # logger.info( 'i=%s' % i )
        yield exercise_option, i
    # Runs once the generator is exhausted: one dollar per non-blank option.
    total = driver.find_element_by_id('totals').get_attribute('value')
    expected = len(options)-1
    eq_( str(expected) + '.00', total )
@with_setup(first,last)
def test_iterate_Smartrip_segments():
    """Each SmarTrip-eligible mode should prompt for the SmarTrip card number."""
    transit.login()
    #Potentially brittle: IDs for Art,Dash,Metro Bus,Metro
    smartrips = [2,6,15,16]
    def exercise_option(id):
        transit.add_segment( segment_id='1', mode_id=id, amount='1', add_another=False )
        transit.enroll()
        is_textpresent(driver,'Enter your Smartrip card number')
    for id in smartrips:
        # Reset the form between modes so each run starts clean.
        transit.reset()
        yield exercise_option, id
@with_setup(first,last)
def test_Smartrip_length():
    """A too-short SmarTrip number is rejected with a field-level error."""
    transit.login()
    transit.add_segment( segment_id='1', mode_id=2, amount='1', add_another=False )
    transit.add_smartrip('12345678')
    transit.enroll()
    e = driver.find_element_by_css_selector('em[for="id_dc_wmta_smartrip_id"]')
    expected = 'error'
    actual = e.get_attribute('class')
    eq_( expected, actual, "Smartrip error should be present.")
@with_setup(first,last)
def test_add_remove_many_segments():
    """Stress the add/remove segment UI; the exact interaction order matters."""
    transit.login()
    transit.add_segment( segment_id='1', mode_id=1, amount='1', add_another=True )
    transit.add_segment( segment_id='2', mode_id=2, amount='2', add_another=True )
    transit.add_segment( segment_id='3', mode_id=3, amount='3', add_another=True )
    transit.add_segment( segment_id='4', mode_id=4, amount='4', add_another=False )
    transit.remove_segment(2)
    transit.click_add() #5
    transit.add_segment( segment_id='5', mode_id=5, amount='5', add_another=False )
    transit.remove_segment(4)
    transit.click_add() #6
    transit.add_segment( segment_id='6', mode_id=6, amount='6', add_another=False )
    transit.remove_segment(6)
    transit.remove_segment(5)
    transit.remove_segment(3)
    transit.reset()
| {
"repo_name": "mjjavaid/cfpb-transit_subsidy",
"path": "tests/selenium/transit_subsidy_ui_tests.py",
"copies": "2",
"size": "5777",
"license": "cc0-1.0",
"hash": 7163625790720117000,
"line_mean": 28.7783505155,
"line_max": 99,
"alpha_frac": 0.6364895274,
"autogenerated": false,
"ratio": 3.1210156672069154,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4757505194606915,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CFPB Labs'
__version__ = '0.9.1'
#-------------------------------------------------------------------------------
from django.db import models
from django.forms.widgets import Select,HiddenInput,Textarea
from django.forms import ModelForm
from django import forms
from django.contrib.auth.models import User
import re
MAX_CLAIM = 125
class OfficeLocation (models.Model):
    """
    Represents a CFPB office. Details with respect to room number or
    office number, floor, etc., are intentionally omitted

    @author: CFPB Labs
    @date: 09/28/2011
    @contact: bill
    """
    # Short mnemonic code used as the primary key (no auto-increment id).
    id = models.CharField(max_length=12,primary_key=True)
    street = models.CharField(max_length=56)
    suite = models.CharField(max_length=56, blank=True, null=True)
    city = models.CharField(max_length=56)
    state = models.CharField(max_length=2)  # two-letter state code
    zip = models.CharField(max_length=10)   # wide enough for ZIP+4
    def __unicode__(self):
        # e.g. "123 Main St, Anytown, VA 20552"
        return '%s, %s, %s %s' % (self.street, self.city, self.state, self.zip)
class Mode(models.Model):
    """Represents a transit subsidy benefit abstraction for a mode of transportation.

    @organization: CFPB Labs
    @date: 11/18/2011
    @author: miklane
    """
    class Meta:
        ordering = ["short_name"]
    long_name = models.CharField(max_length=100)
    short_name = models.CharField(max_length=56)
    url_link = models.CharField(max_length=1000, null=True, blank=True)
    # How the benefit is delivered. Examples: Smartrip, Debit Card, Metro Check.
    # (This was a stray bare-string literal between the fields — a no-op
    # statement Python discards, not documentation — now a real comment.)
    distribution_method = models.CharField(max_length=100)
    locality = models.CharField(max_length=100)
    def __unicode__(self):
        return u'%s, %s, %s, %s' % (self.long_name, self.short_name, self.locality, self.distribution_method)
class TransitSubsidyModes(models.Model):
    """Join table: one commute segment (mode + per-trip cost) of a subsidy request."""
    # manager = models.Manager() #renaming as 'objects' seems idiomatic to querying
    transit_subsidy = models.ForeignKey('TransitSubsidy')
    mode = models.ForeignKey('Mode')
    cost = models.DecimalField(decimal_places=2,max_digits=5)
    # Free-text description used when the selected mode is "Other".
    other_mode = models.CharField(max_length=64, null=True, blank=True)
    timestamp = models.DateTimeField(auto_now=True)  # refreshed on every save
    def __unicode__(self):
        return u'%s, %s, %s, %s' % (self.transit_subsidy, self.mode, self.cost, self.timestamp)
class TransitSubsidy(models.Model):
    """
    Represents a transit subsidy benefit abstraction for an individual/employee.
    @organization: CFPB Labs
    @date: 09/28/2011
    @author: bill
    """
    # Commute segments (mode + cost) live in the TransitSubsidyModes join table.
    modes = models.ManyToManyField(Mode, through='TransitSubsidyModes')
    # One subsidy record per user: the user FK doubles as the primary key.
    user = models.ForeignKey(User,primary_key=True)
    destination = models.ForeignKey( OfficeLocation )
    #one time only! If not exists assume new. Do once.
    date_enrolled = models.DateTimeField()
    date_withdrawn = models.DateTimeField(null=True,blank=True)
    timestamp = models.DateTimeField(auto_now=True)  # last-modified marker
    last_four_ssn = models.CharField(max_length=56)
    signature = models.CharField(max_length=56)
    # Commute origin address.
    origin_street = models.CharField(max_length=56)
    origin_city = models.CharField(max_length=56)
    origin_state = models.CharField(max_length=2)
    origin_zip = models.CharField(max_length=5)
    number_of_workdays = models.SmallIntegerField() #Choice: 20,16,# 18, or other
    daily_roundtrip_cost = models.DecimalField(decimal_places=2,max_digits=5)
    daily_parking_cost = models.DecimalField(decimal_places=2, max_digits=5, null=True, blank=True)
    total_commute_cost = models.DecimalField(decimal_places=2,max_digits=5)
    """ The amount being distributed """
    amount = models.DecimalField(decimal_places=2,max_digits=5)
    dc_wmta_smartrip_id = models.CharField( max_length=56, blank=True, null=True)
    # Approval audit fields (set by an administrator).
    approved_on = models.DateTimeField(null=True,blank=True)
    approved_by = models.CharField(max_length=56,null=True,blank=True)
    def __unicode__(self):
        return u'%s %s <%s>' % (self.user.last_name, self.user.first_name, self.user.username)
class TransitSubsidyForm(ModelForm):
    """
    A ModelForm based on the TransitSubsidy model, with per-field validation
    for the SSN fragment, workday count, and claimed amount.
    @author: CFPB Labs
    """
    def clean(self):
        """Whole-form hook; the field rules live in the clean_<field> methods."""
        data = self.cleaned_data
        #print data
        return data

    def clean_last_four_ssn(self):
        """The last four of an SSN (not encrypted); must be exactly four digits."""
        ssn = self.cleaned_data['last_four_ssn']
        # BUG FIX: the old pattern '[0-9]{4}' only checked a 4-digit *prefix*,
        # so inputs like '1234x' passed despite the error message's promise of
        # "exactly four digits"; anchor both ends.
        pattern = re.compile(r'^[0-9]{4}$')
        if pattern.match(ssn) is None:
            raise forms.ValidationError ('This must be exactly four digits.')
        # return encrypt(_KEY,ssn)
        return ssn

    def clean_number_of_workdays(self):
        """Constraint 1-31 days"""
        days = self.cleaned_data['number_of_workdays']
        if days > 31:
            # BUG FIX: the original message used 'can''t' (two adjacent string
            # literals), which rendered as "cant".
            raise forms.ValidationError ("You can't work more than 31 days in a month.")
        if days < 1:
            # BUG FIX: "entered 0 less" -> "entered 0 or less".
            raise forms.ValidationError ('Looks like you entered 0 or less for the days per month you work. Really?')
        return days

    def clean_amount(self):
        """Cap the monthly claim at MAX_CLAIM dollars; reject non-positive amounts."""
        amt = self.cleaned_data['amount']
        if amt > MAX_CLAIM:
            raise forms.ValidationError('The most you can request is $%s.' % MAX_CLAIM )
        if amt < 1:
            raise forms.ValidationError('Really?! That looks to be 0 or less.')
        return amt

    class Meta:
        model = TransitSubsidy
        #'person_name',
        fields = ( 'last_four_ssn', 'origin_street', 'origin_city', 'origin_state', 'origin_zip',
                   'destination', 'number_of_workdays', 'dc_wmta_smartrip_id',
                   'amount', 'total_commute_cost', 'daily_parking_cost', 'daily_roundtrip_cost', 'signature' )
        widgets = {
            'id': HiddenInput(),
            'route_description': Textarea(attrs={'rows': 2, 'cols': 53,}),
        }
| {
"repo_name": "cfpb/transit_subsidy",
"path": "transit_subsidy/models.py",
"copies": "2",
"size": "6006",
"license": "cc0-1.0",
"hash": 537117629374898200,
"line_mean": 32.125,
"line_max": 114,
"alpha_frac": 0.6192141192,
"autogenerated": false,
"ratio": 3.6801470588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.529936117802353,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cgomezfandino@gmail.com'
import datetime as dt
import v20
from configparser import ConfigParser
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Create an object config
config = ConfigParser()
# Read the config (supplies the Oanda v20 practice-account access token)
config.read("../API_Connection_Oanda/pyalgo.cfg")
class MRBT_Backtester(object):
''' Momentum backtesting strategy:
Attributes
==========
symbol: str
Oanda symbol with which to work with
start: str
start date for data retrieval
end: str
end date for data retrieval
amount: int, float
amount to be invested at the beginning
tc: float
proportional transaction costs (e.g. 0.3% = 0.003) per trade
sufix: str
timeFrame:
Candle TimeFrame
Methods
=========
get_data:
retrieves and prepares the base data set
run_strategy:
runs the backtest for the momentum-based strategy
plot_strategy:
plots the performance of the strategy compared to the symbol
'''
    def __init__(self, symbol, start, end, amount = 10000, tc = 0.000, sufix = '.000000000Z', timeFrame = 'H4', price = 'A'):
        '''
        Parameters
        ==========
        symbol: str
            Oanda instrument, e.g. 'EUR_USD'
        start, end: str
            date range; times are pinned to 09:00 on the start date and
            16:00 on the end date
        amount: int or float
            initial capital to be invested
        tc: float
            proportional transaction costs per trade
        sufix: str
            suffix appended to the ISO timestamps so they match the Oanda
            v20 API format (parameter name keeps its original misspelling
            for backward compatibility; it is stored as self.suffix)
        timeFrame: str
            candle granularity, e.g. 'H4'
        price: str
            candle price component ('A' = ask)
        '''
        self.symbol = symbol # EUR_USD
        # self.start = start
        # self.end = end
        self.amount = amount
        self.tc = tc
        # self.lvrage = lvrage
        self.suffix = sufix
        self.timeFrame = timeFrame
        self.price = price
        # Trading-day window: 09:00 on the start date to 16:00 on the end date.
        self.start = dt.datetime.combine(pd.to_datetime(start), dt.time(9,00))
        self.end = dt.datetime.combine(pd.to_datetime(end), dt.time(16,00))
        # This string suffix is needed to conform to the Oanda API requirements regarding start and end times.
        self.fromTime = self.start.isoformat('T') + self.suffix
        self.toTime = self.end.isoformat('T') + self.suffix
        self.results = None
        self.colors = sns.hls_palette(14)
        # Column groups used by the plotting helpers (cash / percent / histogram).
        self.toplot_c = ['creturns_c']
        self.toplot_p = ['creturns_p']
        self.toplot_hist = ['returns']
        # Oanda v20 practice API context; token comes from pyalgo.cfg.
        self.ctx = v20.Context(
            'api-fxpractice.oanda.com',
            443,
            True,
            application='sample_code',
            token=config['oanda_v20']['access_token'],
            datetime_format='RFC3339')
        self.get_data()  # fetch candles immediately so the object is ready to backtest
    def get_data(self):
        """Fetch candles from Oanda for the configured window and store them,
        with log returns, as the DataFrame ``self.asset``."""
        res = self.ctx.instrument.candles(
            instrument= self.symbol,
            fromTime= self.fromTime,
            toTime= self.toTime,
            granularity= self.timeFrame,
            price= self.price)
        # data.keys()
        raw = res.get('candles')
        # Convert the v20 candle objects to plain dicts, flattening the
        # nested 'ask' OHLC dict into the top level.
        raw = [cs.dict() for cs in raw]
        for cs in raw:
            cs.update(cs['ask'])
            del cs['ask']
        data = pd.DataFrame(raw)
        data['time'] = pd.to_datetime(data['time'], unit='ns')
        data = data.set_index('time')
        data.index = pd.DatetimeIndex(data.index)
        # print data.info()
        # OHLC columns arrive as strings; cast and rename to Ask-suffixed names.
        cols = ['c', 'l', 'h', 'o']
        data[cols] = data[cols].astype('float64')
        data.rename(columns={'c': 'CloseAsk', 'l': 'LowAsk',
                             'h': 'HighAsk', 'o': 'OpenAsk'}, inplace=True)
        # Log returns of consecutive closes; the first row is NaN.
        data['returns'] = np.log(data['CloseAsk'] / data['CloseAsk'].shift(1))
        self.asset = data
def run_strategy(self, SMA, threshold_std = 1, roll = 100, r = 0.0225, halfKC = True):
    '''
    Run a mean-reversion backtest for every SMA window length in ``SMA``.

    :param SMA: iterable of SMA window lengths (in bars) to backtest
    :param threshold_std: entry threshold in standard deviations of the
        price-to-SMA distance
    :param roll: rolling window (in bars) for the Kelly-criterion leverage
    :param r: risk-free rate used in the Kelly fraction
    :param halfKC: use half-Kelly leverage when True, full Kelly otherwise
    :return: dict with, per SMA window, the following figures
        aperf_c: Absolute Strategy performance in Cash
        aperf_p: Absolute Strategy performance in Percentage
        operf_c: Out-/underperformance Of strategy in Cash
        operf_p: Out-/underperformance Of strategy in Percentage
        mdd_c: Maximum Drawdown in Cash
        mdd_p: Maximum Drawdown in Percentage
    '''
    asset = self.asset.copy()
    self.SMA = SMA
    ## Kelly Criterion leverage, floored at 1x, annualised with 6 H4 bars/day
    ## over 252 days.  NOTE(review): the std is scaled by 6 * 252**0.5 rather
    ## than (6*252)**0.5 -- confirm intended.
    if halfKC is True:
        asset['meanRoll'] = asset['returns'].rolling(roll).mean() * 6 * 252
        asset['stdRoll'] = asset['returns'].rolling(roll).std() * 6 * 252 ** 0.5
        asset['KC'] = np.where( ((asset['meanRoll'] - r) / asset['stdRoll']**2)/2 < 1, 1, ((asset['meanRoll'] - r) / asset['stdRoll']**2)/2 )
        asset['KC'].fillna(1,inplace =True)
    else:
        asset['meanRoll'] = asset['returns'].rolling(roll).mean() * 6 * 252
        asset['stdRoll'] = asset['returns'].rolling(roll).std() * 6 * 252 ** 0.5
        asset['KC'] = np.where( ((asset['meanRoll'] - r) / asset['stdRoll']**2) < 1, 1, ((asset['meanRoll'] - r) / asset['stdRoll']**2) )
        asset['KC'].fillna(1,inplace =True)
    # Buy-and-hold benchmark, cumulative returns without leverage
    # In Cash
    asset['creturns_c'] = self.amount * asset['returns'].cumsum().apply(np.exp)
    # In Percentage
    asset['creturns_p'] = asset['returns'].cumsum().apply(np.exp)
    # Cumulative returns with Kelly leverage
    asset['lreturns'] = asset['returns'] * asset['KC']
    # In Cash
    asset['lcreturns_c'] = self.amount * asset['lreturns'].cumsum().apply(np.exp)
    # In Percentage
    asset['lcreturns_p'] = asset['lreturns'].cumsum().apply(np.exp)
    # Running maximum in cash
    asset['lcmreturns_c'] = asset['lcreturns_c'].cummax()
    # Running maximum in percentage
    asset['lcmreturns_p'] = asset['lcreturns_p'].cummax()
    # Drawdown in cash
    asset['ddreturns_c'] = asset['lcmreturns_c'] - asset['lcreturns_c']
    # Drawdown in percentage
    asset['ddreturns_p'] = asset['lcmreturns_p'] - asset['lcreturns_p']
    dicti = {'Mean Reverting Strategies': {}}
    x = []
    y = []
    z = []
    for i in SMA:
        asset['sma_%i' %i] = asset['CloseAsk'].rolling(i).mean()
        asset['distance_%i' %i] = asset['CloseAsk'] - asset['sma_%i' %i]
        self.threshold = threshold_std * np.std(asset['distance_%i' %i])
        ## Position: short above +threshold, long below -threshold, flat after
        ## the distance crosses zero, otherwise carry the previous position.
        asset['position_%i' %i] = np.where(asset['distance_%i' %i] > self.threshold, -1, np.nan)
        asset['position_%i' %i] = np.where(asset['distance_%i' %i] < -self.threshold, 1, asset['position_%i' %i])
        asset['position_%i' %i] = np.where(asset['distance_%i' %i] * asset['distance_%i' %i].shift(1) < 0, 0, asset['position_%i' %i])
        ## Fill all remaining NaNs with 0 (flat)
        asset['position_%i' %i] = asset['position_%i' %i].ffill().fillna(0)
        asset['strategy_%i' %i] = asset['position_%i' %i].shift(1) * asset['returns']
        ## Leveraged strategy returns
        asset['lstrategy_%i' % i] = asset['strategy_%i' % i] * asset['KC']
        self.toplot_hist.append('lstrategy_%i' % i)
        ## determine when a trade takes place (long or short)
        trades = asset['position_%i' %i].diff().fillna(0) != 0
        ## Subtract transaction cost whenever a trade takes place.
        ## Fix: use .loc -- the original chained indexing
        ## asset['lstrategy_%i' %i][trades] -= tc assigned through a chained
        ## lookup, which pandas does not guarantee to write back.
        asset.loc[trades, 'lstrategy_%i' % i] -= self.tc
        ## Cumulative returns in Cash
        asset['cstrategy_c_%i' % i] = self.amount * asset['lstrategy_%i' % i].cumsum().apply(np.exp)
        ## Cumulative returns in percentage
        asset['cstrategy_p_%i' % i] = asset['lstrategy_%i' % i].cumsum().apply(np.exp)
        ## Max cumulative returns in cash
        asset['cmstrategy_c_%i' % i] = asset['cstrategy_c_%i' % i].cummax()
        ## Max cumulative returns in percentage
        asset['cmstrategy_p_%i' % i] = asset['cstrategy_p_%i' % i].cummax()
        ## Drawdown in Cash
        asset['ddstrategy_c_%i' % i] = asset['cmstrategy_c_%i' % i] - asset['cstrategy_c_%i' % i]
        ## Drawdown in Percentage
        asset['ddstrategy_p_%i' % i] = asset['cmstrategy_p_%i' % i] - asset['cstrategy_p_%i' % i]
        ## Columns the plotting helpers will show later
        self.toplot_c.append('cstrategy_c_%i' % i)
        self.toplot_p.append('cstrategy_p_%i' % i)
        ## save asset df into self.results
        self.results = asset
        ## Final figures for this SMA window.
        ## Fix: .ix was deprecated and removed from pandas; use positional
        ## .iloc[-1] to read the last row.
        aperf_c = self.results['cstrategy_c_%i' %i].iloc[-1]
        aperf_p = self.results['cstrategy_p_%i' %i].iloc[-1]
        ## Out-/underperformance versus buy and hold
        operf_c = aperf_c - self.results['creturns_c'].iloc[-1]
        operf_p = aperf_p - self.results['creturns_p'].iloc[-1]
        ## Maximum drawdowns
        mdd_c = self.results['ddstrategy_c_%i' %i].max()
        mdd_p = self.results['ddstrategy_p_%i' %i].max()
        keys = ['aperf_c_%i' %i, 'aperf_p_%i' %i, 'operf_c_%i' %i, 'operf_p_%i' %i, 'mdd_c_%i' %i, 'mdd_p_%i' %i]
        values = ['%.2f' % np.round(aperf_c, 2), '%.2f' % np.round(aperf_p, 2), '%.2f' % np.round(operf_c, 2),
                  '%.2f' % np.round(operf_p, 2), '%.2f' % np.round(mdd_c, 2), '%.2f' % np.round(mdd_p, 2)]
        res = dict(zip(keys, values))
        dicti['Mean Reverting Strategies']['strategy_%i' %i] = res
        x.append(i)
        y.append(aperf_p)
        z.append(mdd_p)
    self.x = x # SMA windows
    self.y = y # final returns
    self.z = z # maximum drawdowns
    return dicti
def plot_strategy(self):
    '''Plot the cumulative percentage performance of every backtested strategy.'''
    if self.results is None:
        print('No results to plot yet. Run a strategy.')
        # Fix: the original fell through and tried to index None below.
        return
    title = 'Mean Reverting Backtesting - %s \n %s ' % (self.symbol, self.timeFrame)
    # self.results[self.toplot_c].plot(title=title, figsize=(10, 6)) #Cash
    self.results[self.toplot_p].plot(title=title, figsize=(10, 6), color= self.colors) #Percentage
    plt.show()
def hist_returns(self):
    '''Plot a histogram of the raw returns and the leveraged strategy returns.'''
    if self.results is None:
        print('No results to plot yet. Run a strategy.')
        # Fix: the original fell through and tried to index None below.
        return
    title = 'Histogram Returns - Mean Reverting Backtesting - %s \n %s ' % (self.symbol, self.timeFrame)
    self.results[self.toplot_hist].plot.hist(title=title, color=self.colors, figsize=(10, 6), alpha = 0.5, bins=30) #in Cash
    # self.results[self.toplot_p].plot.hist(title=title, figsize=(10, 6), alpha = 0.5, bins=30) #in Percentage
    plt.ylabel('Rentabilidad %')
    # plt.hist(self.results['creturns_p'])
    plt.show()
# def plot_mr(self):
#
# if self.results is None:
# print('No results to plot yet. Run a strategy.')
#
# title = 'Mean Reverting (%i) Backtesting - %s ' % (self.SMA, self.symbol)
# self.results[['distance']].plot(title=title, figsize=(10, 6))
# plt.axhline(self.threshold, color='r')
# plt.axhline(-self.threshold, color='r')
# plt.axhline(0, color='r')
# # self.results[['creturns_p', 'cstrategy_p']].plot(title=title, figsize=(10, 6))
# plt.show()
def plot_bstmr(self):
    '''Plot final returns and maximum drawdown of every SMA strategy
    against the SMA window length.'''
    if self.results is None:
        print('No results to plot yet. Run a strategy.')
        # Fix: the original fell through; self.x/self.y/self.z only exist
        # after run_strategy(), so plotting below would fail.
        return
    title = 'All Mean Reverting Strategies Final Returns - %s \n %s ' % (self.symbol, self.timeFrame)
    plt.plot(self.x, self.y, 'b-o', alpha = 0.5)
    plt.plot(self.x, self.z, 'r--o', alpha = 0.5)
    plt.title(title)
    plt.legend(['Final Returns', 'Maximum Drawdown'])
    plt.xlabel('Mean Reverting/SMA')
    plt.ylabel('Returns/MDD')
    plt.show()
def plot_series(self):
    '''Plot the raw ask close-price series of the instrument.'''
    plt.plot(self.asset['CloseAsk'])
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('%s - %s' %(self.symbol,self.timeFrame))
    plt.show()
if __name__ == '__main__':
    # Demo run: EUR/USD, two years of H4 candles, SMA windows 20..200.
    backtester = MRBT_Backtester('EUR_USD', '2015-01-01', '2017-01-01')
    stats = backtester.run_strategy(SMA=list(range(20, 220, 20)), threshold_std=1.5, roll=100, halfKC=True)
    print(stats)
    backtester.plot_strategy()
    backtester.plot_bstmr()
    backtester.hist_returns()
| {
"repo_name": "cgomezfandino/Project_PTX",
"path": "Models_TFM/mrbt_KCL.py",
"copies": "1",
"size": "13314",
"license": "mit",
"hash": -4155866523309742000,
"line_mean": 35.3770491803,
"line_max": 145,
"alpha_frac": 0.5512242752,
"autogenerated": false,
"ratio": 3.317717418390232,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4368941693590232,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cgomezfandino@gmail.com'

import pandas as pd
import configparser
import v20
import json

# Read the Oanda v20 credentials (account id / access token) from the shared
# config file.
config = configparser.ConfigParser()
### Connection to know the account number
config.read('../API_Connection_Oanda/pyalgo.cfg')

ctx = v20.Context(
    'api-fxpractice.oanda.com',
    443,
    True,
    application= 'sample_code',
    token= config['oanda_v20']['access_token'],
    datetime_format= 'RFC3339')

response = ctx.account.list()
accounts = response.get('accounts')
# Fix: use a distinct loop variable -- the original `for accounts in accounts`
# rebound the name of the list being iterated, shadowing it.
for account in accounts:
    print('Account: %s' %account)

### Retrive all Instruments
response = ctx.account.instruments(
    config['oanda_v20']['account_id'])
instruments = response.get('instruments')
print(instruments[0].dict())

# symbols = []
for instrument in instruments:
    ins = instrument.dict()
    print('%20s | %10s | %20s' % (ins['displayName'],
                                  ins['name'],
                                  ins['type']))
# symbols.append([ins['displayName'],ins['name'],ins['type']])
# sym = pd.DataFrame(symbols, columns=['displayName','Name','Type'])
# sym.to_csv('Instrumens.csv',';')
# print(pd.DataFrame(symbols, columns=['displayName','Name','Type']))
| {
"repo_name": "cgomezfandino/Project_PTX",
"path": "API_Connection_Oanda/Oanda_Instruments.py",
"copies": "1",
"size": "1185",
"license": "mit",
"hash": -5912929131225544000,
"line_mean": 20.9444444444,
"line_max": 69,
"alpha_frac": 0.641350211,
"autogenerated": false,
"ratio": 3.4248554913294798,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.456620570232948,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cgonzalez'
import os
import numpy as np
import pygal
from pygal.style import RedBlueStyle as PlotStyle
from PyQt4 import QtGui
from PyQt4 import QtCore
from .modules import regression
from .forms.MainWindow_UI import *
from .forms.AboutAlges_UI import *
from .forms.About_UI import *
class Main(QtGui.QMainWindow):
    """Main application window.

    Loads (x, y) data from a delimited text file, fits a regression via
    the project's ``regression`` module and renders the resulting pygal
    chart inside the embedded web view.
    """

    def __init__(self):
        """
        Main window initialization: instance state, generated UI, signal wiring.
        """
        QtGui.QMainWindow.__init__(self)

        #Instance variables
        self.x = []                 # loaded x samples (numpy column or empty list)
        self.y = []                 # loaded y samples
        self.fit = None             # regression.Regression built in load_data()
        self.datafile = ""          # path of the currently loaded file

        #UI settings (generated by Qt Designer)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        #Signals
        self.ui.update_button.clicked.connect(self.manualupdate_plot)
        self.ui.datafile_button.clicked.connect(self.select_file)
        self.ui.about_action.triggered.connect(self.show_about)
        self.ui.aboutalges_action.triggered.connect(self.show_about_alges)
        self.ui.datasize_spinbox.valueChanged.connect(self.autoupdate_plot)
        self.ui.alpha_spinbox.valueChanged.connect(self.autoupdate_plot)
        self.ui.datasize_check.toggled.connect(self.autoupdate_plot)
        # Keep the alpha slider (int, scaled x1000) and spinbox (float) in sync.
        self.ui.alpha_spinbox.valueChanged.connect(lambda x: self.ui.alpha_slider.setValue(int(x*1000)))
        self.ui.alpha_slider.valueChanged.connect(lambda x: self.ui.alpha_spinbox.setValue(float(x/1000)))
        self.ui.alpha_spinbox.setValue(0.5)

    def select_file(self):
        """
        Opens a file dialog, loads the chosen file and refreshes the plot.
        """
        filename = QtGui.QFileDialog.getOpenFileName(self, "Open datafile",
                                                     QtCore.QDir.currentPath(),
                                                     "Text files (*.txt);"
                                                     "Comma separated (*.csv);"
                                                     "All files (*.*)")
        self.ui.datafile_line.setText(filename)
        self.datafile = self.ui.datafile_line.text()
        self.load_data(self.datafile)
        self.autoupdate_plot(0)

    def load_data(self, filename, cls=(1, 2)):
        """
        Loads columns ``cls`` of a comma-separated file into self.x / self.y
        and rebuilds the regression object.  A missing file clears the data
        and shows a status-bar message instead of raising.
        """
        try:
            data = np.loadtxt(filename, delimiter=',', skiprows=1, usecols=cls)
            self.x = data[:, 0]
            self.y = data[:, 1]
        except FileNotFoundError:
            self.x = []
            self.y = []
            self.ui.statusbar.showMessage("File not found.", 5000)
        self.fit = regression.Regression(self.x, self.y)

    def autoupdate_plot(self, value):
        """
        Auto update plot slot: refresh only when the auto-update box is checked.
        The `value` argument from the Qt signal is ignored.
        """
        if self.ui.autoupdate_check.isChecked():
            self.update_plot()

    def manualupdate_plot(self):
        """Update button slot: reload the file if its path changed, then replot."""
        if self.datafile != self.ui.datafile_line.text():
            self.datafile = self.ui.datafile_line.text()
            self.load_data(self.datafile)
        self.update_plot()

    def update_plot(self):
        """
        Main function: run the fit with the current UI parameters and render
        the pygal chart into the web view.
        """
        if (len(self.x) == 0) or (len(self.y) == 0):
            return
        try:
            window = self.ui.datasize_spinbox.value()
            # Candidate step sizes for the optimisation mode.
            # NOTE: the name shadows the builtin `slice`.
            slice = list(range(5, 60, 2))
            alpha = self.ui.alpha_spinbox.value()
            # NOTE(review): m, c, r, i, step are assumed to be slope,
            # intercept, R^2, start index and window length -- confirm
            # against the regression module.
            if self.ui.datasize_check.isChecked():
                [m, c, r, i, step] = self.fit.regression(percentil=window, alpha=alpha)
            else:
                [m, c, r, i, step] = self.fit.optimization(alpha=alpha, steps=slice)
        except Exception as e:
            self.ui.statusbar.showMessage("Error, please try again.", 2000)
            print(e)
            return
        # Three points of the fitted line f(x) = m*x + c: x = 0 and the data extremes.
        y_adj = [c, self.x[0]*m+c, self.x[-1]*m+c]
        x_adj = [0, self.x[0], self.x[-1]]
        str_fx = "f(x) = {0:.2}*x{1:+.2}".format(m, c)
        str_r2 = "R2 = {0:.4}".format(r)
        str_fit = "{0} ({1})".format(str_fx, str_r2)
        # Build the chart: raw data, fitted line and the subset used by the fit.
        plot = pygal.XY(stroke=True, style=PlotStyle, disable_xml_declaration=True)
        plot.title = os.path.basename(self.datafile)
        plot.x_title = 'Shear rate'
        plot.y_title = 'Shear stress'
        plot.legend_at_bottom = True
        css_dir = os.path.dirname(__file__)+'/data_css'
        plot.config.css.append(css_dir+'/base-new.css')
        plot.add('Experimental data', [(x, y) for x, y in zip(self.x, self.y)])
        plot.add('Fit {0}'.format(str_fit), [(x, y) for x, y in zip(x_adj, y_adj)])
        plot.add('Used data', [(x, y) for x, y in zip(self.x[i:i+step], self.y[i:i+step])])
        plot_html = plot.render()
        page_html = self.__get_page(plot_html)
        self.ui.plot_webview.setContent(page_html)
        self.ui.statusbar.showMessage("Graph loaded.", 5000)

    def __get_page(self, body):
        """
        Returns the full HTML page embedding `body`, with the bundled pygal
        tooltip scripts referenced from the local data_js directory.
        """
        # js_dir = "http://kozea.github.com/pygal.js/javascripts"
        js_dir = "file://"+os.path.dirname(__file__)+'/data_js'
        js1 = js_dir+"/svg.jquery.js"
        js2 = js_dir+"/pygal-tooltips.js"
        page_html = "<!DOCTYPE html><html><head>" \
                    "<script src=\""+js1+"\"></script>" \
                    "<script src=\""+js2+"\"></script>" \
                    "</head><body>"+body+"</body></html>"
        return page_html

    def show_about(self):
        #Display the application's about dialog
        widget = AboutWindow(self)
        widget.show()

    def show_about_alges(self):
        #Display the ALGES about dialog
        widget = AboutAlgesWindow(self)
        widget.show()
class AboutAlgesWindow(QtGui.QDialog):
    """
    About dialog for ALGES
    """
    def __init__(self, parent):
        """Build the dialog from its generated Qt Designer UI class."""
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_AboutAlgesWidget()
        self.ui.setupUi(self)
class AboutWindow(QtGui.QDialog):
    """
    About dialog for application
    """
    def __init__(self, parent):
        """Build the dialog from its generated Qt Designer UI class."""
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_AboutWidget()
        self.ui.setupUi(self)
"repo_name": "carlgonz/u-fit",
"path": "src/python/u_fit/main.py",
"copies": "1",
"size": "5989",
"license": "mit",
"hash": 3031755201750351000,
"line_mean": 30.6931216931,
"line_max": 106,
"alpha_frac": 0.552012022,
"autogenerated": false,
"ratio": 3.5691299165673422,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46211419385673425,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chachalaca'
from Roulette import Roulette
from Strategy import Strategy
class AntiMartingale(Strategy):
    """Anti-martingale (paroli) betting: double the stake after every win,
    fall back to the initial stake after a loss."""

    def __init__(self, bet: float, cash: float, roulette: Roulette):
        self.init_bet = bet
        self.cash = cash
        self.roulette = roulette

    def play(self):
        """Bet on colour until the bankroll or the table limit stops us;
        return the list of {cash, bet} states recorded before each spin."""
        history = []
        stake, bankroll = self.init_bet, self.cash
        while bankroll >= stake and stake <= self.roulette.max_bet:
            history.append({"cash": bankroll, "bet": stake})
            bankroll -= stake
            payout = self.roulette.bet_on_color(stake)
            bankroll += payout
            # Reset after a loss (payout 0), double after a win.
            stake = self.init_bet if payout == 0 else stake * 2
        return history

    def play_for(self, goal, all_in=True):
        """Try to reach `goal`; True on success, False when busted or when the
        required stake is unplayable and all_in is False."""
        stake, bankroll = self.init_bet, self.cash
        while bankroll < goal:
            if bankroll < self.roulette.min_bet:
                return False
            if all_in is False and (stake > self.roulette.max_bet or stake > bankroll):
                return False
            stake = min(stake, self.roulette.max_bet)
            if stake > bankroll:
                stake = bankroll  # go all in
            bankroll -= stake
            payout = self.roulette.bet_on_color(stake)
            bankroll += payout
            stake = self.init_bet if payout == 0 else stake * 2
        return True
| {
"repo_name": "chachalaca/MonteCarloRoulette",
"path": "AntiMartingale.py",
"copies": "1",
"size": "1349",
"license": "mit",
"hash": -262107926134238600,
"line_mean": 21.8644067797,
"line_max": 79,
"alpha_frac": 0.4788732394,
"autogenerated": false,
"ratio": 3.685792349726776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46646655891267763,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chachalaca'
from Roulette import Roulette
from Strategy import Strategy
class DAlembert(Strategy):
    """D'Alembert progression: raise the stake by one unit after a loss,
    lower it by one (never below the initial stake) after a win."""

    def __init__(self, init_bet: float, cash: float, roulette: Roulette):
        self.init_bet = init_bet
        self.cash = cash
        self.roulette = roulette

    def play(self):
        """Play until broke or over the table limit; return the list of
        {cash, bet} states recorded before each spin."""
        history = []
        stake, bankroll = self.init_bet, self.cash
        while bankroll >= stake and stake <= self.roulette.max_bet:
            history.append({"cash": bankroll, "bet": stake})
            bankroll -= stake
            payout = self.roulette.bet_on_color(stake)
            bankroll += payout
            if payout == 0:
                stake += 1
            else:
                stake = max(self.init_bet, stake - 1)
        return history

    def play_for(self, goal, all_in=True):
        """Try to reach `goal`; True on success, False when busted or when the
        required stake is unplayable and all_in is False."""
        stake, bankroll = self.init_bet, self.cash
        while bankroll < goal:
            if bankroll < self.roulette.min_bet:
                return False
            if all_in is False and (stake > self.roulette.max_bet or stake > bankroll):
                return False
            stake = min(stake, self.roulette.max_bet)
            if stake > bankroll:
                stake = bankroll  # go all in
            bankroll -= stake
            payout = self.roulette.bet_on_color(stake)
            bankroll += payout
            if payout == 0:
                stake += 1
            else:
                stake = max(self.init_bet, stake - 1)
        return True
| {
"repo_name": "chachalaca/MonteCarloRoulette",
"path": "DAlembert.py",
"copies": "1",
"size": "1377",
"license": "mit",
"hash": 8482275323314439000,
"line_mean": 22.7413793103,
"line_max": 79,
"alpha_frac": 0.4814814815,
"autogenerated": false,
"ratio": 3.5953002610966056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9571378461331255,
"avg_score": 0.001080656253070046,
"num_lines": 58
} |
__author__ = 'chachalaca'
from Roulette import Roulette
from Strategy import Strategy
class Fibonacci(Strategy):
    """Fibonacci progression on colour bets: one step up the sequence after a
    loss, two steps back (floored at index 0) after a win.

    NOTE(review): the stake is taken directly from the Fibonacci value, so
    `init_bet` only sets the very first stake -- confirm intended.
    """

    def __init__(self, init_bet: float, cash: float, roulette: Roulette):
        self.init_bet = init_bet
        self.cash = cash
        self.roulette = roulette

    def fib(self, n):
        """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

        Fix: iterative O(n) implementation -- the original naive double
        recursion was O(2**n) and stalled on long losing streaks; the
        returned values are identical.
        """
        if n < 2:
            return n
        a, b = 0, 1
        for _ in range(n - 1):
            a, b = b, a + b
        return b

    def play(self):
        """Play until broke or over the table limit; return the list of
        {cash, bet} states recorded before each spin."""
        history = []
        bet = self.init_bet
        cash = self.cash
        fib = 0  # current index in the Fibonacci sequence
        while cash >= bet <= self.roulette.max_bet:
            history.append({"cash": cash, "bet": bet})
            cash -= bet
            r = self.roulette.bet_on_color(bet)
            cash += r
            if r == 0:
                # Loss: advance one step.
                fib += 1
                bet = self.fib(fib)
            else:
                # Win: retreat two steps, never below the start.
                fib = max(fib-2, 0)
                bet = self.fib(fib)
        return history

    def play_for(self, goal, all_in=True):
        """Try to reach `goal`; True on success, False when busted or when the
        required stake is unplayable and all_in is False."""
        bet = self.init_bet
        cash = self.cash
        fib = 0
        while cash < goal:
            if cash < self.roulette.min_bet:
                return False
            if all_in is False and (bet > self.roulette.max_bet or bet > cash):
                return False
            bet = min(bet, self.roulette.max_bet)
            if bet > cash:
                bet = cash  # go all in
            cash -= bet
            r = self.roulette.bet_on_color(bet)
            cash += r
            if r == 0:
                fib += 1
            else:
                fib = max(fib-2, 0)
            bet = self.fib(fib)
        return True
| {
"repo_name": "chachalaca/MonteCarloRoulette",
"path": "Fibonacci.py",
"copies": "1",
"size": "1599",
"license": "mit",
"hash": 45379741469466550,
"line_mean": 21.5211267606,
"line_max": 79,
"alpha_frac": 0.4602876798,
"autogenerated": false,
"ratio": 3.4535637149028076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44138513947028074,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chachalaca'
from Roulette import Roulette
from Strategy import Strategy
class Labouchere(Strategy):
    """Labouchere (cancellation) system.

    The stake is the sum of the first and last numbers of a working series;
    a win cancels those two numbers, a loss appends the lost stake.
    """

    init_series = None

    def __init__(self, init_bet: float, cash: float, roulette: Roulette):
        self.init_bet = init_bet
        self.cash = cash
        self.roulette = roulette
        # Classic 1..6 ladder shifted by the initial bet.
        self.init_series = [x+init_bet for x in [0, 1, 2, 3, 4, 5]]

    def play(self):
        """Play until broke or over the table limit; return the list of
        {cash, bet} states recorded before each spin."""
        history = []
        # Fix: work on a copy -- the original aliased self.init_series and
        # mutated it in place, corrupting the template for every later
        # play()/play_for() call on the same instance.
        series = list(self.init_series)
        bet = series[0]+series[-1]
        cash = self.cash
        while cash >= bet <= self.roulette.max_bet:
            history.append({"cash": cash, "bet": bet})
            cash -= bet
            r = self.roulette.bet_on_color(bet)
            cash += r
            if r == 0:
                # Loss: append the lost stake to the series.
                series.append(bet)
            else:
                # Win: cancel the outer numbers; restart from a fresh series
                # when fewer than three numbers remain.
                if len(series) > 2:
                    del series[0]
                    del series[-1]
                else:
                    series = list(self.init_series)
            bet = series[0]+series[-1]
        return history

    def play_for(self, goal, all_in=True):
        """Try to reach `goal`; True on success, False when busted or when the
        required stake is unplayable and all_in is False."""
        # Fix: copy here as well (see play()).
        series = list(self.init_series)
        bet = series[0]+series[-1]
        cash = self.cash
        while cash < goal:
            if cash < self.roulette.min_bet:
                return False
            if all_in is False and (bet > self.roulette.max_bet or bet > cash):
                return False
            if bet > self.roulette.max_bet:
                # Split the stake: bet the table maximum, keep the remainder
                # in the series.
                series.append(bet - self.roulette.max_bet)
                bet = self.roulette.max_bet
            if bet > cash:
                series.append(bet - cash)
                bet = cash  # go all in
            cash -= bet
            r = self.roulette.bet_on_color(bet)
            cash += r
            if r == 0:
                series.append(bet)
            else:
                if len(series) > 2:
                    del series[0]
                    del series[-1]
                else:
                    series = list(self.init_series)
            bet = series[0]+series[-1]
        return True
| {
"repo_name": "chachalaca/MonteCarloRoulette",
"path": "Labouchere.py",
"copies": "1",
"size": "2083",
"license": "mit",
"hash": 6592158193160886000,
"line_mean": 25.0375,
"line_max": 79,
"alpha_frac": 0.4728756601,
"autogenerated": false,
"ratio": 3.766726943942134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4739602604042134,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chachalaca'
from Roulette import Roulette
from Strategy import Strategy
class Martingale(Strategy):
    """Classic martingale: double the stake after every loss, return to the
    initial stake after a win."""

    def __init__(self, bet: float, cash: float, roulette: Roulette):
        self.init_bet = bet
        self.cash = cash
        self.roulette = roulette

    def play(self):
        """Play until broke or over the table limit; return the list of
        {cash, bet} states recorded before each spin."""
        history = []
        stake, bankroll = self.init_bet, self.cash
        while bankroll >= stake and stake <= self.roulette.max_bet:
            history.append({"cash": bankroll, "bet": stake})
            bankroll -= stake
            payout = self.roulette.bet_on_color(stake)
            bankroll += payout
            # Double after a loss (payout 0), reset after a win.
            stake = stake * 2 if payout == 0 else self.init_bet
        return history

    def play_for(self, goal, all_in=True):
        """Try to reach `goal`; True on success, False when busted or when the
        required stake is unplayable and all_in is False."""
        stake, bankroll = self.init_bet, self.cash
        while bankroll < goal:
            if bankroll < self.roulette.min_bet:
                return False
            if all_in is False and (stake > self.roulette.max_bet or stake > bankroll):
                return False
            stake = min(stake, self.roulette.max_bet)
            if stake > bankroll:
                stake = bankroll  # go all in
            bankroll -= stake
            payout = self.roulette.bet_on_color(stake)
            bankroll += payout
            stake = stake * 2 if payout == 0 else self.init_bet
        return True
| {
"repo_name": "chachalaca/MonteCarloRoulette",
"path": "Martingale.py",
"copies": "1",
"size": "1344",
"license": "mit",
"hash": 5559362592309068000,
"line_mean": 22.1724137931,
"line_max": 79,
"alpha_frac": 0.4776785714,
"autogenerated": false,
"ratio": 3.6923076923076925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9664697012728787,
"avg_score": 0.0010578501957812301,
"num_lines": 58
} |
__author__ = 'chachalaca'
import numpy as np
from functools import reduce
class KMeans:
    """K-means clustering: random initialisation followed by a greedy
    single-sample reassignment phase that minimises the mean within-cluster
    sum of squared distances."""

    clusters_count = None    # number of clusters requested
    cluster_centers = None   # {cluster_index: ndarray center}, set by fit()

    def __init__(self, clusters_count):
        self.clusters_count = clusters_count

    def fit(self, data):
        """Compute the cluster centers for `data` (2-D float array); return self."""
        self.cluster_centers = self._lloyd_k_means(data)
        return self

    def predict(self, data):
        """Return, for each sample, the key of its nearest cluster center.

        Raises RuntimeError when called before fit().
        """
        if self.cluster_centers is None:
            raise RuntimeError(
                "fit() must be called before predict()."
            )
        return list(
            map(
                lambda item: self._get_nearest_center(item, self.cluster_centers),
                data
            )
        )

    def _check_data(self, data):
        """Validate dtype and shape of the input array.

        NOTE(review): this helper is currently never called from
        fit()/predict() -- confirm whether it should be.
        """
        if issubclass(np.dtype("float64").type, data.dtype.type) is not True:
            # Fix: %s -- a dtype is not an integer, so the original
            # '%d' raised a TypeError instead of this ValueError.
            raise ValueError(
                "Data must be instance of float64, %s given." % (
                    data.dtype
                )
            )
        if data.shape[0] < self.clusters_count:
            raise ValueError(
                "Samples count (%d) must be higher or equal to clusters count (%d)." % (
                    data.shape[0],
                    self.clusters_count
                )
            )
        if len(data.shape) != 2:
            # Fix: %s with a 1-tuple -- data.shape is a tuple, so the original
            # "%d" % (data.shape) raised a TypeError while formatting.
            raise ValueError(
                "Wrong data shape (%s)." % (
                    data.shape,
                )
            )

    def _lloyd_k_means(self, data):
        """Cluster `data`; return {cluster_index: center ndarray}."""
        # Random uniform initialisation of each center within the per-feature
        # bounds of the data.
        cluster_centers = {}
        for i in range(self.clusters_count):
            init_center = []
            for j in range(data.shape[1]):
                init_center.append(
                    np.random.uniform(
                        np.min(data[:,j]),
                        np.max(data[:,j])
                    )
                )
            cluster_centers[i] = np.array(init_center)
        # Initial assignment of every sample to its nearest random center.
        clusters = {k: [] for k, v in cluster_centers.items()}
        for x in data:
            nearest_center = self._get_nearest_center(x, cluster_centers)
            clusters[nearest_center].append(x)
        cluster_centers = {c: np.mean(values, axis=0) for c, values in clusters.items()}
        # Greedy improvement: repeatedly apply the single-sample move that
        # decreases the objective the most; stop when no move helps.
        # NOTE(review): `in` on ndarrays and list.remove() with ndarray
        # elements rely on numpy's elementwise equality semantics -- confirm
        # behaviour for data with more than one feature column.
        while True:
            best_change = {
                "delta": 0,
                "cluster_to": None,
                "cluster_from": None,
                "x": None
            }
            for cluster_to in clusters.keys():
                for x in data:
                    if np.array(x) in np.array(clusters[cluster_to]):
                        continue
                    cluster_from = self._get_nearest_center(x, cluster_centers)
                    # Simulate moving x from cluster_from to cluster_to.
                    _clusters = {k: list(v) for k, v in clusters.items()}
                    _clusters[cluster_from] = [y for y in _clusters[cluster_from] if not np.array_equal(x, y)]
                    _clusters[cluster_to].append(x)
                    delta = self._get_objective_function_value(clusters, cluster_centers) \
                        - self._get_objective_function_value(_clusters, cluster_centers)
                    if delta > 0 and delta > best_change["delta"]:
                        best_change = {
                            "delta": delta,
                            "cluster_to": cluster_to,
                            "cluster_from": cluster_from,
                            "x": x
                        }
            if best_change["delta"] > 0:
                # Apply the best move and recompute the two affected centers.
                clusters[best_change["cluster_from"]].remove(best_change["x"])
                clusters[best_change["cluster_to"]].append(best_change["x"])
                cluster_centers[best_change["cluster_from"]] = np.mean(clusters[best_change["cluster_from"]], axis=0)
                cluster_centers[best_change["cluster_to"]] = np.mean(clusters[best_change["cluster_to"]], axis=0)
            else:
                return cluster_centers

    @staticmethod
    def _get_nearest_center(x, cluster_centers):
        """Return the key of the center with the smallest Euclidean distance to x."""
        distance = {}
        for k, c in cluster_centers.items():
            distance[k] = np.linalg.norm(x-c)
        return min(distance, key=distance.get)

    @staticmethod
    def _get_objective_function_value(clusters, cluster_centers):
        """Mean over clusters of the summed squared distances to each center."""
        return sum([
            reduce(
                lambda x, y: x + np.linalg.norm(y-cluster_centers[c])**2,
                values,
                0
            )
            for c, values in clusters.items()
        ]) / len(clusters)
| {
"repo_name": "chachalaca/K-means",
"path": "KMeans.py",
"copies": "1",
"size": "4374",
"license": "mit",
"hash": -5831556167015193000,
"line_mean": 32.3893129771,
"line_max": 117,
"alpha_frac": 0.4821673525,
"autogenerated": false,
"ratio": 4.347912524850894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5330079877350894,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chachalaca'

import numpy as np

from FrenchRoulette import FrenchRoulette
from AmericanRoulette import AmericanRoulette

from Martingale import Martingale
from Fibonacci import Fibonacci
from DAlembert import DAlembert
from Labouchere import Labouchere
from AntiMartingale import AntiMartingale

import pandas as pd
import seaborn as sns

sns.set(style="darkgrid")

# Simulation parameters: games per configuration plus the betting limits and
# starting bankroll shared by every strategy.
iterations = 10000
init_bet = 1
min_bet = 1
max_bet = 50
cash = 100

# Roulette variants and betting systems included in the study.
roulettes = [
    FrenchRoulette,
    AmericanRoulette
]
strategies = [
    Martingale,
    Fibonacci,
    DAlembert,
    Labouchere,
    AntiMartingale
]

# Parameter fingerprints embedded in the data/figure file names.
param_str = str(init_bet)+"-"+str(min_bet)+"-"+str(max_bet)+"-"+str(cash)
param_str_short = str(init_bet)+"-"+str(min_bet)
def main():
    """Entry point: report aggregate statistics from the saved simulation data."""
    # run_simulations()
    # make_visualizations()
    print_stats()
def run_simulations():
    """Run every roulette/strategy combination and persist two pickles:
    per-game results (rounds played, maximum cash, doubled or not) and
    doubling-probability grids over (max_bet, cash) used for the heatmaps.
    """
    avg = {}  # NOTE(review): populated but never read in this function
    data = pd.DataFrame(columns=('roulette', 'strategy', 'rounds', 'max_cash', 'doubled'))
    data2 = pd.DataFrame(columns=('roulette', 'strategy', 'max_bet', 'cash', 'doubled', 'all_in'))
    for Roulette in roulettes:
        avg[Roulette.__name__] = {}
        print(Roulette.__name__)
        for Strategy in strategies:
            print(Strategy.__name__)
            # General simulations
            for game in range(1, iterations):
                strategy = Strategy(init_bet, cash, Roulette(min_bet, max_bet))
                result = strategy.play()
                double = strategy.play_for(cash*2)
                data.loc[len(data)+1] = [
                    Roulette.__name__,
                    Strategy.__name__,
                    len(result),
                    max(list(map(lambda r: r["cash"], result))),
                    double
                ]
            # Probability data for heatmaps
            r = [min_bet*i for i in range(1,20)] # range for max_bet & cash values
            for max_bet_value in r:
                for cash_value in r:
                    doubled = []
                    doubled_all_in = []
                    for game in range(1, 500):
                        strategy = Strategy(init_bet, cash_value, Roulette(min_bet, max_bet_value))
                        doubled.append(strategy.play_for(goal=cash_value*2, all_in=False))
                        doubled_all_in.append(strategy.play_for(goal=cash_value*2, all_in=True))
                    # NOTE(review): max_bet/cash are stored as strings in the
                    # non-all-in row but as numbers in the all-in row below --
                    # confirm intended (make_visualizations casts them back).
                    data2.loc[len(data2)+1] = [
                        Roulette.__name__,
                        Strategy.__name__,
                        str(int(max_bet_value)),
                        str(int(cash_value)),
                        np.average(doubled),
                        False
                    ]
                    data2.loc[len(data2)+1] = [
                        Roulette.__name__,
                        Strategy.__name__,
                        max_bet_value,
                        cash_value,
                        np.average(doubled_all_in),
                        True
                    ]
    data.to_pickle("data/data-"+param_str+".pkl")
    data2.to_pickle("data/data2-"+param_str_short+".pkl")
    # data.to_csv("data/data-"+param_str+".csv")
def make_visualizations():
    """Render distribution plots and doubling-probability heatmaps from the
    pickles written by run_simulations().

    NOTE(review): relies on `sns.plt` (seaborn's former matplotlib re-export),
    which was removed in newer seaborn releases -- confirm the pinned version.
    """
    data = pd.read_pickle("data/data-"+param_str+".pkl")
    data2 = pd.read_pickle("data/data2-"+param_str_short+".pkl")
    # The non-all-in rows stored max_bet/cash as strings; normalise for pivoting.
    data2[["max_bet", "cash"]] = data2[["max_bet", "cash"]].astype(int)
    for Roulette in roulettes:
        for Strategy in strategies:
            filtered1 = data[data["roulette"] == Roulette.__name__]
            filtered = filtered1[filtered1["strategy"] == Strategy.__name__]
            print(filtered.count())
            # Individual distribution plots
            sns.distplot(filtered["rounds"].tolist())
            sns.plt.savefig("figures/"+Roulette.__name__+Strategy.__name__+"-"+param_str+"-rounds.png")
            sns.plt.clf()
            sns.distplot(filtered["max_cash"].tolist())
            sns.plt.savefig("figures/"+Roulette.__name__+Strategy.__name__+"-"+param_str+"-max_cash.png")
            sns.plt.clf()
    # Distribution plots (faceted over strategy x roulette)
    g = sns.FacetGrid(data, row="strategy", col="roulette", margin_titles=True, size=3, aspect=1)
    g.map(sns.kdeplot, "max_cash", shade=True, cut=0)
    g.set(xlim=(cash, 400), ylim=(0, 0.04))
    g.savefig("figures/strategies/max_cash-"+param_str_short+".png")
    g = sns.FacetGrid(data, row="strategy", col="roulette", margin_titles=True, size=3, aspect=1)
    g.map(sns.kdeplot, "rounds", shade=True, cut=0)
    g.set(xlim=(0, 1000), ylim=(0, 0.04))
    g.savefig("figures/strategies/rounds-"+param_str_short+".png")
    # Probability heatmaps, one cell per (max_bet, cash) pair
    g = sns.FacetGrid(data2[data2["all_in"] == 1], row="strategy", col="roulette", margin_titles=True, size=5, aspect=1)
    g.map_dataframe(lambda data, color: sns.heatmap(data.pivot('max_bet', 'cash', 'doubled'), linewidths=.5, vmin=0, vmax=1))
    g.savefig("figures/probabilities/heatmap-all_in-"+param_str_short+".png")
    g = sns.FacetGrid(data2[data2["all_in"] == 0], row="strategy", col="roulette", margin_titles=True, size=5, aspect=1)
    g.map_dataframe(lambda data, color: sns.heatmap(data.pivot('max_bet', 'cash', 'doubled'), linewidths=.5, vmin=0, vmax=1))
    g.savefig("figures/probabilities/heatmap-"+param_str_short+".png")
def print_stats():
    """Print, per roulette and strategy, the average (and variance) of rounds
    played and maximum cash reached, plus the doubling probability."""
    data = pd.read_pickle("data/data-"+param_str+".pkl")
    stats = {}
    for Roulette in roulettes:
        stats[Roulette.__name__] = {}
        for Strategy in strategies:
            mask = (data["roulette"] == Roulette.__name__) & (data["strategy"] == Strategy.__name__)
            subset = data[mask]
            rounds = subset["rounds"].tolist()
            max_cash = subset["max_cash"].tolist()
            stats[Roulette.__name__][Strategy.__name__] = {
                "rounds": str(np.average(rounds))+" ("+str(np.var(rounds))+")",
                "maximum cash": str(np.average(max_cash))+" ("+str(np.var(max_cash))+")",
                "double cash probability": str(np.average(subset["doubled"].tolist()))
            }
    __pretty_print_dict(stats)
def __pretty_print_dict(d, indent=0):
    """Recursively print a nested dict, one key per line, tab-indented by depth."""
    prefix = '\t' * indent
    for key, value in d.items():
        print(prefix + str(key))
        if isinstance(value, dict):
            __pretty_print_dict(value, indent+1)
        else:
            print(prefix + '\t' + str(value))
# Run the report when executed as a script.
if __name__ == "__main__":
    main()
| {
"repo_name": "chachalaca/MonteCarloRoulette",
"path": "main.py",
"copies": "1",
"size": "6344",
"license": "mit",
"hash": -4909996861291498000,
"line_mean": 31.5333333333,
"line_max": 131,
"alpha_frac": 0.5559583859,
"autogenerated": false,
"ratio": 3.351294241944004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9394913071794699,
"avg_score": 0.002467911209860878,
"num_lines": 195
} |
__author__ = 'Chad Dotson'
def valid_position(n_queen_positions, new_position):
    """Return True if `new_position` (column, row) attacks none of the queens
    already placed in `n_queen_positions`."""
    new_col, new_row = new_position
    for col, row in n_queen_positions:
        # Same row or same column is an attack.
        if row == new_row or col == new_col:
            return False
        # Equal horizontal and vertical offsets mean a shared diagonal.
        if abs(new_row - row) == abs(new_col - col):
            return False
    return True
def solve(n_queens_size, n_queen_positions, current_column):
    """Recursively enumerate all n-queens placements, column by column.

    :param n_queens_size: board size n (n columns by n rows)
    :param n_queen_positions: queens placed so far, as (column, row) tuples
    :param current_column: next column index to fill
    :return: list of complete solutions, each a list of (column, row) tuples
    """
    # Base case: every column is filled with a complete placement.
    if current_column == n_queens_size and len(n_queen_positions) == n_queens_size:
        return [n_queen_positions]
    solutions = []
    for row in range(0, n_queens_size):
        new_position = (current_column, row)
        if valid_position(n_queen_positions, new_position):
            # Branch on a copy so sibling rows don't see this placement.
            extended = n_queen_positions[:]
            extended.append(new_position)
            # FIX: the old `if solution is not None` guard was dead code --
            # solve() always returns a list (possibly empty), never None,
            # and extend() on an empty list is already a no-op.
            solutions.extend(solve(n_queens_size, extended, current_column + 1))
    return solutions
# Benchmark: solve the 11-queens problem and report how long it took.
import time
t1 = time.time()
solutions = solve(11, [], 0)
dt = round(time.time() - t1, 2)
msg = "N-Queens Found {0} Solutions in {1}s".format(len(solutions), str(dt))
try:
    # alert() presumably exists when this runs in the browser via Transcrypt
    # (see repo path src/demo_1_queens) -- TODO confirm; under CPython it
    # raises NameError and the fallback below prints instead.
    alert(msg)
    #debugger
except:
    print (msg)
| {
"repo_name": "axiros/transcrypt",
"path": "src/demo_1_queens/solver.py",
"copies": "1",
"size": "1358",
"license": "apache-2.0",
"hash": 7145266736148330000,
"line_mean": 26.7142857143,
"line_max": 94,
"alpha_frac": 0.6340206186,
"autogenerated": false,
"ratio": 3.4035087719298245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9490597647907963,
"avg_score": 0.009386348524372153,
"num_lines": 49
} |
__author__ = 'Chad'
import pandas as pd
import numpy as np
# Pandas makes data exploration and manipulation easier and more readable
# Pandas has its own read_csv method; it's smart enough to infer data types
train_df = pd.read_csv('.\\data\\train.csv', header=0)
# To prepare our data we must first convert text values to numeric values
# The Series object provides a map() function for easily translating values
sex_map = {'female': 0, 'male': 1}
train_df['SexNum'] = train_df.Sex.map(sex_map).astype(int)
# map() also supports more complex logic; here we're crudely parsing the title out of the Name column
train_df['Title'] = train_df.Name.map(lambda x: x.split(',')[1].strip().split(' ')[0])
# ...but we still need to convert titles to a number
title_map = {}
for i, v in enumerate(sorted(train_df.Title.unique())):  # this does allow invalid titles to be mapped
    title_map[v] = i
train_df['TitleNum'] = train_df.Title.map(title_map)
# Let's fill in missing Age values by Pclass and Title
num_class_values = len(train_df.Pclass.unique())
num_title_values = len(title_map)
train_df['AgeFill'] = train_df.Age
# age_stats[c, t] holds the median age for passenger class c+1 and title t,
# falling back to the class-wide median when a (class, title) pair has no ages.
age_stats = np.zeros((num_class_values, num_title_values))
for c in range(num_class_values):
    default_age = train_df[train_df.Pclass == c + 1].Age.dropna().median()
    for t in range(num_title_values):
        age_stats[c, t] = train_df[(train_df.Pclass == c + 1) & (train_df.TitleNum == t)].Age.dropna().median()
        if np.isnan(age_stats[c, t]):
            age_stats[c, t] = default_age
        # Impute: only rows whose Age is missing receive the (class, title) median.
        train_df.loc[(train_df.Age.isnull()) & (train_df.Pclass == c + 1) & (train_df.TitleNum == t), 'AgeFill'] = age_stats[c, t]
# We can also "engineer" new features by combining others
train_df['FamilySize'] = train_df['SibSp'] + train_df['Parch']
# With our training data prepped, we now load the test data
test_df = pd.read_csv('.\\data\\test.csv', header=0)
#test_df.info()
# Use loc[] to update DataFrame values in-place; for simplicity we'll replace missing Fares with the population avg Fare
test_df.loc[test_df.Fare.isnull(), 'Fare'] = train_df.Fare.mean()
# Apply Same transformations to SexNum, Title, TitleNum, and AgeFill
test_df['SexNum'] = test_df.Sex.map(sex_map).astype(int)
test_df['Title'] = test_df.Name.map(lambda x: x.split(',')[1].strip().split(' ')[0])
test_df['TitleNum'] = test_df.Title.map(title_map)
test_df['AgeFill'] = test_df.Age
for c in range(num_class_values):
    for t in range(num_title_values):
        # Reuse the medians computed from the TRAINING data (no test-set leakage).
        test_df.loc[(test_df.Age.isnull()) & (test_df.Pclass == c + 1) & (test_df.TitleNum == t), 'AgeFill'] = age_stats[c, t]
# ...and FamilySize
test_df['FamilySize'] = test_df['SibSp'] + test_df['Parch']
# Lastly, if new Titles are present in the test set just set them to the most popular title by Sex
test_df.loc[(test_df.TitleNum.isnull()) & (test_df.Sex == 'female'), 'TitleNum'] = train_df[train_df.Sex == 'female'].TitleNum.value_counts().index[0]
test_df.loc[(test_df.TitleNum.isnull()) & (test_df.Sex == 'male'), 'TitleNum'] = train_df[train_df.Sex == 'male'].TitleNum.value_counts().index[0]
# Now we guess which numeric columns will produce the best results; drop the unnecessary columns from each set
train_df = train_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked', 'Title', 'Age', 'SibSp', 'Parch'], axis=1)
# dropna() removes any row with NaN value and .values returns the data as an ndarray
train_data = train_df.dropna().values
# ...do the same thing to test_df
test_df = test_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked', 'Title', 'Age', 'SibSp', 'Parch'], axis=1)
test_data = test_df.dropna().values
# And FINALLY, we train our model and make predictions
# A random forest is a meta estimator that fits a number of decision tree classifiers on various sub-samples of the
# dataset and use averaging to improve the predictive accuracy and control over-fitting
# http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
from sklearn.ensemble import RandomForestClassifier
# Create an RFC with 100 decision trees
forest = RandomForestClassifier(n_estimators=100)
# Build a forest of trees using:
# X = the training data features (except PassengerId and Survived)
# y = the training data outputs (just Survived)
forest = forest.fit(train_data[0::, 2::], train_data[0::, 1])
# Compute the predicted output (survival outcomes) for each test row's features (except PassengerId)
# In this process each tree votes for an outcome weighted by its probability estimates. The predicted class is the one
# with the highest mean probability estimate across all trees in the forest
output = forest.predict(test_data[0::, 1::])
# Combine PassengerId and predicted Survived values
result = pd.DataFrame([test_data[0::, 0], output], index=['PassengerId', 'Survived'], dtype='int').T
# ...and write them to a .csv file
result.to_csv('.\\data\\randomforestmodel.csv', index=False)
| {
"repo_name": "SgfPythonDevs/cboschert-intro2ml",
"path": "scripts/PandasML.py",
"copies": "1",
"size": "4904",
"license": "mit",
"hash": -7154288555571643000,
"line_mean": 50.0833333333,
"line_max": 150,
"alpha_frac": 0.7020799347,
"autogenerated": false,
"ratio": 3.090107120352867,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9278268416246269,
"avg_score": 0.002783727761319505,
"num_lines": 96
} |
__author__ = 'Chao'
import numpy as np
from sklearn import svm, cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Mapping from class label (as stored in the label files) to activity name.
activity_label = {'1': 'WALKING',
                  '2': 'WALKING_UPSTAIRS',
                  '3': 'WALKING_DOWNSTAIRS',
                  '4': 'SITTING',
                  '5': 'STANDING',
                  '6': 'LAYING'}
# ############################# Open data set ###############################
X = []      # training feature rows (lists of floats)
y = []      # training labels (ints)
X_fin = []  # held-out final evaluation features
y_fin = []  # held-out final evaluation labels
print "Opening dataset..."
try:
    with open("X_train.txt", 'rU') as f:
        res = list(f)
        for line in res:
            # NOTE(review): strip() returns a new string; this call discards
            # its result, so any trailing newline survives into split() below.
            line.strip("\n")
            pair = line.split(" ")
            while pair.__contains__(""):
                pair.remove("")
            for i in xrange(pair.__len__()):
                pair[i] = float(pair[i])
            X.append(pair)
        f.close()  # redundant: the with-block already closes f
    with open("y_train.txt", 'rU') as f:
        res = list(f)
        for line in res:
            y.append(int(line.strip("\n")[0]))
        f.close()  # redundant: the with-block already closes f
except:
    print "Error in reading the train set file."
    exit()
try:
    with open("X_test.txt", 'rU') as f:
        res = list(f)
        for line in res:
            line.strip("\n")
            pair = line.split(" ")
            while pair.__contains__(""):
                pair.remove("")
            for i in xrange(pair.__len__()):
                pair[i] = float(pair[i])
            X_fin.append(pair)
        f.close()  # redundant: the with-block already closes f
    with open("y_test.txt", 'rU') as f:
        res = list(f)
        for line in res:
            y_fin.append(int(line.strip("\n")[0]))
        f.close()  # redundant: the with-block already closes f
except:
    # NOTE(review): message says "train set" but this branch handles the
    # test-set files.
    print "Error in reading the train set file."
    exit()
print "Dataset opened."
X = np.array(X)
y = np.array(y)
###### Separate data set into 70% training set and 30% test set
print "Separating data into 70% training set & 30% test set..."
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.3)
print "Dataset separated."
###### Get best parameters ######
############################### Kernel=Linear ###############################
print "######## SVM, Kernel = Linear #########"
#C_linear = [0.1, 1, 10, 100]
C_linear = [3]
result_linear = []
print "C value chosen from: ", C_linear
print "Calculating accuracy with K-fold..."
for C in C_linear:
    svc_linear = svm.SVC(kernel='linear', C=C)
    scores = cross_validation.cross_val_score(svc_linear, X_train, y_train, scoring='accuracy', cv=6)
    result_linear.append(scores.mean())
print "result:", result_linear
#Result with different C are equal, so here choose C=1 directly as the best parameter.
best_param_linear = {"C": 3}
#linear_test_score = svm.SVC(kernel='linear', C=best_param_linear.get("C")).fit(X_test, y_test).score(X_test, y_test)
#rbf_test_score = svm.SVC(kernel='rbf', C=best_param_rbf.get("C"), gamma=best_param_rbf.get("gamma")).fit(X_test, y_test).score(X_test, y_test)
#poly_test_score = svm.SVC(kernel='poly', C=best_param_poly.get("C"), degree=best_param_poly.get("degree")).fit(X_test, y_test).score(X_test, y_test)
# Refit the chosen SVM on the full training data, then score it example by
# example against the held-out X_fin/y_fin files.
linear_test = svm.SVC(kernel='linear', C=best_param_linear.get("C")).fit(X, y)
count1 = 0  # correct predictions
count2 = 0  # total predictions
for i in xrange(X_fin.__len__()):
    count2 += 1
    a = linear_test.predict(X_fin[i])
    b = y_fin[i]
    if a == [b]:
        count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
#print "Linear Kernel test score: ", linear_test_score
#print "RBF Kernel test score: ", rbf_test_score
#print "Poly Kernel test score: ", poly_test_score
################################### Random Forests ####################################
print "##### Random Forest ######"
n_estimators_list = range(1, 16, 1)
result_random_forests = []
max_score_rf = float("-inf")
best_param_rf = None
# Grid-search the tree count by 6-fold cross-validation on the training split.
for n_estimators in n_estimators_list:
    print "Testing n_estimators = ", n_estimators
    rf_clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=None, min_samples_split=1, random_state=0)
    scores = cross_validation.cross_val_score(rf_clf, X_train, y_train, scoring="accuracy", cv=6)
    result_random_forests.append(scores.mean())
    if scores.mean() > max_score_rf:
        max_score_rf = scores.mean()
        best_param_rf = {"n_estimators": n_estimators}
print "number of trees: ", n_estimators_list
print "results: ", result_random_forests
print "best accuracy: ", max_score_rf
print "best parameter: ", best_param_rf
# NOTE(review): this fits AND scores on the same X_test/y_test, so it reports
# training accuracy of that refit, not genuine held-out accuracy.
rf_clf_test_score = RandomForestClassifier(n_estimators=best_param_rf.get("n_estimators"), max_depth=None,
                                           min_samples_split=1, random_state=0).fit(X_test, y_test).score(X_test,
                                                                                                          y_test)
print "Test set accuracy: ", rf_clf_test_score
rf_clf = RandomForestClassifier(n_estimators=best_param_rf.get("n_estimators"), max_depth=None, min_samples_split=1,
                                random_state=0).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(X_fin.__len__()):
    count2 += 1
    a = rf_clf.predict(X_fin[i])
    b = y_fin[i]
    print "+ ", a[0],
    print "- ", b
    if a == [b]:
        count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
################################### K Nearest Neighbors ####################################
print "##### K Nearest Neighbors ######"
n_neighbors_list = range(1, 6, 1)
result_n_neighbors = []
max_score_knn = float("-inf")
best_param_knn = None
# Same cross-validated grid search as above, over the neighbour count.
for n_neighbors in n_neighbors_list:
    print "Testing n_neighbors = ", n_neighbors
    neigh = KNeighborsClassifier(n_neighbors=n_neighbors)
    scores = cross_validation.cross_val_score(neigh, X_train, y_train, scoring="accuracy", cv=6)
    result_n_neighbors.append(scores.mean())
    if scores.mean() > max_score_knn:
        max_score_knn = scores.mean()
        best_param_knn = {"n_neighbors": n_neighbors}
print "number of neighbors: ", n_neighbors_list
print "results: ", result_n_neighbors
print "best accuracy: ", max_score_knn
print "best parameter: ", best_param_knn
neigh_test_score = KNeighborsClassifier(best_param_knn.get("n_neighbors")).fit(X_test, y_test).score(X_test, y_test)
print "Test set accuracy: ", neigh_test_score
neigh = KNeighborsClassifier(best_param_knn.get("n_neighbors")).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(X_fin.__len__()):
    count2 += 1
    a = neigh.predict(X_fin[i])
    b = y_fin[i]
    if a == [b]:
        count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
| {
"repo_name": "Sapphirine/Human-Activity-Monitoring-and-Prediction",
"path": "analysis.py",
"copies": "1",
"size": "6718",
"license": "apache-2.0",
"hash": -3678663670991686700,
"line_mean": 33.8082901554,
"line_max": 149,
"alpha_frac": 0.575915451,
"autogenerated": false,
"ratio": 3.24697921701305,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.432289466801305,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Chapin Bryce'
__version__ = 0.00
import os
import datetime
import logging
#
# Main function. Callable by other scripts
#
# Directory containing this script; used to resolve the default config path.
base = os.path.dirname(os.path.realpath(__file__))
def main(outpath, targ, rule, os_type, config):
    """Run a targeted forensic collection against *targ*, writing to *outpath*.

    :param outpath: output directory (created if missing); also holds lcdic.log
    :param targ: path to the root of the evidence volume
    :param rule: Yara keyword or rules-file path; empty string disables Yara
    :param os_type: one of 'winxp', 'win7', 'ubu13'
    :param config: dict produced by _config_parser()
    :raises UserWarning: if os_type is not a supported OS key
    """
    start = datetime.datetime.now()
    if not os.path.exists(outpath):
        os.makedirs(outpath)
    logging.basicConfig(filename=outpath + '/lcdic.log',
                        level=logging.DEBUG,
                        format='%(asctime)s | %(levelname)s | %(message)s')
    case = config['case_number']
    eid = config['eid']
    examiner = config['name']
    targeted_user = config['target_users']
    d = config['d']
    extensions = config['extensions']
    ext_for_users = config['ext_for_users']
    # Enable STDOUT Printing for debug
    if d:
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
    logging.info('Start of collection')
    logging.info('Case Number: ' + case)
    logging.info('Evidence Number: ' + eid)
    logging.info('Examiner Name: ' + examiner)
    logging.info('Evidence: ' + targ)
    logging.info('Output directory: ' + outpath)
    logging.info('Configuration File: ' + config['config_path'])
    logging.info('Yara Rule: ' + rule)
    # Project-local collector modules (function-scope import).
    from collectors import windows
    from collectors import debian
    from collectors import search
    coll = None
    # Create the collector object matching the evidence OS.
    if os_type == 'winxp':
        coll = windows.WinXP()
        logging.info('Collection of Windows XP Initialized')
    elif os_type == 'win7':
        coll = windows.Win7()
        logging.info('Collection of Windows 7 Initialized')
    elif os_type == 'ubu13':
        coll = debian.Ubuntu13()
        logging.info('Collection of Ubuntu 13 Initialized')
    else:
        raise UserWarning('Invalid OS Selected')
    # Initialize Class Items based on input
    coll.targ = targ
    coll.case = case
    coll.eid = eid
    coll.dest = outpath
    coll.hashtype = config['hashtype']
    if targeted_user:
        coll.target_user = targeted_user
    if extensions:
        coll.extensions = extensions
        coll.ext_for_users = ext_for_users
    coll.setup()
    # Run the collection and gather the list of files to package.
    paths_to_process = coll.collector()
    # Optionally add Yara-matched files to the collection set.
    if rule:
        logging.info('Yara Searching Started')
        ysearch = search.YaraSearch(rule, targ)
        rules = ysearch.run()
        logging.info('Yara Searching Completed')
        for r in rules:
            paths_to_process.append(r['file'])
    logging.info("Creation of TarBall and Hashing started")
    coll.complete_collection(paths_to_process)
    logging.info("Creation of TarBall and Hashing completed")
    end = datetime.datetime.now()
    logging.info('Run time: ' + str(end - start))
    logging.info("LCDI Collector Completed")
#
# End of Main Function
#
#
# Start parsing for __main__ script
#
def _argparse():
    """
    Parse Args & return object
    :return: argparse.Namespace with targ, dest, os, config and rule attributes
    """
    import argparse
    # NOTE(review): the version= keyword on ArgumentParser is Python 2
    # argparse only; Python 3 removed it in favor of a 'version' action.
    parser = argparse.ArgumentParser(description='LCDI Collector, a script to automate targeted collections. '
                                                 'See config.ini to set optional information and configurations',
                                     version=__version__, epilog='Created by ' + __author__)
    parser.add_argument('targ', metavar='C:', help="Path to the root of the targeted volume")
    parser.add_argument('dest', metavar='/path/to/output', help="Path to the root of the output directory, "
                                                                "will create if it does not exist")
    parser.add_argument('os', metavar='list', help='Select OS. type `list` for list of supports OS\'s')
    parser.add_argument('-c', '--config', help='Path to custom config file. Default is config/config.ini',
                        default=base+'/config/config.ini')
    parser.add_argument('-r', '--rule', help='Yara Search Term (single string keyword) or Path to custom Yara rules '
                                             'file. Sample located in config/yara.rules', default='')
    return parser.parse_args()
def _config_parser(config):
    """
    Load the INI configuration file at *config* into a plain dictionary.

    :param config: string path to configuration file
    :return: dict of the [Options] values plus the config path itself;
             'extensions' and 'target_users' are comma-split lists,
             'd' and 'ext_for_users' are booleans.
    """
    from ConfigParser import SafeConfigParser
    ini = SafeConfigParser()
    ini.read(config)

    def _opt(key):
        # Shorthand for a plain string value from the [Options] section.
        return ini.get('Options', key)

    settings = {
        'config_path': config,
        'case_number': _opt('case_number'),
        'eid': _opt('eid'),
        'name': _opt('name'),
        'extensions': _opt('extensions').split(','),
        'target_users': _opt('target_users').split(','),
        'd': ini.getboolean('Options', 'd'),
        'ext_for_users': ini.getboolean('Options', 'ext_for_users'),
        'hashtype': _opt('hashtype'),
    }
    return settings
#
# Start Main Program
#
if __name__ == '__main__':
    args = _argparse()
    if args.config:
        config = _config_parser(args.config)
    else:
        # NOTE(review): raising a plain string is invalid in modern Python
        # (TypeError), and this branch is unreachable anyway because the
        # --config argument has a default value.
        raise 'Config File not found. Please specify or replace the deafult'
    # init vars
    os_type = ''
    coll = None
    outpath = ''
    # Convert OS to Process
    # TODO Have a method to auto-determine OS of evidence
    supported_os = {'win7': 'Windows 7', 'winxp': 'Windows XP', 'ubu13': 'Ubuntu 13'}
    if args.os.lower() in supported_os.keys():
        os_type = args.os.lower()
    elif args.os.lower() == 'list':
        print 'OS Name'.ljust(20) + 'Name to supply'
        # NOTE(review): the loop variable shadows the os module; harmless here
        # only because quit() is called before os.path is used again.
        for os in supported_os.keys():
            print supported_os[os].ljust(20) + os
        quit()
    else:
        print('Invalid Arguments: Select from list below')
        print 'OS Name'.ljust(20) + 'Name to supply'
        for os in supported_os.keys():
            print supported_os[os].ljust(20) + os
        quit()
    outpath = os.path.join(args.dest, config['case_number'])
    main(outpath, args.targ, args.rule, os_type, config)
| {
"repo_name": "lcdi/LCDIC",
"path": "lcdic.py",
"copies": "1",
"size": "6283",
"license": "mit",
"hash": -7154462556837671000,
"line_mean": 31.056122449,
"line_max": 117,
"alpha_frac": 0.61403788,
"autogenerated": false,
"ratio": 3.8688423645320196,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49828802445320197,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chapter09'
"""
a fat tree implementation within the mininet envrionment
"""
from mininet.topo import Topo


class FatTree(Topo):
    "Create a k-ary fat-tree topology (core, aggregation, ToR switches, hosts)."

    def __init__(self, k=4):
        '''Init.
        @param k number of ports per switch; must be a positive even number.

        Builds (k/2)^2 core switches and k pods, then wires aggregation
        switch j of every pod to core switches [j*k/2, (j+1)*k/2).
        Note: k/2 is integer division under Python 2.
        '''
        if (k % 2) != 0 or k <= 0:
            print "ERROR: k should be even number"
            exit(0)
        # Initialize topology
        Topo.__init__(self)
        self.coreSwitches = []
        self.pods = []  # each entry is a (aggrSwitches, torSwitches, hosts) tuple
        #core: (k/2)^2 switches named cs_<i>
        for i in range(0, (k/2)**2):
            self.coreSwitches.append(self.addSwitch("cs_" + str(i)))
        for i in range(0, k):
            pod = self.__createPod(str(i), k)
            self.pods.append(pod)
        #aggreagate <--> core
        for pod in range(0, len(self.pods)):
            for aggr in range(0, len(self.pods[pod][0])):
                aggrSwitch = self.pods[pod][0][aggr]
                for core in range(aggr*k/2, (aggr+1)*k/2):
                    self.addLink(self.coreSwitches[core], aggrSwitch)

    def __createPod(self, podId, k):
        '''Create one pod: k/2 aggregation switches, k/2 ToR switches and
        (k/2)^2 hosts, with the ToR layer fully meshed to the aggregation
        layer and k/2 hosts hanging off each ToR switch.

        @param podId string pod index used in node names
        @param k number of ports per switch
        @return tuple (aggrSwitches, torSwitches, hosts)
        '''
        aggrSwitches = []
        torSwitches = []
        hosts = []
        #aggregate and tor
        for i in range(0, k/2):
            aggrSwitches.append(self.addSwitch("as_" + podId \
                + "_" + str(i)))
            torSwitches.append(self.addSwitch("ts_" + podId \
                + "_" + str(i)))
        #tor <--> aggregate: full bipartite mesh within the pod
        for tor in range(0, len(torSwitches)):
            for aggr in range(0, len(aggrSwitches)):
                self.addLink(torSwitches[tor], aggrSwitches[aggr])
        #host <--> tor: k/2 hosts per ToR switch
        for tor in range(0, len(torSwitches)):
            for host in range(tor*k/2, (tor+1)*k/2):
                h = self.addHost("h_" + podId + \
                    "_" + str(host))
                hosts.append(h)
                self.addLink(torSwitches[tor], h)
        return (aggrSwitches, torSwitches, hosts)


# Exposed so mininet can build this topology via --custom/--topo fattree,<k>.
topos = {'fattree': (lambda k: FatTree(k))}
| {
"repo_name": "chapter09/mininet_misc",
"path": "fattree.py",
"copies": "1",
"size": "2014",
"license": "apache-2.0",
"hash": 6680155477093971000,
"line_mean": 26.9722222222,
"line_max": 69,
"alpha_frac": 0.4920556107,
"autogenerated": false,
"ratio": 3.4545454545454546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44466010652454546,
"avg_score": null,
"num_lines": null
} |
__author__ = 'chapter'
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


def weight_varible(shape):
    """Create a weight Variable initialized from a truncated normal (std 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    """Create a bias Variable initialized to the constant 0.1."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):
    """2-D convolution with stride 1 and SAME (zero) padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    """2x2 max pooling with stride 2 (halves the spatial dimensions)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print("Download Done!")
sess = tf.InteractiveSession()
# paras
W_conv1 = weight_varible([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# conv layer-1: 28x28x1 -> 28x28x32, pooled to 14x14x32
x = tf.placeholder(tf.float32, [None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# conv layer-2: 14x14x32 -> 14x14x64, pooled to 7x7x64
W_conv2 = weight_varible([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# full connection
W_fc1 = weight_varible([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# dropout
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# output layer: softmax
W_fc2 = weight_varible([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
y_ = tf.placeholder(tf.float32, [None, 10])
# model training: cross-entropy loss minimized with Adam
# NOTE(review): tf.arg_max and tf.initialize_all_variables are deprecated
# TF1 names (tf.argmax / tf.global_variables_initializer replace them).
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.arg_max(y_conv, 1), tf.arg_max(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.initialize_all_variables())
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        # Report accuracy on the current batch with dropout disabled.
        train_accuacy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g"%(i, train_accuacy))
    train_step.run(feed_dict = {x: batch[0], y_: batch[1], keep_prob: 0.5})
# accuacy on test
print("test accuracy %g"%(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))) | {
"repo_name": "samleoqh/machine-ln",
"path": "src/tflow/tf_cnn_example.py",
"copies": "1",
"size": "2351",
"license": "mit",
"hash": -5436775930724392000,
"line_mean": 28.4,
"line_max": 114,
"alpha_frac": 0.6626967248,
"autogenerated": false,
"ratio": 2.459205020920502,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3621901745720502,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charles Leifer'
__license__ = 'MIT'
__version__ = '0.4.8'
from huey.api import Huey, crontab

# RedisHuey is only functional when the optional "redis" package is
# importable; otherwise a stub raising at construction time is exported under
# the same name, so `from huey import RedisHuey` never fails at import time.
try:
    import redis
    from huey.backends.redis_backend import RedisBlockingQueue
    from huey.backends.redis_backend import RedisDataStore
    from huey.backends.redis_backend import RedisEventEmitter
    from huey.backends.redis_backend import RedisSchedule

    class RedisHuey(Huey):
        # Huey wired to Redis-backed queue, result store, schedule and events.
        def __init__(self, name='huey', store_none=False, always_eager=False,
                     read_timeout=None, **conn_kwargs):
            # conn_kwargs are forwarded to every Redis-backed component.
            queue = RedisBlockingQueue(
                name,
                read_timeout=read_timeout,
                **conn_kwargs)
            result_store = RedisDataStore(name, **conn_kwargs)
            schedule = RedisSchedule(name, **conn_kwargs)
            events = RedisEventEmitter(name, **conn_kwargs)
            super(RedisHuey, self).__init__(
                queue=queue,
                result_store=result_store,
                schedule=schedule,
                events=events,
                store_none=store_none,
                always_eager=always_eager)
except ImportError:
    class RedisHuey(object):
        # Placeholder exported when redis is missing; fails loudly when used.
        def __init__(self, *args, **kwargs):
            raise RuntimeError('Error, "redis" is not installed. Install '
                               'using pip: "pip install redis"')

# Same pattern for the sqlite backends; note SqliteHuey has no event emitter
# (events=None) and requires an explicit database file location.
try:
    from huey.backends.sqlite_backend import SqliteQueue
    from huey.backends.sqlite_backend import SqliteDataStore
    from huey.backends.sqlite_backend import SqliteSchedule

    class SqliteHuey(Huey):
        def __init__(self, name='huey', store_none=False, always_eager=False,
                     location=None):
            if location is None:
                raise ValueError("Please specify a database file with the "
                                 "'location' parameter")
            queue = SqliteQueue(name, location)
            result_store = SqliteDataStore(name, location)
            schedule = SqliteSchedule(name, location)
            super(SqliteHuey, self).__init__(
                queue=queue,
                result_store=result_store,
                schedule=schedule,
                events=None,
                store_none=store_none,
                always_eager=always_eager)
except ImportError:
    class SqliteHuey(object):
        def __init__(self, *args, **kwargs):
            raise RuntimeError('Error, "sqlite" is not installed.')
| {
"repo_name": "angvp/huey",
"path": "huey/__init__.py",
"copies": "4",
"size": "2434",
"license": "mit",
"hash": 8191345308639195000,
"line_mean": 38.2580645161,
"line_max": 77,
"alpha_frac": 0.5846343468,
"autogenerated": false,
"ratio": 4.2404181184668985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 62
} |
__author__ = 'Charles Leifer'
__license__ = 'MIT'
__version__ = '0.4.9'
from huey.api import Huey, crontab

# RedisHuey is only functional when the optional "redis" package is
# importable; otherwise a stub raising at construction time is exported under
# the same name, so `from huey import RedisHuey` never fails at import time.
try:
    import redis
    from huey.backends.redis_backend import RedisBlockingQueue
    from huey.backends.redis_backend import RedisDataStore
    from huey.backends.redis_backend import RedisEventEmitter
    from huey.backends.redis_backend import RedisSchedule

    class RedisHuey(Huey):
        # Huey wired to Redis-backed queue, result store, schedule and events.
        def __init__(self, name='huey', store_none=False, always_eager=False,
                     read_timeout=None, **conn_kwargs):
            # conn_kwargs are forwarded to every Redis-backed component.
            queue = RedisBlockingQueue(
                name,
                read_timeout=read_timeout,
                **conn_kwargs)
            result_store = RedisDataStore(name, **conn_kwargs)
            schedule = RedisSchedule(name, **conn_kwargs)
            events = RedisEventEmitter(name, **conn_kwargs)
            super(RedisHuey, self).__init__(
                queue=queue,
                result_store=result_store,
                schedule=schedule,
                events=events,
                store_none=store_none,
                always_eager=always_eager)
except ImportError:
    class RedisHuey(object):
        # Placeholder exported when redis is missing; fails loudly when used.
        def __init__(self, *args, **kwargs):
            raise RuntimeError('Error, "redis" is not installed. Install '
                               'using pip: "pip install redis"')

# Same pattern for the sqlite backends; note SqliteHuey has no event emitter
# (events=None) and requires an explicit database file location.
try:
    from huey.backends.sqlite_backend import SqliteQueue
    from huey.backends.sqlite_backend import SqliteDataStore
    from huey.backends.sqlite_backend import SqliteSchedule

    class SqliteHuey(Huey):
        def __init__(self, name='huey', store_none=False, always_eager=False,
                     location=None):
            if location is None:
                raise ValueError("Please specify a database file with the "
                                 "'location' parameter")
            queue = SqliteQueue(name, location)
            result_store = SqliteDataStore(name, location)
            schedule = SqliteSchedule(name, location)
            super(SqliteHuey, self).__init__(
                queue=queue,
                result_store=result_store,
                schedule=schedule,
                events=None,
                store_none=store_none,
                always_eager=always_eager)
except ImportError:
    class SqliteHuey(object):
        def __init__(self, *args, **kwargs):
            raise RuntimeError('Error, "sqlite" is not installed.')
| {
"repo_name": "antoviaque/huey",
"path": "huey/__init__.py",
"copies": "1",
"size": "2434",
"license": "mit",
"hash": -1805909005860005600,
"line_mean": 38.2580645161,
"line_max": 77,
"alpha_frac": 0.5846343468,
"autogenerated": false,
"ratio": 4.2404181184668985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5325052465266898,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charles'
from StringIO import StringIO
from imghdr import what
from hashlib import md5
import os
from subprocess import *
import logging
import sys
import subprocess
from PIL import Image
# from ssim import compute_ssim
from webm import handlers
from webm import decode
import time
import shutil
from collections import defaultdict
# Configure the root logger at DEBUG so all helpers below can log freely.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# from memory_profiler import profile
def image_pixel_type_detection(width, height):
    """Bucket an image into a coarse size class by pixel count and dimensions.

    :param width: image width in pixels
    :param height: image height in pixels
    :return: 'Tiny', 'Small', 'Middle', 'Large' or 'Unknown'
    """
    pixels = width * height
    if pixels < 5000:
        return 'Tiny'
    # Small: low pixel count, or a skinny image with one dimension <= 150.
    if (5000 <= pixels < 50000) or (pixels >= 5000 and (width <= 150 or height <= 150)):
        return 'Small'
    if (50000 <= pixels < 250000) and width >= 150 and height >= 150:
        return 'Middle'
    if pixels >= 250000 and width >= 150 and height >= 150:
        return 'Large'
    return 'Unknown'
def image_type_detection(body):
    """Return the real image type of *body* (raw image file bytes).

    Tries imghdr first; on a miss, probes for WebP via the webm bindings.
    Returns '-' for empty input and 'webp' when the probe succeeds.
    NOTE(review): when imghdr misses AND the WebP probe fails, the value
    returned is imghdr's None -- the initial 'unknown' is overwritten below
    and never actually returned.
    """
    common_type = 'unknown'
    if not body:
        return '-'
    image_fp = StringIO(body)
    common_type = what(image_fp)
    if common_type:
        return common_type
    else:
        try:
            data = image_fp.read()
            width, height = decode.GetInfo(data)
            ob = handlers.WebPHandler(bytearray(data), width, height)
            if ob.is_valid:
                common_type = 'webp'
        except Exception as e:
            pass
        finally:
            # Returning from finally swallows any exception raised in the try
            # block; here that serves as the fallback path for invalid WebP.
            return common_type
def get_image_info(real_image_type, body):
    """ only in webp, gif, png, jpeg, bmp

    Return (md5_hex, width, height, pixel_count) for the image bytes *body*.
    WebP dimensions come from the webm bindings; every other type goes
    through PIL.

    :param real_image_type: type string as produced by image_type_detection()
    :param body: raw image bytes
    """
    image_fp = StringIO(body)
    if real_image_type == 'webp':
        data = image_fp.read()
        width, height = decode.GetInfo(data)
        image_pix_count = int(width) * int(height)
    else:
        image = Image.open(image_fp)
        width, height = image.size
        image_pix_count = width * height
    # Rewind so the md5 covers the whole payload, not just the remainder.
    image_fp.seek(0)
    md5_code = md5(image_fp.read()).hexdigest()
    return md5_code, width, height, image_pix_count
def compress_image_by_webp(body):
    """Compress raw image bytes with ``cwebp`` at qualities 50, 70 and 75.

    The bytes are written to the scratch file ``cal_image``, then cwebp is
    invoked once per quality level, producing ``zip_image_<q>.webp``.

    :param body: raw image file contents
    :return: 9-tuple (size_50, md5_50, time_50, size_70, md5_70, time_70,
             size_75, md5_75, time_75); every element is '-' on failure.
    """
    results = []
    try:
        with open("cal_image", 'w') as w:
            w.write(body)
        # FIX: the original opened os.devnull (and each .webp output) without
        # ever closing them; with-blocks now release the handles.
        with open(os.devnull, 'w') as FNULL:
            for quality in ("50", "70", "75"):
                out_name = "zip_image_{}.webp".format(quality)
                start = time.time()
                subprocess.call(["cwebp", "-q", quality, "cal_image", "-o", out_name],
                                stdout=FNULL, stderr=subprocess.STDOUT)
                elapsed = time.time() - start
                results.append(os.stat(out_name).st_size)
                with open(out_name) as compressed:
                    results.append(md5(compressed.read()).hexdigest())
                results.append(elapsed)
    except Exception as e:
        # FIX: the original used "error {} type:{}".format(e) -- two
        # placeholders with one argument -- so the handler itself raised
        # IndexError instead of logging. Matches the sibling functions now.
        logging.info("error {}".format(e))
        results = ['-'] * 9
    return tuple(results)
##########################################################
def compress_jpeg_file_by_webp75(jpeg_file):
    """Time a cwebp (quality 75) encode of *jpeg_file* plus a dwebp decode.

    Writes zip_image_75.webp and webp.png in the working directory.

    :param jpeg_file: path to the source image file
    :return: (cwebp_time, dwebp_time) in seconds, or ('-', '-') on failure
    """
    try:
        FNULL = open(os.devnull, 'w')
        start = time.time()
        # SECURITY NOTE: shell=True with an interpolated path permits shell
        # injection if jpeg_file is ever attacker-controlled.
        subprocess.call("cwebp -q 75 {} -o zip_image_75.webp".format(jpeg_file), shell=True, stdout=FNULL,
                        stderr=subprocess.STDOUT)
        end = time.time()
        cwebp_time = end - start
        FNULL = open(os.devnull, 'w')
        start = time.time()
        subprocess.call("dwebp zip_image_75.webp -o {}".format("webp.png"), shell=True, stdout=FNULL,
                        stderr=subprocess.STDOUT)
        end = time.time()
        dwebp_time = end - start
    except Exception as e:
        logging.info("error {}".format(e))
        cwebp_time, dwebp_time = '-', '-'
    return cwebp_time, dwebp_time
def compress_local_jpeg_file_by_webp75(jpeg_file):
    """Copy *jpeg_file* to cal_image, then time cwebp (q75) and dwebp runs.

    The cp step is timed separately -- presumably to measure encoding on a
    local copy independent of the source location; TODO confirm.

    :param jpeg_file: path to the source image file
    :return: (mv_time, cwebp_time, dwebp_time) in seconds, '-' values on failure
    """
    try:
        FNULL = open(os.devnull, 'w')
        start = time.time()
        subprocess.call("cp {} cal_image".format(jpeg_file), shell=True, stdout=FNULL,
                        stderr=subprocess.STDOUT)
        end = time.time()
        mv_time = end - start
        start = time.time()
        subprocess.call("cwebp -q 75 cal_image -o zip_image_75.webp", shell=True, stdout=FNULL,
                        stderr=subprocess.STDOUT)
        end = time.time()
        cwebp_time = end - start
        FNULL = open(os.devnull, 'w')
        start = time.time()
        subprocess.call("dwebp zip_image_75.webp -o {}".format("webp.png"), shell=True, stdout=FNULL,
                        stderr=subprocess.STDOUT)
        end = time.time()
        dwebp_time = end - start
    except Exception as e:
        logging.info("error {}".format(e))
        mv_time,cwebp_time, dwebp_time = '-', '-','-'
    return mv_time,cwebp_time, dwebp_time
def compress_jpeg_file_by_ziporxy(filename):
    """Compress filename with the local ziproxy demo binary and time the run.

    :param filename: path of the source image
    :return: runtime in seconds, or '-' on failure
    """
    try:
        with open(os.devnull, 'w') as FNULL:
            start = time.time()
            # List form + shell=False fixes the shell-injection risk of the
            # previous interpolated command string.
            subprocess.call(["./demo_median/demo", "-f", filename, "-o", "ziproxy_image.jpg"],
                            stdout=FNULL, stderr=subprocess.STDOUT)
            zip_time = time.time() - start
    except Exception as e:
        logging.info("error {}".format(e))
        zip_time = '-'
    return zip_time
def compress_local_jpeg_file_by_ziporxy(filename):
    """Copy filename locally, compress it with the ziproxy demo binary, time both steps.

    :param filename: path of the source image
    :return: (zip_time, mv_time) in seconds; both '-' on failure
             (order preserved from the original implementation)
    """
    try:
        with open(os.devnull, 'w') as FNULL:
            start = time.time()
            subprocess.call(["cp", filename, "cal_image"],
                            stdout=FNULL, stderr=subprocess.STDOUT)
            mv_time = time.time() - start
            start = time.time()
            subprocess.call(["./demo_median/demo", "-f", "cal_image", "-o", "ziproxy_image.jpg"],
                            stdout=FNULL, stderr=subprocess.STDOUT)
            zip_time = time.time() - start
    except Exception as e:
        logging.info("error {}".format(e))
        zip_time = '-'
        mv_time = '-'
    return zip_time, mv_time
###############################
def compress_image_by_webp75(body):
    """Write body to disk, compress it to WebP q75, and decode it back.

    :param body: raw image bytes/str to benchmark
    :return: (zip_size_75, md5_code_75, run_time_75, dwebp_time);
             every element is '-' on failure
    """
    try:
        with open("cal_image", 'w') as w:
            w.write(body)
        with open(os.devnull, 'w') as FNULL:
            start = time.time()
            subprocess.call(["cwebp", "-q", "75", "cal_image", "-o", "zip_image_75.webp"],
                            stdout=FNULL, stderr=subprocess.STDOUT)
            run_time_75 = time.time() - start
            zip_size_75 = os.stat("zip_image_75.webp").st_size
            # with-block closes the handle the original leaked via open().read().
            with open("zip_image_75.webp") as compressed:
                md5_code_75 = md5(compressed.read()).hexdigest()
            start = time.time()
            subprocess.call(["dwebp", "zip_image_75.webp", "-o", "web2png.jpg"],
                            stdout=FNULL, stderr=subprocess.STDOUT)
            dwebp_time = time.time() - start
    except Exception as e:
        logging.info("error {}".format(e))
        zip_size_75, md5_code_75, run_time_75, dwebp_time = '-', '-', '-', '-'
    return zip_size_75, md5_code_75, run_time_75, dwebp_time
def compress_image_by_ziporxy(body):
    """Write body to disk and compress it with the ziproxy demo binary.

    :param body: raw image bytes/str to benchmark
    :return: (zip_size_zp, md5_code_zp, run_time_zp); all '-' on failure
    """
    try:
        with open("cal_image", 'w') as w:
            w.write(body)
        with open(os.devnull, 'w') as FNULL:
            start = time.time()
            subprocess.call(["./demo_median/demo", "-f", "cal_image", "-o", "ziproxy_image.jpg"],
                            stdout=FNULL, stderr=subprocess.STDOUT)
            run_time_zp = time.time() - start
        zip_size_zp = os.stat("ziproxy_image.jpg").st_size
        with open("ziproxy_image.jpg") as compressed:
            md5_code_zp = md5(compressed.read()).hexdigest()
    except Exception as e:
        logging.info("error {}".format(e))
        zip_size_zp, md5_code_zp, run_time_zp = '-', '-', '-'
    return zip_size_zp, md5_code_zp, run_time_zp
###############################################
# @profile
def compute_webp_ssim():
    """Compute SSIM between the cached original and each WebP output (q50/q70/q75).

    :return: (ssim_50, ssim_70, ssim_75), or ('-', '-', '-') on failure
    """
    try:
        ssim_50 = compute_ssim("cal_image", "zip_image_50.webp")
        ssim_70 = compute_ssim("cal_image", "zip_image_70.webp")
        ssim_75 = compute_ssim("cal_image", "zip_image_75.webp")
    except Exception as e:
        # Bug fix: the original format string had two placeholders but only one
        # argument, so logging itself raised IndexError inside this handler.
        logging.info("error {}".format(e))
        ssim_50, ssim_70, ssim_75 = '-', '-', '-'
    return ssim_50, ssim_70, ssim_75
def convert_webp_to_png():
    """Decode zip_image.webp to web2png.png with dwebp and time the conversion.

    :return: runtime in seconds, or '-' on failure
    """
    try:
        with open(os.devnull, 'w') as FNULL:
            start = time.time()
            subprocess.call(["dwebp", "zip_image.webp", "-o", "web2png.png"],
                            stdout=FNULL, stderr=subprocess.STDOUT)
            run_time = time.time() - start
    except Exception as e:
        # Log instead of silently swallowing, matching the sibling helpers.
        logging.info("error {}".format(e))
        run_time = '-'
    return run_time
def ziprxoy_zip():
    """Compress the cached cal_image with the ziproxy demo binary and time it.

    :return: runtime in seconds, or '-' on failure
    """
    try:
        with open(os.devnull, 'w') as FNULL:
            start = time.time()
            # List form + shell=False replaces the shell command string.
            subprocess.call(["./demo/demo", "-f", "cal_image", "-o", "ziproxy_image"],
                            stdout=FNULL, stderr=subprocess.STDOUT)
            run_time = time.time() - start
    except Exception as e:
        # Log instead of silently swallowing, matching the sibling helpers.
        logging.info("error {}".format(e))
        run_time = '-'
    return run_time
def cal_ssim(body):
    """Compress body at three quality presets and measure SSIM against the original.

    :param body: raw image bytes/str
    :return: (high_ssim, median_ssim, low_ssim, high_size, median_size, low_size);
             values remain '-' for any step that fails
    """
    high_ssim, median_ssim, low_ssim = '-', '-', '-'
    high_size, median_size, low_size = '-', '-', '-'
    try:
        with open("ssim_ori_image", 'w') as w:
            w.write(body)
        with open(os.devnull, 'w') as FNULL:
            # One compression run per preset binary (./demo_<preset>/demo).
            for preset in ("high", "median", "low"):
                subprocess.call(["./demo_{}/demo".format(preset), "-f", "ssim_ori_image",
                                 "-o", "ssim_" + preset],
                                stdout=FNULL, stderr=subprocess.STDOUT)
        high_size = os.stat("ssim_high").st_size
        median_size = os.stat("ssim_median").st_size
        low_size = os.stat("ssim_low").st_size
        high_ssim = compute_ssim("ssim_ori_image", "ssim_high")
        median_ssim = compute_ssim("ssim_ori_image", "ssim_median")
        low_ssim = compute_ssim("ssim_ori_image", "ssim_low")
    except Exception as e:
        logging.info("error {} ".format(e))
    return high_ssim, median_ssim, low_ssim, high_size, median_size, low_size
def get_ziproxy_total_ssim(body):
    """Sweep ziproxy quality 5..95 (step 5) over body, recording size and SSIM.

    :param body: raw image bytes/str
    :return: tab-separated "size<TAB>ssim" pairs across all qualities,
             or None on failure (explicit now; the original fell off the end)
    """
    size_dict = defaultdict(int)
    ssim_dict = defaultdict(float)
    try:
        with open("ssim_ori_image", 'w') as w:
            w.write(body)
        with open(os.devnull, 'w') as FNULL:
            for qf in range(5, 100, 5):
                out_name = "ssim_{}".format(qf)
                subprocess.call(["./demo/demo", "-f", "ssim_ori_image",
                                 "-o", out_name, "-q", str(qf)],
                                stdout=FNULL, stderr=subprocess.STDOUT)
                size_dict[qf] = os.stat(out_name).st_size
                ssim_dict[qf] = compute_ssim("ssim_ori_image", out_name)
        # Same ordering as the original reduce/map chain: size then ssim per quality.
        return "\t".join("{}\t{}".format(size, ssim)
                         for size, ssim in zip(size_dict.values(), ssim_dict.values()))
    except Exception as e:
        logging.info("ziprxy error {} ".format(e))
        return None
if __name__ == "__main__":
# print compute_ssim("/Users/Charles/Desktop/4.jpg", "/Users/Charles/Desktop/70.webp")
# print compute_ssim("/Users/Charles/Desktop/4.jpg", "/Users/Charles/Desktop/50.webp")
print get_ziproxy_total_ssim("1.jpg") | {
"repo_name": "CharlesZhong/Mobile-Celluar-Measure",
"path": "http_parser/image.py",
"copies": "1",
"size": "12607",
"license": "mit",
"hash": 5067858890870215000,
"line_mean": 32.8924731183,
"line_max": 127,
"alpha_frac": 0.5587372095,
"autogenerated": false,
"ratio": 3.3466949827448897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9396885790166665,
"avg_score": 0.0017092804156452611,
"num_lines": 372
} |
__author__ = 'charles'
from twitter_connection import TwitterAgent
from IRToolKit import TF_IDF
import multiprocessing
# Twitter handles of major news outlets used as seed accounts for the crawl.
seed_list = [
    "nytimes",
    "BBC",
    "TheEconomist",
    "CBCNews",
    "Forbes",
    "CNN",
    "washingtonpost",
    "Reuters",
    "globeandmail",
]
# Substitution table used by clean_data: marker -> (marker substring, placeholder).
# Any token containing the marker substring is replaced by the placeholder word.
replace_dict = {
    "http": ("http", "url"),
    "@" : ("@", "id"),
}
def clean_data(input_file):
    """Replace URL and @-mention tokens in each line with generic placeholders.

    Bug fix: the original built each cleaned line into `buffer` and then
    discarded it, so the function had no observable effect. The cleaned lines
    are now collected and returned (backward compatible: the previous return
    value was None and unused).

    :param input_file: iterable of text lines (e.g. an open file)
    :return: list of cleaned lines; tokens are space-joined with a trailing space
    """
    cleaned_lines = []
    for line in input_file:
        buffer = ""
        for word in line.split():
            if replace_dict["http"][0] in word:
                buffer += replace_dict["http"][1] + " "
            elif replace_dict["@"][0] in word:
                buffer += replace_dict["@"][1] + " "
            else:
                buffer += word + " "
        cleaned_lines.append(buffer)
    return cleaned_lines
def read_tweets_id_list(file_name):
    """Read one twitter id per line from file_name and return them as a list.

    :param file_name: path of the id file
    :return: list of stripped, non-delimited id strings (one per input line)
    """
    with open(file_name) as handle:
        return [entry.strip() for entry in handle]
def thread_worker(seed_id):
agent = TwitterAgent()
agent.request(seed_id)
tweets = agent.get_tweets(seed_id)
with open("inputs/"+seed_id+".tweets", "w") as f:
for tweet in tweets["tweets"]:
f.write(tweet+"\n")
with open("inputs/"+seed_id+".retweets", "w") as f:
for tweet in tweets["retweets"]:
f.write(tweet[0]+": "+tweet[1]+"\n")
with open("inputs/"+seed_id+".tweets") as f:
handler = TF_IDF.DocumentHandler(f, frequency_filter=3)
result = handler.get_term_frequency()
print result
# Run-mode switches consumed by the __main__ block.
MINING = True  # actually fetch and mine tweets in the sequential branches
PARALLEL = True  # crawl all accounts concurrently with a multiprocessing pool
MANY = False  # when not PARALLEL: crawl every id in tmp/twitter_ids.txt sequentially
if __name__ == "__main__":
    # Entry point: parallel crawl, sequential crawl, or a single hard-coded account.
    # The MANY and single-account branches previously duplicated thread_worker's
    # body verbatim; they now call it directly (identical behavior, one code path).
    if PARALLEL:
        pool = multiprocessing.Pool()
        pool.map(thread_worker, read_tweets_id_list("tmp/twitter_ids.txt"))
    elif MANY:
        for account in read_tweets_id_list("tmp/twitter_ids.txt"):
            if MINING:
                thread_worker(account)
    else:
        if MINING:
            thread_worker("BBCFood")
print "done" | {
"repo_name": "TextMiningToolKitTeam/TwitterMining",
"path": "subjective_objective_miner.py",
"copies": "1",
"size": "3335",
"license": "isc",
"hash": 914793429683365400,
"line_mean": 31.3883495146,
"line_max": 80,
"alpha_frac": 0.5166416792,
"autogenerated": false,
"ratio": 3.57449088960343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.459113256880343,
"avg_score": null,
"num_lines": null
} |
__author__ = 'charles'
import os
def CreateTwitterSet(seed_id):
    """Collect unique single-word retweeter ids from data_pool/<seed_id>.retweets
    and write them to tmp/<seed_id>_retweets_ids.txt (one per line).

    Bug fix: the seed_id parameter was ignored and "washingtonpost" was
    hard-coded; the parameter is now honored.

    :param seed_id: twitter handle whose .retweets dump should be processed
    """
    id_set = set()
    twitter_id = seed_id
    with open("data_pool/" + twitter_id + ".retweets") as f:
        for line in f:
            words = line.split(": ")
            # Only keep ids that are a single token (skip malformed prefixes).
            if len(words[0].split()) == 1:
                id_set.add(words[0])
    with open("tmp/" + twitter_id + "_retweets_ids.txt", "w") as f:
        for word in id_set:
            f.write(word + "\n")
def CalculateNumberTweets(folder_path, extension=None):
    """Count the total number of lines across the files in folder_path.

    The original duplicated the counting loop in both branches; a single
    guard clause now handles the extension filter.

    :param folder_path: directory containing the dump files
    :param extension: if given, only count files whose final dot-suffix equals it
    :return: total line count
    """
    counter = 0
    for filename in os.listdir(folder_path):
        # Skip files whose suffix does not match the requested extension.
        if extension is not None and filename.split(".")[-1] != extension:
            continue
        with open(os.path.join(folder_path, filename)) as f:
            for _ in f:
                counter += 1
    return counter
if __name__ == "__main__":
    # Ad-hoc check: print the total number of retweet lines in the data pool.
    print CalculateNumberTweets("data_pool", "retweets")
| {
"repo_name": "TextMiningToolKitTeam/TwitterMining",
"path": "processing_unit.py",
"copies": "1",
"size": "1048",
"license": "isc",
"hash": -197861354564620320,
"line_mean": 31.75,
"line_max": 68,
"alpha_frac": 0.5209923664,
"autogenerated": false,
"ratio": 3.7971014492753623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4818093815675362,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Charles'
import sys
import logging
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
from pprint import pprint
from http_parser.http import HttpStream
from http_parser.reader import SocketReader
from model import HTTP_Requset, HTTP_Response
try:
from http_parser.parser import HttpParser
except ImportError:
from http_parser.pyparser import HttpParser
reload(sys)
sys.setdefaultencoding('utf-8')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class Request_Parser():
    """Parses a raw HTTP request and exposes header fields plus size metrics."""

    def __init__(self):
        self.parser = HttpParser()
        self.len_request = 0  # total request length in bytes
        self.len_body = 0  # body length in bytes

    def parse(self, raw_requset):
        """Feed the raw request through HttpParser, recording total and body sizes."""
        total = len(bytearray(raw_requset))
        self.len_request = total
        self.parser.execute(raw_requset, total)
        self.len_body = len(bytearray(self.parser.recv_body()))

    def get_all_keys(self):
        """Return every header name present in the request."""
        return self.parser.get_headers().keys()

    def get_keys(self, *args):
        """Map each requested header name to its value, '-' when absent."""
        headers = self.parser.get_headers()
        return dict((name, headers.get(name, '-')) for name in args)

    def get_request(self, *args):
        """Build an HTTP_Requset model from the requested headers and recorded sizes."""
        return HTTP_Requset(self.get_keys(*args), self.len_request, self.len_body)

    def get_body(self):
        """Return the parsed request body."""
        return self.parser.recv_body()
class Response_Parser():
    """Parses a raw HTTP response and exposes headers, status, and size metrics."""

    def __init__(self):
        self.parser = HttpParser()
        self.len_response = 0  # total response length in bytes
        self.len_body = 0  # body length in bytes
        self.body = None  # cached body, populated by parse()

    def parse(self, raw_response):
        """Feed the raw response through HttpParser, caching the body and sizes."""
        total = len(bytearray(raw_response))
        self.len_response = total
        self.parser.execute(raw_response, total)
        self.body = self.parser.recv_body()
        self.len_body = len(bytearray(self.body))

    def get_all_keys(self):
        """Return every header name present in the response."""
        return self.parser.get_headers().keys()

    def get_keys(self, *args):
        """Map each requested header name to its value, '-' when absent."""
        headers = self.parser.get_headers()
        return dict((name, headers.get(name, '-')) for name in args)

    def get_reponse(self, *args):
        """Build an HTTP_Response model (status code, headers, sizes)."""
        return HTTP_Response(self.parser.get_status_code(), self.get_keys(*args),
                             self.len_response, self.len_body)

    def get_body(self):
        """Return the cached response body."""
        return self.body
| {
"repo_name": "CharlesZhong/Mobile-Celluar-Measure",
"path": "http_parser/parser.py",
"copies": "1",
"size": "2393",
"license": "mit",
"hash": -5795212457326023000,
"line_mean": 28.1829268293,
"line_max": 82,
"alpha_frac": 0.6372753865,
"autogenerated": false,
"ratio": 3.670245398773006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4807520785273006,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.