text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'a_medelyan'
import os
# class to hold our test instance (document plus its correct manual keywords)
class TestDoc:
def __init__(self, name):
self.name = name
self.text = ''
self.keywords = []
# reading documents and their keywords from a directory
def read_data(input_dir):
test_set = {}
for doc in os.listdir(input_dir):
file_reader = open(os.path.join(input_dir,doc), 'r')
file_name = doc[:-4]
if file_name not in test_set:
d = TestDoc(file_name)
else:
d = test_set[file_name]
if not doc.endswith(".txt"):
continue
# get document text
text = file_reader.read()
d.text = text
# get document keywords
file_reader = open(os.path.join(input_dir,file_name + ".key"), 'r')
manual_keywords = file_reader.read()
for line in manual_keywords.split('\n'):
line = line.rstrip().lower()
if len(line) > 0:
if '\t' in line:
d.keywords.append(line[0:line.find('\t')])
else:
d.keywords.append(line)
# add document to test set
test_set[file_name] = d
return test_set | {
"repo_name": "azhar3339/RAKE-tutorial",
"path": "test_data.py",
"copies": "7",
"size": "1260",
"license": "mit",
"hash": -4995581503702450000,
"line_mean": 25.2708333333,
"line_max": 77,
"alpha_frac": 0.5357142857,
"autogenerated": false,
"ratio": 3.7837837837837838,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7819498069483783,
"avg_score": null,
"num_lines": null
} |
__author__ = 'a_medelyan'
import test_data
import rake
import sys
# reading a directory with test documents
input_dir = sys.argv[1]
# number of top ranked keywords to evaluate
top = int(sys.argv[2])
test_set = test_data.read_data(input_dir)
best_fmeasure = 0
best_vals = []
for min_char_length in range(3,8):
for max_words_length in range(3,6):
for min_keyword_frequency in range(1,7):
rake_object = rake.Rake("SmartStoplist.txt", min_char_length, max_words_length, min_keyword_frequency)
total_fmeasure = 0
for test_doc in test_set.values():
keywords = rake_object.run(test_doc.text)
num_manual_keywords = len(test_doc.keywords)
correct = 0
try:
for i in range(0,min(top, len(keywords))):
if keywords[i][0] in set(test_doc.keywords):
correct += 1
except IndexError:
print("Problem with evaluating ", keywords)
precision = correct/float(top)
recall = correct/float(num_manual_keywords)
if precision > 0 and recall > 0:
total_fmeasure += 2*precision*recall/(precision + recall)
avg_fmeasure = round(total_fmeasure*100/float(len(test_set)), 2)
if avg_fmeasure > best_fmeasure:
best_fmeasure = avg_fmeasure
best_vals = [min_char_length, max_words_length, min_keyword_frequency]
print(min_char_length, max_words_length, min_keyword_frequency, "\t", avg_fmeasure)
print("")
print("Best result at ", best_fmeasure)
print("with\tmin_char_length", best_vals[0])
print("\tmax_words_length", best_vals[1])
print("\tmin_keyword_frequency", best_vals[2]) | {
"repo_name": "beallej/event-detection",
"path": "Keywords_Wordnet/RAKEtutorialmaster/optimize_rake.py",
"copies": "1",
"size": "1811",
"license": "mit",
"hash": -8717904885476824000,
"line_mean": 33.1886792453,
"line_max": 114,
"alpha_frac": 0.5875207068,
"autogenerated": false,
"ratio": 3.643863179074447,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47313838858744467,
"avg_score": null,
"num_lines": null
} |
__author__ = 'a_medelyan'
import rake
import operator
import sys
# EXAMPLE ONE - SIMPLE
stoppath = "SmartStoplist.txt"
# 1. initialize RAKE by providing a path to a stopwords file
rake_object = rake.Rake(stoppath, 5, 3, 4)
# 2. run on RAKE on a given text
sample_file = open(sys.argv[1], 'r')
text = sample_file.read()
keywords = rake_object.run(text)
# 3. print results
print "Keywords:", keywords
print "----------"
# EXAMPLE TWO - BEHIND THE SCENES (from https://github.com/aneesha/RAKE/rake.py)
# 1. initialize RAKE by providing a path to a stopwords file
rake_object = rake.Rake(stoppath)
text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility " \
"of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. " \
"Upper bounds for components of a minimal set of solutions and algorithms of construction of minimal generating"\
" sets of solutions for all types of systems are given. These criteria and the corresponding algorithms " \
"for constructing a minimal supporting set of solutions can be used in solving all the considered types of " \
"systems and systems of mixed types."
# 1. Split text into sentences
sentenceList = rake.split_sentences(text)
for sentence in sentenceList:
print "Sentence:", sentence
# generate candidate keywords
stopwordpattern = rake.build_stop_word_regex(stoppath)
phraseList = rake.generate_candidate_keywords(sentenceList, stopwordpattern)
print "Phrases:", phraseList
# calculate individual word scores
wordscores = rake.calculate_word_scores(phraseList)
# generate candidate keyword scores
keywordcandidates = rake.generate_candidate_keyword_scores(phraseList, wordscores)
for candidate in keywordcandidates.keys():
print "Candidate: ", candidate, ", score: ", keywordcandidates.get(candidate)
# sort candidates by score to determine top-scoring keywords
sortedKeywords = sorted(keywordcandidates.iteritems(), key=operator.itemgetter(1), reverse=True)
totalKeywords = len(sortedKeywords)
# for example, you could just take the top third as the final keywords
for keyword in sortedKeywords[0:(totalKeywords / 3)]:
print "Keyword: ", keyword[0], ", score: ", keyword[1]
print rake_object.run(text)
| {
"repo_name": "geoff111/AnnualReports",
"path": "RAKE-tutorial/rake_tutorial.py",
"copies": "1",
"size": "2312",
"license": "mit",
"hash": -1114474855520476400,
"line_mean": 35.125,
"line_max": 120,
"alpha_frac": 0.7525951557,
"autogenerated": false,
"ratio": 3.729032258064516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4981627413764516,
"avg_score": null,
"num_lines": null
} |
__author__ = 'a_medelyan'
import rake
import operator
# EXAMPLE ONE - SIMPLE
stoppath = "SmartStoplist.txt"
# 1. initialize RAKE by providing a path to a stopwords file
rake_object = rake.Rake(stoppath, 5, 3, 4)
# 2. run on RAKE on a given text
# sample_file = open("data/docs/fao_test/w2167e.txt", 'r')
sample_file = open("/home/joseph/github/RAKE-tutorial/data/docs/automatic_bug_repair", 'r')
text = sample_file.read()
# keywords = rake_object.run(text)
# 3. print results
# print "Keywords:", keywords
# print "----------"
# EXAMPLE TWO - BEHIND THE SCENES (from https://github.com/aneesha/RAKE/rake.py)
# 1. initialize RAKE by providing a path to a stopwords file
rake_object = rake.Rake(stoppath)
# text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility " \
# "of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. " \
# "Upper bounds for components of a minimal set of solutions and algorithms of construction of minimal generating"\
# " sets of solutions for all types of systems are given. These criteria and the corresponding algorithms " \
# "for constructing a minimal supporting set of solutions can be used in solving all the considered types of " \
# "systems and systems of mixed types."
# 1. Split text into sentences
sentenceList = rake.split_sentences(text)
for sentence in sentenceList:
print "Sentence:", sentence
# generate candidate keywords
stopwordpattern = rake.build_stop_word_regex(stoppath)
phraseList = rake.generate_candidate_keywords(sentenceList, stopwordpattern)
print "Phrases:", phraseList
# calculate individual word scores
wordscores = rake.calculate_word_scores(phraseList)
# generate candidate keyword scores
keywordcandidates = rake.generate_candidate_keyword_scores(phraseList, wordscores)
for candidate in keywordcandidates.keys():
print "Candidate: ", candidate, ", score: ", keywordcandidates.get(candidate)
# sort candidates by score to determine top-scoring keywords
sortedKeywords = sorted(keywordcandidates.iteritems(), key=operator.itemgetter(1), reverse=True)
totalKeywords = len(sortedKeywords)
# for example, you could just take the top third as the final keywords
for keyword in sortedKeywords[0:(totalKeywords / 3)]:
print "Keyword: ", keyword[0], ", score: ", keyword[1]
print rake_object.run(text)
| {
"repo_name": "scorpiovn/RAKE-tutorial",
"path": "rake_tutorial.py",
"copies": "1",
"size": "2433",
"license": "mit",
"hash": -731747729208282800,
"line_mean": 37.015625,
"line_max": 122,
"alpha_frac": 0.7464036169,
"autogenerated": false,
"ratio": 3.6044444444444443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.984056414563014,
"avg_score": 0.0020567831428607956,
"num_lines": 64
} |
__author__ = 'Ameen Tayyebi'
from preprocessor import *
class Generator:
# Whitespace character used to indent the output (switch to tab if desired)
indent_white_space = ' '
output_file_path = ""
module_name = ""
preprocessed_classes = []
preprocessed_enums = []
def __init__(self, preprocessed_classes, preprocessed_enums, output_file_path, module_name):
self.output_file_path = output_file_path
self.module_name = module_name
self.preprocessed_classes = preprocessed_classes
self.preprocessed_enums = preprocessed_enums
def write(self):
# Open output file
output_file = open(self.output_file_path, 'w')
self.write_module(output_file)
# Close the output file
output_file.close()
def write_module(self, output_file):
# Write module declaration
if self.module_name != '':
output_file.write('declare module %s {\r\n' % self.module_name)
# Process all enums
for enum in self.preprocessed_enums:
self.write_enum(enum, output_file, 1)
# Process all classes
for cls in self.preprocessed_classes:
self.write_class(cls, output_file, 1)
if self.module_name != "":
output_file.write("}\r\n")
def write_enum(self, enum, output_file, indent_level):
indent = Generator.get_indent(indent_level)
output_file.write('%sexport enum %s {\r\n' % (indent, enum['name']))
for enum_value in enum['values']:
output_file.write('%s%s = %s,\r\n' %
(Generator.get_indent(indent_level + 1), enum_value['name'], enum_value['value']))
output_file.write('%s}\r\n' % indent)
def write_class(self, cls, output_file, indent_level):
if cls['name'] == Preprocessor.ignore_tag:
return
indent = Generator.get_indent(indent_level)
if len(cls['inherits']) > 0:
# if there is multiple inheritance in the original C++, we choose the very first class since
# TypeScript does not allow multiple inheritance
output_file.write('%sexport class %s extends %s.%s {\r\n' %
(indent, cls['name'], self.module_name, cls['inherits'][0]['class']))
else:
output_file.write('%sexport class %s {\r\n' % (indent, cls['name']))
# Generate methods
for method in sorted(cls['methods']['public'], key=lambda m: m['name']):
Generator.write_method(method, output_file, indent_level + 1)
# Generate setters and getter for public properties
for prop in cls['properties']['public']:
Generator.write_property(prop, output_file, indent_level + 1)
output_file.write('%s}\r\n' % indent)
# Generate nested classes. In TypeScript, there are no nested classes, so
# the way to declare them is via a module that has the same name as the
# outer class
if len(cls['nested_classes']) > 0:
output_file.write('%sexport module %s {\r\n' % (indent, cls['name']))
for nested_class in cls['nested_classes']:
self.write_class(nested_class, output_file, indent_level + 1)
output_file.write('%s}\r\n' % indent)
''' Write getter and setter for properties '''
@staticmethod
def write_property(prop, output_file, indent_level):
# Ignore malformed properties
if prop['name'] == Preprocessor.ignore_tag:
return
indent = Generator.get_indent(indent_level)
output_file.write('%sget_%s():%s;\r\n' % (indent, prop['name'], prop['type']))
output_file.write('%sset_%s(value:%s):void;\r\n' % (indent, prop['name'], prop['type']))
@staticmethod
def write_method(method, output_file, indent_level):
indent = Generator.get_indent(indent_level)
# If the preprocessor indicates that the method should be ignored, then do so
if method['name'] == Preprocessor.ignore_tag:
return
# Ignore destructors
if method['destructor']:
return
# Generate list of parameters
param_list = ','.join('%s:%s' % (arg['name'], arg['type']) for arg in method['parameters'])
# Print composed signature
if method['constructor']:
output_file.write('%sconstructor(%s);\r\n' % (indent, param_list))
else:
output_file.write('%s%s%s(%s):%s;\r\n' % (
indent,
('static ' if method['static'] else ''),
method['name'],
param_list, method['rtnType']))
@staticmethod
def get_indent(level):
return level * Generator.indent_white_space | {
"repo_name": "ameent/c2t",
"path": "generator.py",
"copies": "1",
"size": "4780",
"license": "apache-2.0",
"hash": 1536937414946798800,
"line_mean": 33.6449275362,
"line_max": 112,
"alpha_frac": 0.5849372385,
"autogenerated": false,
"ratio": 3.966804979253112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5051742217753112,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ameen Tayyebi'
import CppHeaderParser
import re
class Preprocessor:
# Methods that need to be ignored will be renamed to this string
ignore_tag = '____ignore____'
headers = []
module_name = ""
def __init__(self, module_name):
self.module_name = module_name
def add_header(self, header):
if not isinstance(header, CppHeaderParser.CppHeader):
raise "Must provide a CppHeader"
self.headers.append(header)
def preprocess(self):
if len(self.headers) == 0:
raise "No headers have been added to the preprocessor"
processed_classes = []
processed_enums = []
for header in self.headers:
classes, enums = self.preprocess_header(header, self.module_name)
processed_classes += classes
processed_enums += enums
return processed_classes, processed_enums
@staticmethod
def preprocess_header(header, module_name):
processed_classes = []
processed_enums = []
classes_with_nested_types = {}
for cls in header.classes.values():
if Preprocessor.preprocess_class(cls, header, classes_with_nested_types, module_name):
processed_classes.append(cls)
for enum in header.global_enums.values():
processed_enums.append(enum)
return processed_classes, processed_enums
@staticmethod
def preprocess_class(cls, header, classes_with_nested_types, module_name):
# Ignore unions
if cls['name'].startswith('union ') or cls['name'] == 'union':
cls['name'] = Preprocessor.ignore_tag
return False
# If we are dealing with an anonymous class definition, then ignore it
if cls['name'] == '':
cls['name'] = Preprocessor.ignore_tag
return False
# Change :: to . to match Typescript syntax
cls['name'] = cls['name'].replace('::', '.')
# Track the fact that this class has nested types
if len(cls['nested_classes']) > 0:
classes_with_nested_types[cls['name']] = True
# Go over all the methods that are public and change their parameter types to Typescript syntax
for method in cls['methods']['public']:
Preprocessor.process_method(method, header, classes_with_nested_types, module_name)
# Process properties now
for prop in cls['properties']['public']:
# Ignore properties that have no name. This can happen if someone yawns with eyes open, or
# if an anonymous union is defined inside a class, for example:
# union { void* m_internalInfo1; int m_internalTmpValue;};
if prop['name'] == '':
prop['name'] = Preprocessor.ignore_tag
# Fix type
prop['type'] = Preprocessor.convert_type(prop['type'], header, classes_with_nested_types, module_name)
# Inheritance chain
for parent in cls['inherits']:
parent['class'] = Preprocessor.clean_template(parent['class']).replace('::', '.')
# Recurse on nested classes
for nested_cls in cls['nested_classes']:
Preprocessor.preprocess_class(nested_cls, header, classes_with_nested_types, module_name)
return True
@staticmethod
def process_method(method, header, classes_with_nested_types, module_name):
# If the method is already processed (it could occur when we are processing
# class hierarchies since subclasses use the same object instance to define
# a method that they have inherited from a parent; the same method class instance
# is shared with all subclasses)
if method.get('processed'):
return
# If method is pure virtual, then ignore it because there is no way to directly call it from JavaScript
if method.get('pure_virtual') and method['pure_virtual']:
method['name'] = Preprocessor.ignore_tag
return
# Fix return type of method
if method.get('returns_unknown'):
# This could happen if the class is templated and a templated type is returned
method['rtnType'] = 'any'
else:
method['rtnType'] = Preprocessor.convert_type(method['rtnType'], header,
classes_with_nested_types, module_name)
# Is this an operator? If so, the key 'operator' will be truthy and
# hold the value of the operator, for example '='
# This code assumes that the bindings use the same format as those of Emscripten
if method['operator']:
swaps = {
'=': 'op_set',
'+': 'op_add',
'-': 'op_sub',
'*': 'op_mul',
'/': 'op_div',
'[]': 'op_get',
'==': 'op_eq'
}
if swaps.get(method['operator']):
method['name'] = swaps[method['operator']]
else:
# If we are dealing with an unsupported operator, then ignore it
method['name'] = Preprocessor.ignore_tag
# For each of the parameters, fix the types and names
arg_index = 0
for arg in method['parameters']:
# Sometimes CppHeaderParser parses the reference character & into the name of the argument
arg['name'] = arg['name'].replace('&', '')
# If the parameter has no name, then create a generic one for it
if arg['name'] == '':
arg['name'] = 'arg' + str(arg_index)
arg_index += 1
# Remove any templates if present (e.g. SomeClass<SomeType>)
if arg.get('template'):
arg['type'] = Preprocessor.clean_template(arg['type'])
# Fix the type
arg['type'] = Preprocessor.convert_type(arg['type'], header, classes_with_nested_types, module_name)
method['processed'] = True
@staticmethod
def convert_type(t, header, classes_with_nested_types, module_name):
# Clean signage, const, etc.
t = Preprocessor.clean_type(t)
# Check to see if the type is a typedef and if so, convert it to its equivalent
if header.typedefs.get(t):
t = header.typedefs[t]
# Again, clean signage, etc. since the typedef may have resolved to something
# with the unneeded extras
t = Preprocessor.clean_type(t)
# Remove any templated parameters
t = Preprocessor.clean_template(t)
# Finally convert if it is a built-in type
t = Preprocessor.swap_builtin_types(t)
# If the type is a class that has nested types, we'll append the name of the global
# module before it so that TypeScript does not get confused. Since TypeScript does not have
# true nested class support, nested classes are printed as follows:
# C++:
# class Outer {
# class Inner {};
# };
# TypeScript:
# class Outer {
# }
# module Outer {
# class Inner {
# }
# }
# Now, a reference to the Outer class may be ambiguous between the Outer class and Outer module.
if classes_with_nested_types.get(t):
t = module_name + "." + t
return t
@staticmethod
def clean_template(t):
return re.sub(r'<.*>', '', t)
@staticmethod
def swap_builtin_types(t):
swapper = {
'int': 'number',
'float': 'number',
'double': 'number',
'bool': 'boolean',
'char': 'string',
'short': 'number',
'long': 'number',
'unsigned': 'number'
}
if swapper.get(t):
t = swapper[t]
return t
@staticmethod
def clean_type(t):
# Unsigned char * and void * are raw pointers and we don't have any equivalents in Typescript
return t.replace('unsigned char *', 'any').replace('void *', 'any') \
.replace('inline', '').replace('const ', '') \
.replace('struct ', '').replace('static ', '') \
.replace('class ', '').replace('unsigned ', '') \
.replace('mutable ', '').replace('short ', '') \
.replace('long ', '').replace('friend ', '') \
.replace('&', '').replace('*', '') \
.replace(' ', '').replace('::', '.')
| {
"repo_name": "ameent/c2t",
"path": "preprocessor.py",
"copies": "1",
"size": "8512",
"license": "apache-2.0",
"hash": 5977455937693071000,
"line_mean": 35.6896551724,
"line_max": 114,
"alpha_frac": 0.5659069549,
"autogenerated": false,
"ratio": 4.387628865979382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010823941587076692,
"num_lines": 232
} |
__author__ = 'Ameen Tayyebi'
import os
import CppHeaderParser
from preprocessor import *
from generator import *
class Translator:
# List of header parsers
parsers = []
# Name of output file to produce
output_file_name = ""
# Location of header files to be parsed
header_folder = ""
# List of directories to be excluded in the header folder
excluded_folders = []
# Name of top-level module name
module_name = ""
# Preprocessor results
processed_classes = []
processed_enums = []
def __init__(self, output_name, header_directory, output_module_name, folders_to_exclude):
"""
Creates a new header translator instance.
:param output_name: Name of the output file name where the result will be written.
:param header_directory: Folder which will be scouted for .h and .hpp files to be used as input
:param output_module_name: An optional name for a module will be prepended to all generated classes and interfaces
:param folders_to_exclude: Subdirectories that should be excluded when the header_folder is being searched
for .h and .hpp files
"""
self.output_file_name = output_name
self.header_folder = header_directory
self.module_name = output_module_name
self.excluded_folders = folders_to_exclude
def parse(self):
print "--> Parsing header files in ", self.header_folder
self.initialize_parsers()
print "--> Parsing finished"
def preprocess(self):
print "--> Preprocessing headers"
preprocessor = Preprocessor(self.module_name)
for parser in self.parsers:
preprocessor.add_header(parser)
self.processed_classes, self.processed_enums = preprocessor.preprocess()
print "--> Preprocessing finished"
def dump(self):
print "--> Generating ", self.output_file_name
generator = Generator(self.processed_classes, self.processed_enums, self.output_file_name, self.module_name)
generator.write()
print "--> Output successfully generated"
@staticmethod
def grab_folder_exclusions():
""" Grabs a list of folders to exclude from the parsed folder """
folders_to_exclude = []
exclude_prompt = "Enter name of folder to be excluded from headers: (empty string to ignore) "
exclude_folder = raw_input(exclude_prompt)
# Keep asking the user for excluded folders until empty string is entered
while exclude_folder != "":
folders_to_exclude.append(exclude_folder)
exclude_folder = raw_input(exclude_prompt)
return folders_to_exclude
def initialize_parsers(self):
""" Parse out all header file in the given directory """
for path, directories, files in os.walk(self.header_folder):
# Parse header files
f = ""
try:
for f in files:
if f.endswith(".h") or f.endswith(".hpp"):
self.parsers.append(CppHeaderParser.CppHeader(os.path.join(path, f)))
except Exception as e:
print "Error parsing file: ", os.path.join(path, f), e
# Make sure excluded folders are not traversed
for excluded_folder in self.excluded_folders:
if excluded_folder in directories:
directories.remove(excluded_folder) | {
"repo_name": "ameent/c2t",
"path": "translator.py",
"copies": "1",
"size": "3483",
"license": "apache-2.0",
"hash": -837560373749029500,
"line_mean": 32.8252427184,
"line_max": 122,
"alpha_frac": 0.6299167384,
"autogenerated": false,
"ratio": 4.517509727626459,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.564742646602646,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
from copy import deepcopy
import numpy
from preimage.utils.alphabet import get_index_to_n_gram
from preimage.exceptions.n_gram import InvalidNGramLengthError, InvalidYLengthError, NoThresholdsError
from preimage.exceptions.shape import InvalidShapeError
class EulerianPath:
"""Eulerian path algorithm for the pre-image of the n-gram kernel
Solves the pre-image of the n-gram kernel by finding an Eulerian circuit in a graph. Since the n-gram weights are
rounded to integer values, this might predict an approximation of the exact pre-image. However, this algorithm is
faster than the branch and bound search.
Attributes
----------
n : int
N-gram length.
is_merging_path : bool
True if merges path when the graph is not connected, False otherwise (choose the longest path instead).
min_n_gram_count : int
The minimum number of n-gram in the string to predict.
Notes
-----
This is an implementation of the eulerian path algorithm as defined in [1]_. However, there are multiple ways to
merge the paths together when the graph is not connected. We chose to find the longest path and merge the remaining
paths after, but this might differ from what Cortes et al. did in their work. The n-gram kernel also has a
non-unique pre-image. For instance if the string to predict has the n-grams "ab" and "ba" both "aba" and "bab" are
valid pre-image. In that case we chose randomly a pre-image among the possible ones (this is done by visiting the
edges and the nodes in a random order). Note that the code could also be faster but we only cared about implementing
the algorithm correctly.
References
----------
.. [1] Cortes, Corinna, Mehryar Mohri, and Jason Weston. "A general regression framework for learning
string-to-string mappings." Predicting Structured Data (2007): 143-168.
"""
def __init__(self, alphabet, n, min_length=1, is_merging_path=True):
"""Initialize the eulerian path algorithm
Parameters
----------
alphabet : list
list of letters.
n : int
n-gram length.
min_length :int
Minimum length of the predicted string.
is_merging_path : bool
True if merges path when the graph is not connected, False otherwise (choose the longest path instead).
"""
self.n = int(n)
self.is_merging_path = is_merging_path
self.min_n_gram_count = 1 if min_length <= self.n else min_length - self.n + 1
self._index_to_n_gram = get_index_to_n_gram(alphabet, self.n)
self._n_gram_count = len(alphabet) ** self.n
self._verify_n(self.n)
def _verify_n(self, n):
if n <= 1:
raise InvalidNGramLengthError(n, 1)
def find_eulerian_path(self, n_gram_weights, y_length=None, thresholds=None):
"""Solve the pre-image of n-gram kernel.
Rounds the n_gram_weights to integer values, creates a graph with the predicted n-grams, and predicts the output
string by finding an eulerian path in this graph. If the graph is not connected, it will merge multiple paths
together when is_merging_path is True, and predict the longest path otherwise.
Parameters
----------
n_gram_weights : array, shape=[len(alphabet)**n]
Weight of each n-gram.
y_length : int or None
Length of the string to predict. None if thresholds are used.
thresholds :array, shape=[len(alphabet)**n] or None
Threshold value for each n-gram above which the n_gram_weights are rounded to one.
None if y_length is given.
Returns
-------
y: string
The predicted string.
"""
self._verify_weights_length_thresholds(n_gram_weights, y_length, thresholds)
rounded_weights = self._round_weights(n_gram_weights, thresholds, y_length)
n_gram_indexes = numpy.where(rounded_weights > 0)[0]
n_grams = self._get_n_grams_in_selected_indexes(n_gram_indexes, rounded_weights)
y = self._find_y_corresponding_to_n_grams(n_grams)
if y_length is not None:
y = y[0:y_length]
return y
def _verify_weights_length_thresholds(self, n_gram_weights, y_length, thresholds):
if n_gram_weights.shape[0] != self._n_gram_count:
raise InvalidShapeError('n_gram_weights', n_gram_weights.shape, [(self._n_gram_count,)])
if y_length is not None and y_length < self.n:
raise InvalidYLengthError(self.n, y_length)
if thresholds is not None and thresholds.shape[0] != self._n_gram_count:
raise InvalidShapeError('thresholds', thresholds.shape, [(self._n_gram_count,)])
if thresholds is None and y_length is None:
raise NoThresholdsError()
def _round_weights(self, n_gram_weights, thresholds, y_length):
if y_length is None:
rounded_weights = self._round_weights_with_thresholds(n_gram_weights, thresholds)
else:
n_gram_count = y_length - self.n + 1
rounded_weights = self._round_weights_to_n_gram_count(n_gram_weights, n_gram_count)
return rounded_weights
def _round_weights_to_n_gram_count(self, weights, n_gram_count_in_y):
weights_copy = numpy.copy(weights)
weights_copy[weights_copy < 0] = 0.
rounded_weights = numpy.round(weights_copy)
positive_weights = weights_copy[weights > 0]
if rounded_weights.sum() < n_gram_count_in_y:
rounded_weights = self._add_n_grams_to_rounded_sum(weights_copy, positive_weights, n_gram_count_in_y)
return rounded_weights
def _add_n_grams_to_rounded_sum(self, weights_copy, positive_weights, n_gram_count_in_y):
while numpy.round(weights_copy).sum() < n_gram_count_in_y:
multiplicative_factors = (numpy.ceil(positive_weights + 0.5) - 0.49) / positive_weights
min_factor = numpy.min(multiplicative_factors[multiplicative_factors > 1.])
weights_copy = min_factor * weights_copy
positive_weights = min_factor * positive_weights
return numpy.round(weights_copy)
def _round_weights_with_thresholds(self, weights, thresholds):
rounded_weights = numpy.asarray(weights > thresholds, dtype=numpy.int)
non_zero_weight_count = rounded_weights.sum()
# Avoid having zero n gram predicted
if non_zero_weight_count < self.min_n_gram_count:
kth_indexes = numpy.arange(0, self.min_n_gram_count)
best_weight_indexes = numpy.argpartition(-weights, kth_indexes)[0:self.min_n_gram_count]
best_zero_weight_indexes = best_weight_indexes[rounded_weights[best_weight_indexes] == 0]
rounded_weights[best_zero_weight_indexes[0:self.min_n_gram_count - non_zero_weight_count]] = 1
return rounded_weights
def _get_n_grams_in_selected_indexes(self, selected_n_gram_indexes, rounded_weights):
repeated_n_grams = [self._index_to_n_gram[index] for index in selected_n_gram_indexes
for _ in range(int(rounded_weights[index]))]
return numpy.array(repeated_n_grams)
def _find_y_corresponding_to_n_grams(self, n_grams):
nodes, leaving_edges, marked_edges = self._get_nodes_and_edges(n_grams)
if self.is_merging_path:
path = self._merge_best_paths(nodes, leaving_edges, marked_edges, n_grams)
else:
path, marked_edges = self._find_best_path(nodes, leaving_edges, marked_edges, n_grams)
y = self._transform_path_in_word(path)
return y
def _get_nodes_and_edges(self, n_grams):
nodes = numpy.unique([n_gram[j:j + self.n - 1] for n_gram in n_grams for j in range(2)])
nodes = nodes[numpy.random.permutation(nodes.shape[0])]
random_n_grams = n_grams[numpy.random.permutation(n_grams.shape[0])]
leaving_edges = {node: [] for node in nodes}
marked_edges = {node: [] for node in nodes}
self._update_leaving_and_marked_edges(leaving_edges, marked_edges, random_n_grams)
return nodes, leaving_edges, marked_edges
def _update_leaving_and_marked_edges(self, leaving_edges, marked_edges, random_n_grams):
for n_gram in random_n_grams:
leaving_edges[n_gram[0:self.n - 1]].append(n_gram[1:])
marked_edges[n_gram[0:self.n - 1]].append(False)
def _merge_best_paths(self, nodes, leaving_edges, marked_edges, n_grams):
path = []
while len(nodes) > 0:
best_path, marked_edges = self._find_best_path(nodes, leaving_edges, marked_edges, n_grams)
path += best_path
nodes = [node for node, edges in marked_edges.items() if sum(edges) < len(edges)]
return path
def _find_best_path(self, nodes, leaving_edges, marked_edges, n_grams):
best_path = []
best_marked_edges = {}
for node in nodes:
path_marked_edges = deepcopy(marked_edges)
path = [node] + self._euler(node, leaving_edges, path_marked_edges)
best_path, best_marked_edges = self._update_best_path(path, path_marked_edges, best_path, best_marked_edges)
if len(path) == len(n_grams) + 1:
break
return best_path, best_marked_edges
def _euler(self, node, leaving_edges, marked_edges):
path = []
for edge_index, destination_node in enumerate(leaving_edges[node]):
if not marked_edges[node][edge_index]:
marked_edges[node][edge_index] = True
path = [destination_node] + self._euler(destination_node, leaving_edges, marked_edges) + path
return path
def _update_best_path(self, path, path_marked_edges, best_path, best_marked_edges):
if len(path) >= len(best_path):
best_path = path
best_marked_edges = deepcopy(path_marked_edges)
return best_path, best_marked_edges
def _transform_path_in_word(self, path):
    # Collapse a node path back into a string: start from the first
    # (n-1)-gram node and append the final character of every later node.
    y = path[0]
    for node in path[1:]:
        y += node[-1:]
return y | {
"repo_name": "a-ro/preimage",
"path": "preimage/inference/euler.py",
"copies": "1",
"size": "10176",
"license": "bsd-2-clause",
"hash": 454338347416798900,
"line_mean": 47.2322274882,
"line_max": 120,
"alpha_frac": 0.6411163522,
"autogenerated": false,
"ratio": 3.657800143781452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47989164959814523,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
from itertools import product
import numpy
from preimage.exceptions.n_gram import InvalidNGramLengthError
class Alphabet:
    """Namespace for predefined alphabets (lists of single-character strings)."""

    # The 26 lowercase letters of the basic Latin alphabet.
    latin = list('abcdefghijklmnopqrstuvwxyz')
def get_n_gram_to_index(alphabet, n):
    """Map each n-gram over alphabet to its index (a numpy integer) in the
    canonical ordering produced by get_n_grams."""
    n_grams = get_n_grams(alphabet, n)
    indexes = numpy.arange(len(n_grams))
    return {gram: index for gram, index in zip(n_grams, indexes)}
def get_index_to_n_gram(alphabet, n):
    """Inverse of get_n_gram_to_index: map each index (a numpy integer) to
    its n-gram in the canonical ordering produced by get_n_grams."""
    n_grams = get_n_grams(alphabet, n)
    return {index: gram for index, gram in zip(numpy.arange(len(n_grams)), n_grams)}
def get_n_grams(alphabet, n):
    """Return every length-n string over alphabet, in itertools.product order.

    Raises InvalidNGramLengthError when n (coerced to int) is not positive.
    """
    n = int(n)
    if n < 1:
        raise InvalidNGramLengthError(n)
    return [''.join(letters) for letters in product(alphabet, repeat=n)]
def transform_strings_to_integer_lists(Y, alphabet):
    # Encode each string in Y as a row of alphabet indices (int8), padded
    # with -1 up to the length of the longest string in Y.
    letter_to_int = get_n_gram_to_index(alphabet, 1)
    n_examples = numpy.array(Y).shape[0]
    max_length = numpy.max([len(y) for y in Y])
    # -1 marks padding positions past the end of shorter strings.
    # NOTE(review): int8 limits the alphabet to at most 128 symbols.
    Y_int = numpy.zeros((n_examples, max_length), dtype=numpy.int8) - 1
    for y_index, y in enumerate(Y):
        for letter_index, letter in enumerate(y):
            Y_int[y_index, letter_index] = letter_to_int[letter]
return Y_int | {
"repo_name": "a-ro/preimage",
"path": "preimage/utils/alphabet.py",
"copies": "1",
"size": "1331",
"license": "bsd-2-clause",
"hash": -2887639538992597000,
"line_mean": 28.6,
"line_max": 117,
"alpha_frac": 0.6033057851,
"autogenerated": false,
"ratio": 2.7330595482546203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38363653333546205,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
from math import sqrt
import unittest2
import numpy.testing
from mock import patch
from preimage.kernels.generic_string import GenericStringKernel, element_wise_kernel
class TestGenericStringKernel(unittest2.TestCase):
    """Tests for GenericStringKernel and element_wise_kernel.

    The position-weight matrix, the string-to-integer conversion and the
    amino-acid descriptor loader are patched, so each test fully controls
    the kernel's inputs; expected values are computed by hand in setUp.
    """
    def setUp(self):
        self.setup_alphabet()
        self.setup_positions()
        self.setup_loader()
        self.setup_similarity()
        self.setup_sigma_c_similarity()
    def setup_alphabet(self):
        # Integer encodings of the test strings; -1 pads 'aba' to length 4.
        self.alphabet = ['a', 'b']
        self.aba = ['aba']
        self.bbbb = ['bbbb']
        self.aba_bbbb = ['aba', 'bbbb']
        self.aba_int = numpy.array([[0, 1, 0]], dtype=numpy.int8)
        self.bbbb_int = numpy.array([[1, 1, 1, 1]], dtype=numpy.int8)
        self.aba_bbbb_int = numpy.array([[0, 1, 0, -1], [1, 1, 1, 1]],
                                        dtype=numpy.int8)
        self.string_to_int_patch = patch('preimage.kernels.generic_string.transform_strings_to_integer_lists')
    def setup_positions(self):
        # Small sigma -> identity (position-exact match only); large sigma ->
        # all-ones (position-independent); medium sigma -> neighbours at 0.5.
        self.small_sigma = 1e-8
        self.medium_sigma = 1
        self.large_sigma = 1e8
        self.positions_small_sigma = numpy.eye(4)
        self.positions_medium_sigma = numpy.array([[1, 0.5, 0, 0], [0.5, 1, 0.5, 0], [0, 0.5, 1, 0.5], [0, 0, 0.5, 1]])
        self.positions_large_sigma = numpy.ones((4, 4))
        self.position_patch = patch('preimage.kernels.generic_string.compute_position_weights_matrix')
    def setup_loader(self):
        # Fixed two-letter "amino acid" descriptors instead of the real matrix file.
        self.loader_patch = patch('preimage.kernels.generic_string.load_amino_acids_and_descriptors')
        self.loader_patch.start().return_value = (['a', 'b'], [[1., 3], [3, 2]])
    def setup_similarity(self):
        # Expected (unnormalized) kernel values, computed by hand.
        self.aba_small_sigma_similarity = [3]
        self.bbbb_medium_sigma_similarity = [7]
        self.aba_large_sigma_similarity = [5]
        self.aba_large_sigma_p_sigma_c_similarity = [9]
        self.bbbb_two_gram_large_sigma_similarity = [9]
        self.aba_bbbb_small_sigma_similarity = [3, 4]
    def setup_sigma_c_similarity(self):
        # Hand-computed values that involve the amino-acid (sigma_c) similarity
        # between 'a' and 'b', derived from the stubbed descriptors above.
        normalized_descriptors = numpy.array([[1. / sqrt(10), 3. / sqrt(10)], [3. / sqrt(13), 2. / sqrt(13)]])
        a_b_distance = (normalized_descriptors[0, 0] - normalized_descriptors[1, 0]) ** 2 + \
                       (normalized_descriptors[0, 1] - normalized_descriptors[1, 1]) ** 2
        a_b_sigma_c_similarity = numpy.exp(-a_b_distance / 2)
        ab_ba_two_gram_medium_sigma_c_similarity = numpy.exp(-a_b_distance / 2) ** 2
        self.aba_medium_sigma_c_similarity = 5 + 4 * a_b_sigma_c_similarity
        self.aba_two_gram_medium_sigma_c_similarity = self.aba_medium_sigma_c_similarity + 2 + \
                                                      2 * ab_ba_two_gram_medium_sigma_c_similarity
        self.aba_bbbb_medium_sigma_c_small_sigma_p = 1 + 2 * a_b_sigma_c_similarity
        self.aba_bbbb_normalized_small_sigma_similarity = 1. / sqrt(3 * 4)
        self.aba_bbbb_gram_matrix_normalized = [[1., self.aba_bbbb_normalized_small_sigma_similarity],
                                                [self.aba_bbbb_normalized_small_sigma_similarity, 1.]]
    def test_one_gram_one_x_small_sigma_element_wise_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_small_sigma
        self.string_to_int_patch.start().return_value = self.aba_int
        similarities = element_wise_kernel(self.aba, self.small_sigma, n=1, alphabet=self.alphabet)
        numpy.testing.assert_array_equal(similarities, self.aba_small_sigma_similarity)
    def test_one_gram_two_x_small_sigma_element_wise_gs_kernel_returns_expected_values(self):
        self.position_patch.start().return_value = self.positions_small_sigma
        self.string_to_int_patch.start().return_value = self.aba_bbbb_int
        similarities = element_wise_kernel(self.aba_bbbb, self.small_sigma, n=1, alphabet=self.alphabet)
        numpy.testing.assert_array_equal(similarities, self.aba_bbbb_small_sigma_similarity)
    def test_one_gram_one_x_large_sigma_element_wise_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_large_sigma
        self.string_to_int_patch.start().return_value = self.aba_int
        similarities = element_wise_kernel(self.aba, self.large_sigma, n=1, alphabet=self.alphabet)
        numpy.testing.assert_array_equal(similarities, self.aba_large_sigma_similarity)
    def test_one_gram_one_x_medium_sigma_element_wise_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_medium_sigma
        self.string_to_int_patch.start().return_value = self.bbbb_int
        similarities = element_wise_kernel(self.bbbb, self.medium_sigma, n=1, alphabet=self.alphabet)
        numpy.testing.assert_array_equal(similarities, self.bbbb_medium_sigma_similarity)
    # NOTE(review): the name says "small_sigma" but the test uses large_sigma
    # fixtures throughout -- the name looks stale; confirm and rename.
    def test_two_gram_one_x_small_sigma_element_wise_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_large_sigma
        self.string_to_int_patch.start().return_value = self.bbbb_int
        similarities = element_wise_kernel(self.bbbb, self.large_sigma, n=2, alphabet=self.alphabet)
        numpy.testing.assert_array_equal(similarities, self.bbbb_two_gram_large_sigma_similarity)
    def test_same_string_normalized_gs_kernel_returns_one(self):
        self.position_patch.start().return_value = self.positions_small_sigma
        self.string_to_int_patch.start().side_effect = [self.aba_int, self.aba_int]
        kernel = GenericStringKernel()
        gram_matrix = kernel(self.aba, self.aba)
        numpy.testing.assert_array_equal(gram_matrix, [[1]])
    def test_same_string_not_normalized_small_sigma_position_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_small_sigma
        self.string_to_int_patch.start().side_effect = [self.aba_int, self.aba_int]
        kernel = GenericStringKernel(is_normalized=False, sigma_position=self.small_sigma, n=1)
        gram_matrix = kernel(self.aba, self.aba)
        numpy.testing.assert_array_equal(gram_matrix, [self.aba_small_sigma_similarity])
    def test_same_string_not_normalized_large_sigma_p_small_sigma_c_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_large_sigma
        self.string_to_int_patch.start().side_effect = [self.aba_int, self.aba_int]
        kernel = GenericStringKernel(is_normalized=False, sigma_position=self.large_sigma,
                                     sigma_amino_acid=self.small_sigma, n=1)
        gram_matrix = kernel(self.aba, self.aba)
        numpy.testing.assert_array_equal(gram_matrix, [self.aba_large_sigma_similarity])
    def test_same_string_not_normalized_large_sigma_p_large_sigma_c_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_large_sigma
        self.string_to_int_patch.start().side_effect = [self.aba_int, self.aba_int]
        kernel = GenericStringKernel(is_normalized=False, sigma_position=self.large_sigma,
                                     sigma_amino_acid=self.large_sigma, n=1)
        gram_matrix = kernel(self.aba, self.aba)
        numpy.testing.assert_array_equal(gram_matrix, [self.aba_large_sigma_p_sigma_c_similarity])
    def test_same_string_large_sigma_p_medium_sigma_c_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_large_sigma
        self.string_to_int_patch.start().side_effect = [self.aba_int, self.aba_int]
        kernel = GenericStringKernel(is_normalized=False, sigma_position=self.large_sigma,
                                     sigma_amino_acid=self.medium_sigma, n=1)
        gram_matrix = kernel(self.aba, self.aba)
        numpy.testing.assert_almost_equal(gram_matrix, [[self.aba_medium_sigma_c_similarity]])
    def test_two_gram_same_string_large_sigma_p_medium_sigma_c_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_large_sigma
        self.string_to_int_patch.start().side_effect = [self.aba_int, self.aba_int]
        kernel = GenericStringKernel(is_normalized=False, sigma_position=self.large_sigma,
                                     sigma_amino_acid=self.medium_sigma, n=2)
        gram_matrix = kernel(self.aba, self.aba)
        numpy.testing.assert_almost_equal(gram_matrix, [[self.aba_two_gram_medium_sigma_c_similarity]])
    def test_one_gram_different_string_small_sigma_p_medium_sigma_c_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_small_sigma
        self.string_to_int_patch.start().side_effect = [self.aba_int, self.bbbb_int]
        kernel = GenericStringKernel(is_normalized=False, sigma_position=self.small_sigma,
                                     sigma_amino_acid=self.medium_sigma, n=1)
        gram_matrix = kernel(self.aba, self.bbbb)
        numpy.testing.assert_almost_equal(gram_matrix, [[self.aba_bbbb_medium_sigma_c_small_sigma_p]])
    def test_one_gram_normalized_two_strings_small_sigmas_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_small_sigma
        self.string_to_int_patch.start().side_effect = [self.aba_int, self.bbbb_int]
        kernel = GenericStringKernel(is_normalized=True, sigma_position=self.small_sigma,
                                     sigma_amino_acid=self.small_sigma, n=1)
        gram_matrix = kernel(self.aba, self.bbbb)
        numpy.testing.assert_almost_equal(gram_matrix, [[self.aba_bbbb_normalized_small_sigma_similarity]])
    def test_one_gram_symmetric_normalized_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_small_sigma
        self.string_to_int_patch.start().side_effect = [self.aba_bbbb_int, self.aba_bbbb_int]
        kernel = GenericStringKernel(is_normalized=True, sigma_position=self.small_sigma,
                                     sigma_amino_acid=self.small_sigma, n=1)
        gram_matrix = kernel(self.aba_bbbb, self.aba_bbbb)
        numpy.testing.assert_almost_equal(gram_matrix, self.aba_bbbb_gram_matrix_normalized)
    def test_two_gram_same_string_large_sigma_p_medium_sigma_c_gs_element_wise_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_large_sigma
        self.string_to_int_patch.start().side_effect = [self.aba_int, self.aba_int]
        kernel = GenericStringKernel(sigma_position=self.large_sigma, sigma_amino_acid=self.medium_sigma, n=2)
        similarities = kernel.element_wise_kernel(self.aba)
        numpy.testing.assert_almost_equal(similarities, [self.aba_two_gram_medium_sigma_c_similarity])
    def test_one_gram_two_strings_small_sigmas_gs_kernel_returns_expected_value(self):
        self.position_patch.start().return_value = self.positions_small_sigma
        self.string_to_int_patch.start().return_value = self.aba_bbbb_int
        kernel = GenericStringKernel(sigma_position=self.small_sigma, sigma_amino_acid=self.small_sigma, n=1)
        gram_matrix = kernel.element_wise_kernel(self.aba_bbbb)
        numpy.testing.assert_almost_equal(gram_matrix, self.aba_bbbb_small_sigma_similarity)
if __name__ == '__main__':
unittest2.main() | {
"repo_name": "a-ro/preimage",
"path": "preimage/tests/kernels/test_generic_string.py",
"copies": "1",
"size": "11366",
"license": "bsd-2-clause",
"hash": -2260100661746290200,
"line_mean": 51.8697674419,
"line_max": 119,
"alpha_frac": 0.6679570649,
"autogenerated": false,
"ratio": 3.334115576415371,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4502072641315371,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
from preimage.features.gs_feature_space import GenericStringFeatureSpace
from preimage.models.model import Model
from preimage.inference.graph_builder import GraphBuilder
from preimage.inference.branch_and_bound import branch_and_bound, branch_and_bound_no_length
from preimage.inference.bound_factory import get_gs_node_creator
from preimage.utils.alphabet import get_n_gram_to_index, get_n_grams
class GenericStringModel(Model):
    """Pre-image inference model for the Generic String kernel.

    Builds an n-gram graph from the predicted feature-space weights and
    searches it with branch and bound, either with a known output length or
    over the [min, max] training-length range.
    """
    def __init__(self, alphabet, n, is_using_length=True, seed=42, max_time=30, sigma_position=1.):
        Model.__init__(self, alphabet, n, is_using_length)
        self._graph_builder = GraphBuilder(alphabet, n)
        self._is_normalized = True
        self._seed = seed
        self._max_time = max_time  # time budget (seconds) for the branch and bound search
        self._sigma_position = sigma_position
        self._n_grams = list(get_n_grams(alphabet, n))
        self._n_gram_to_index = get_n_gram_to_index(alphabet, n)
    def fit(self, inference_parameters):
        # Record min/max training lengths (base class), then build the feature space.
        Model.fit(self, inference_parameters)
        self._feature_space_ = GenericStringFeatureSpace(self._alphabet, self._n, inference_parameters.Y_train,
                                                         self._sigma_position, self._is_normalized)
    def predict(self, Y_weights, y_lengths):
        if self._is_using_length:
            self._verify_y_lengths_is_not_none_when_use_length(y_lengths)
            Y_predictions = self._predict_with_length(Y_weights, y_lengths)
        else:
            Y_predictions = self._predict_without_length(Y_weights)
        return Y_predictions
    def _predict_with_length(self, Y_weights, y_lengths):
        # One branch-and-bound search per example, at the requested length.
        Y_predictions = []
        for y_weights, y_length in zip(Y_weights, y_lengths):
            n_gram_weights = self._feature_space_.compute_weights(y_weights, y_length)
            graph = self._graph_builder.build_graph(n_gram_weights, y_length)
            node_creator = get_gs_node_creator(self._n, graph, n_gram_weights, y_length, self._n_gram_to_index,
                                               self._n_grams, self._sigma_position)
            y_predicted, y_bound = branch_and_bound(node_creator, y_length, self._alphabet, self._max_time)
            Y_predictions.append(y_predicted)
        return Y_predictions
    def _predict_without_length(self, Y_weights):
        # Unknown target length: search all lengths between the training min and max.
        Y_predictions = []
        for y_weights in Y_weights:
            n_gram_weights = self._feature_space_.compute_weights(y_weights, self._max_length_)
            graph = self._graph_builder.build_graph(n_gram_weights, self._max_length_)
            node_creator = get_gs_node_creator(self._n, graph, n_gram_weights, self._max_length_,
                                               self._n_gram_to_index, self._n_grams, self._sigma_position)
            y_predicted, y_bound = branch_and_bound_no_length(node_creator, self._min_length_, self._max_length_,
                                                              self._alphabet, self._max_time)
            Y_predictions.append(y_predicted)
return Y_predictions | {
"repo_name": "a-ro/preimage",
"path": "preimage/models/generic_string_model.py",
"copies": "1",
"size": "3049",
"license": "bsd-2-clause",
"hash": -964787651570508200,
"line_mean": 53.4642857143,
"line_max": 113,
"alpha_frac": 0.6247950148,
"autogenerated": false,
"ratio": 3.700242718446602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9809157964135587,
"avg_score": 0.0031759538222029303,
"num_lines": 56
} |
__author__ = 'amelie'
from preimage.features.n_gram_feature_space import NGramFeatureSpace
from preimage.models.model import Model
from preimage.inference.graph_builder import GraphBuilder
from preimage.inference.branch_and_bound import branch_and_bound, branch_and_bound_no_length
from preimage.inference.bound_factory import get_n_gram_node_creator
from preimage.utils.alphabet import get_n_gram_to_index, get_n_grams
class NGramModel(Model):
    """Pre-image inference model for the (position-independent) n-gram kernel.

    Mirrors GenericStringModel but uses the plain n-gram feature space and
    node creator; the same weights are reused for every candidate length.
    """
    def __init__(self, alphabet, n, is_using_length=True, seed=42, max_time=30):
        Model.__init__(self, alphabet, n, is_using_length)
        self._graph_builder = GraphBuilder(list(alphabet), n)
        self._is_normalized = True
        self._seed = seed
        self._max_time = max_time  # time budget (seconds) for the branch and bound search
        self._n_grams = list(get_n_grams(alphabet, n))
        self._n_gram_to_index = get_n_gram_to_index(alphabet, n)
    def fit(self, inference_parameters):
        Model.fit(self, inference_parameters)
        self._feature_space_ = NGramFeatureSpace(self._alphabet, self._n, inference_parameters.Y_train,
                                                 self._is_normalized)
    def predict(self, Y_weights, y_lengths):
        if self._is_using_length:
            self._verify_y_lengths_is_not_none_when_use_length(y_lengths)
            Y_predictions = self._predict_with_length(Y_weights, y_lengths)
        else:
            Y_predictions = self._predict_without_length(Y_weights)
        return Y_predictions
    def _predict_with_length(self, Y_weights, y_lengths):
        # One branch-and-bound search per example, at the requested length.
        Y_predictions = []
        for y_weights, y_length in zip(Y_weights, y_lengths):
            n_gram_weights = self._feature_space_.compute_weights(y_weights)
            graph = self._graph_builder.build_graph(n_gram_weights, y_length)
            node_creator = get_n_gram_node_creator(self._n, graph, n_gram_weights, y_length, self._n_gram_to_index,
                                                   self._n_grams)
            y_predicted, y_bound = branch_and_bound(node_creator, y_length, self._alphabet, self._max_time)
            Y_predictions.append(y_predicted)
        return Y_predictions
    def _predict_without_length(self, Y_weights):
        # Unknown target length: search all lengths between the training min and max.
        Y_predictions = []
        for y_weights in Y_weights:
            n_gram_weights = self._feature_space_.compute_weights(y_weights)
            graph = self._graph_builder.build_graph(n_gram_weights, self._max_length_)
            node_creator = get_n_gram_node_creator(self._n, graph, n_gram_weights, self._max_length_,
                                                   self._n_gram_to_index, self._n_grams)
            y_predicted, y_bound = branch_and_bound_no_length(node_creator, self._min_length_, self._max_length_,
                                                              self._alphabet, self._max_time)
            Y_predictions.append(y_predicted)
return Y_predictions | {
"repo_name": "a-ro/preimage",
"path": "preimage/models/n_gram_model.py",
"copies": "1",
"size": "2887",
"license": "bsd-2-clause",
"hash": -4681046931271861000,
"line_mean": 51.5090909091,
"line_max": 115,
"alpha_frac": 0.6172497402,
"autogenerated": false,
"ratio": 3.6452020202020203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.476245176040202,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
from preimage.features.weighted_degree_feature_space import WeightedDegreeFeatureSpace
from preimage.inference.graph_builder import GraphBuilder
from preimage.models.model import Model
class WeightedDegreeModel(Model):
    """Pre-image inference model for the Weighted Degree kernel.

    The graph search is exact here (no branch and bound): the best string is
    read directly off the n-gram graph.
    """
    def __init__(self, alphabet, n, is_using_length=True):
        self._graph_builder = GraphBuilder(alphabet, n)
        self._is_normalized = True
        Model.__init__(self, alphabet, n, is_using_length)
    def fit(self, inference_parameters):
        Model.fit(self, inference_parameters)
        self._feature_space_ = WeightedDegreeFeatureSpace(self._alphabet, self._n, inference_parameters.Y_train,
                                                          self._is_normalized)
    def predict(self, Y_weights, y_lengths):
        if self._is_using_length:
            self._verify_y_lengths_is_not_none_when_use_length(y_lengths)
            Y_predictions = self._predict_with_length(Y_weights, y_lengths)
        else:
            Y_predictions = self._predict_without_length(Y_weights)
        return Y_predictions
    def _predict_with_length(self, Y_weights, y_lengths):
        # Exact maximization at the requested length for each example.
        Y_predictions = []
        for y_weights, y_length in zip(Y_weights, y_lengths):
            n_gram_weights = self._feature_space_.compute_weights(y_weights, y_length)
            y_predicted = self._graph_builder.find_max_string(n_gram_weights, y_length)
            Y_predictions.append(y_predicted)
        return Y_predictions
    def _predict_without_length(self, Y_weights):
        # Unknown target length: maximize over the training [min, max] length range.
        Y_predictions = []
        for y_weights in Y_weights:
            n_gram_weights = self._feature_space_.compute_weights(y_weights, self._max_length_)
            y_predicted = self._graph_builder.find_max_string_in_length_range(n_gram_weights, self._min_length_,
                                                                             self._max_length_, self._is_normalized)
            Y_predictions.append(y_predicted)
return Y_predictions | {
"repo_name": "a-ro/preimage",
"path": "preimage/models/weighted_degree_model.py",
"copies": "1",
"size": "1987",
"license": "bsd-2-clause",
"hash": 3192388092389131300,
"line_mean": 46.3333333333,
"line_max": 117,
"alpha_frac": 0.6240563664,
"autogenerated": false,
"ratio": 3.880859375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9992218504994387,
"avg_score": 0.0025394472811226046,
"num_lines": 42
} |
__author__ = 'amelie'
from sklearn.base import BaseEstimator
from preimage.inference.graph_builder import GraphBuilder
from preimage.inference.branch_and_bound import branch_and_bound_multiple_solutions
from preimage.inference.bound_factory import get_gs_similarity_node_creator
from preimage.features.gs_similarity_feature_space import GenericStringSimilarityFeatureSpace
class StringMaximizationModel(BaseEstimator):
    """Find the strings of a fixed length that maximize a learned GS-kernel score.

    fit() builds the weighted n-gram graph from the training strings and the
    regression weights; predict() runs branch and bound to return the top
    n_predictions strings with their score bounds.
    """
    def __init__(self, alphabet, n, gs_kernel, max_time):
        self._n = int(n)
        self._alphabet = alphabet
        self._graph_builder = GraphBuilder(self._alphabet, self._n)
        self._gs_kernel = gs_kernel
        self._max_time = max_time  # time budget (seconds) for the branch and bound search
        self._is_normalized = True
        self._node_creator_ = None
        self._y_length_ = None
    def fit(self, X, learned_weights, y_length):
        feature_space = GenericStringSimilarityFeatureSpace(self._alphabet, self._n, X, self._is_normalized,
                                                            self._gs_kernel)
        gs_weights = feature_space.compute_weights(learned_weights, y_length)
        graph = self._graph_builder.build_graph(gs_weights, y_length)
        self._node_creator_ = get_gs_similarity_node_creator(self._alphabet, self._n, graph, gs_weights, y_length,
                                                             self._gs_kernel)
        self._y_length_ = y_length
    def predict(self, n_predictions):
        strings, bounds = branch_and_bound_multiple_solutions(self._node_creator_, self._y_length_, n_predictions,
                                                              self._alphabet, self._max_time)
return strings, bounds | {
"repo_name": "a-ro/preimage",
"path": "preimage/models/string_max_model.py",
"copies": "1",
"size": "1669",
"license": "bsd-2-clause",
"hash": -5382630819978568000,
"line_mean": 48.1176470588,
"line_max": 114,
"alpha_frac": 0.6279209107,
"autogenerated": false,
"ratio": 4.070731707317073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027376560958002256,
"num_lines": 34
} |
__author__ = 'amelie'
from sklearn.kernel_ridge import KernelRidge
from preimage.datasets.loader import load_bpps_dataset, AminoAcidFile
from preimage.kernels.generic_string import GenericStringKernel
from preimage.models.string_max_model import StringMaximizationModel
if __name__ == '__main__':
    # Best parameters found by cross-validation
    alpha = 1. / 6.4
    n = 3
    sigma_position = 0.4
    sigma_amino_acid = 0.8
    # Choose the number of predicted peptides and their length
    n_predictions = 1000
    y_length = 5
    # Max time (seconds) for the branch and bound search
    max_time = 500
    print('String maximization model on BPPs dataset')
    gs_kernel = GenericStringKernel(AminoAcidFile.blosum62_natural, sigma_position, sigma_amino_acid, n,
                                    is_normalized=True)
    alphabet = gs_kernel.alphabet
    dataset = load_bpps_dataset()
    # Use a regression algorithm to learn the weights first
    # (kernel='precomputed': the GS Gram matrix is supplied directly)
    print('Learning the regression weights ...')
    learner = KernelRidge(alpha, kernel='precomputed')
    gram_matrix = gs_kernel(dataset.X, dataset.X)
    learner.fit(gram_matrix, dataset.y)
    learned_weights = learner.dual_coef_
    # We can then use the string maximization model with the learned weights
    print('Branch and bound search for the top {} peptides of length {} ...'.format(n_predictions, y_length))
    model = StringMaximizationModel(alphabet, n, gs_kernel, max_time)
    model.fit(dataset.X, learned_weights, y_length)
    peptides, bioactivities = model.predict(n_predictions)
    print('\n')
    print('Peptides | Predicted bioactivities')
    for peptide, bioactivity in zip(peptides, bioactivities):
print(peptide, bioactivity) | {
"repo_name": "a-ro/preimage",
"path": "preimage/examples/peptide_bpps.py",
"copies": "1",
"size": "1727",
"license": "bsd-2-clause",
"hash": -5205383543192448000,
"line_mean": 36.5652173913,
"line_max": 109,
"alpha_frac": 0.7017950203,
"autogenerated": false,
"ratio": 3.6511627906976742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4852957810997674,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
from sklearn.kernel_ridge import KernelRidge
from preimage.datasets.loader import load_camps_dataset, AminoAcidFile
from preimage.kernels.generic_string import GenericStringKernel
from preimage.models.string_max_model import StringMaximizationModel
if __name__ == '__main__':
    # Best parameters found by cross-validation
    alpha = 1. / 1250
    n = 3
    sigma_position = 0.8
    sigma_amino_acid = 12.8
    # Choose the number of predicted peptides and their length
    n_predictions = 20
    y_length = 15
    # Max time (seconds) for the branch and bound search
    max_time = 500
    print('String maximization model on CAMPs dataset')
    gs_kernel = GenericStringKernel(AminoAcidFile.blosum62_natural, sigma_position, sigma_amino_acid, n,
                                    is_normalized=True)
    alphabet = gs_kernel.alphabet
    dataset = load_camps_dataset()
    # Use a regression algorithm to learn the weights first
    # (kernel='precomputed': the GS Gram matrix is supplied directly)
    print('Learning the regression weights ...')
    learner = KernelRidge(alpha, kernel='precomputed')
    gram_matrix = gs_kernel(dataset.X, dataset.X)
    learner.fit(gram_matrix, dataset.y)
    learned_weights = learner.dual_coef_
    # We can then use the string maximization model with the learned weights
    print('Branch and bound search for the top {} peptides of length {} ...'.format(n_predictions, y_length))
    model = StringMaximizationModel(alphabet, n, gs_kernel, max_time)
    model.fit(dataset.X, learned_weights, y_length)
    peptides, bioactivities = model.predict(n_predictions)
    print('\n')
    print('Peptides | Predicted bioactivities')
    for peptide, bioactivity in zip(peptides, bioactivities):
print(peptide, bioactivity) | {
"repo_name": "a-ro/preimage",
"path": "preimage/examples/peptide_camps.py",
"copies": "1",
"size": "1731",
"license": "bsd-2-clause",
"hash": 5851986476180057000,
"line_mean": 36.652173913,
"line_max": 109,
"alpha_frac": 0.703061814,
"autogenerated": false,
"ratio": 3.6829787234042555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48860405374042554,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
import abc
from sklearn.base import BaseEstimator
import numpy
from preimage.exceptions.n_gram import NoYLengthsError
class Model(BaseEstimator):
    """Abstract base for pre-image inference models.

    fit() records the min/max training string lengths, used when no target
    length is supplied at prediction time; subclasses implement predict().
    """
    # NOTE(review): __metaclass__ is Python 2 syntax and has no effect in
    # Python 3 (would need `class Model(BaseEstimator, metaclass=abc.ABCMeta)`),
    # so @abc.abstractmethod is not enforced here -- confirm the intended
    # Python version before changing.
    __metaclass__ = abc.ABCMeta
    def __init__(self, alphabet, n, is_using_length=True):
        self._n = n
        self._alphabet = alphabet
        self._is_using_length = is_using_length
        self._feature_space_ = None
        self._min_length_ = None
        self._max_length_ = None
    def fit(self, inference_parameters):
        self._find_min_max_length(inference_parameters.y_lengths, inference_parameters.Y_train)
    def _find_min_max_length(self, y_lengths, Y):
        # Fall back to the lengths of the training strings when none are given.
        if y_lengths is None:
            y_lengths = numpy.array([len(y) for y in Y])
        self._min_length_ = numpy.min(y_lengths)
        self._max_length_ = numpy.max(y_lengths)
    @abc.abstractmethod
    def predict(self, Y_weights, y_lengths):
        return
    def _verify_y_lengths_is_not_none_when_use_length(self, y_lengths):
        if self._is_using_length and y_lengths is None:
raise NoYLengthsError() | {
"repo_name": "a-ro/preimage",
"path": "preimage/models/model.py",
"copies": "1",
"size": "1092",
"license": "bsd-2-clause",
"hash": 782442731200210600,
"line_mean": 28.5405405405,
"line_max": 95,
"alpha_frac": 0.6401098901,
"autogenerated": false,
"ratio": 3.466666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4606776556766667,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
import numpy
from preimage.features.gs_similarity_weights import compute_gs_similarity_weights
from preimage.utils.alphabet import transform_strings_to_integer_lists, get_n_grams
# Shouldn't label this as "feature-space" since we don't use a sparse matrix representation here.
class GenericStringSimilarityFeatureSpace:
    """Output space for the Generic String kernel with position and n-gram similarity.

    Doesn't use a sparse matrix representation because it takes into account the similarity between the n-grams.
    This is used to compute the weights of the graph during the inference phase.

    Attributes
    ----------
    n : int
        N-gram length.
    is_normalized : bool
        True if the feature space should be normalized, False otherwise.
    max_train_length : int
        Length of the longest string in the training dataset.
    gs_kernel : GenericStringKernel
        Generic string kernel.
    """
    def __init__(self, alphabet, n, Y, is_normalized, gs_kernel):
        self.n = int(n)
        self.is_normalized = is_normalized
        self._y_lengths = numpy.array([len(y) for y in Y])
        self.max_train_length = numpy.max(self._y_lengths)
        self.gs_kernel = gs_kernel
        # Integer encodings of the training strings and of every possible n-gram.
        self._Y_int = transform_strings_to_integer_lists(Y, alphabet)
        self._n_grams_int = transform_strings_to_integer_lists(get_n_grams(alphabet, n), alphabet)
        self._n_gram_similarity_matrix = gs_kernel.get_alphabet_similarity_matrix()
        # Per-example normalization: sqrt of each string's self-similarity.
        if is_normalized:
            self._normalization = numpy.sqrt(gs_kernel.element_wise_kernel(Y))
    def compute_weights(self, y_weights, y_length):
        """Compute the inference graph weights.

        Parameters
        ----------
        y_weights : array, [n_samples]
            Weight of each training example.
        y_length : int
            Length of the string to predict.

        Returns
        -------
        gs_weights : [len(alphabet)**n, y_n_gram_count * len(alphabet)**n]
            Weight of each n-gram at each position, where y_n_gram_count is the number of n-grams in a string of
            length y_length.
        """
        normalized_weights = numpy.copy(y_weights)
        max_length = max(y_length, self.max_train_length)
        if self.is_normalized:
            normalized_weights *= 1. / self._normalization
        n_partitions = y_length - self.n + 1
        position_matrix = self.gs_kernel.get_position_matrix(max_length)
        gs_weights = compute_gs_similarity_weights(n_partitions, self._n_grams_int, self._Y_int, normalized_weights,
                                                   self._y_lengths, position_matrix, self._n_gram_similarity_matrix)
return numpy.array(gs_weights) | {
"repo_name": "a-ro/preimage",
"path": "preimage/features/gs_similarity_feature_space.py",
"copies": "1",
"size": "2709",
"license": "bsd-2-clause",
"hash": 8423391177972769000,
"line_mean": 42.7096774194,
"line_max": 116,
"alpha_frac": 0.6474713917,
"autogenerated": false,
"ratio": 3.9779735682819384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002284828936257013,
"num_lines": 62
} |
__author__ = 'amelie'
import numpy
from scipy import linalg
from sklearn.base import BaseEstimator
class StructuredKernelRidgeRegression(BaseEstimator):
    """Structured Kernel Ridge Regression.

    Attributes
    ----------
    alpha : float
        Regularization term.
    kernel : Callable
        Kernel function that computes the similarity between the samples.
    inference_model : Model
        Inference model used to solve the pre-image problem.
    weights_ : array, shape=[n_samples, n_samples]
        Learned weights, where n_samples is the number of training samples.
    X_train_ : array, shape=[n_samples, n_features]
        Training samples.
    """
    def __init__(self, alpha, kernel, inference_model):
        self.alpha = alpha
        self.kernel = kernel
        self.inference_model = inference_model
        self.weights_ = None
        self.X_train_ = None
    def fit(self, X, Y, y_lengths=None):
        """Learn the weights and fit the inference model.

        Parameters
        ----------
        X : array, shape=[n_samples, n_features]
            Training vectors, where n_samples is the number of samples and n_features is the number of features
            in X.
        Y : array, shape=[n_samples, ]
            Target strings, where n_samples is the number of training samples.
        y_lengths : array, shape=[n_samples]
            Length of the training strings.

        Returns
        -------
        self : StructuredKernelRidgeRegression
            The fitted estimator.
        """
        gram_matrix = self.kernel(X, X)
        self.weights_ = self._solve(gram_matrix)
        self.X_train_ = X
        inference_parameters = InferenceFitParameters(self.weights_, gram_matrix, Y, y_lengths)
        self.inference_model.fit(inference_parameters)
        return self
    def _solve(self, gram_matrix):
        # Invert (K + alpha * I); the diagonal is restored afterwards so the
        # caller's gram_matrix is left unmodified.
        # NOTE(review): an explicit inverse via linalg.inv -- confirm the full
        # inverse is needed downstream; linalg.solve would be cheaper/stabler
        # if only weights_ @ k(X, x) products are ever used.
        diagonal = numpy.copy(gram_matrix.diagonal())
        numpy.fill_diagonal(gram_matrix, diagonal + self.alpha)
        weights = linalg.inv(gram_matrix)
        numpy.fill_diagonal(gram_matrix, diagonal)
        return weights
    def predict(self, X, y_lengths=None):
        """Predict the target strings.

        Parameters
        ----------
        X : array, shape=[n_samples, n_features]
            Testing vectors, where n_samples is the number of samples and n_features is the number of features
            in X.
        y_lengths : array, shape=[n_samples]
            Length of the strings to predict, where n_samples is the number of testing samples.

        Returns
        -------
        Y_predicted : array, shape = [n_samples]
            Predicted strings, where n_samples is the number of testing samples.
        """
        if self.weights_ is None:
            raise ValueError("The fit function must be called before predict")
        gram_matrix = self.kernel(self.X_train_, X)
        Y_weights = numpy.dot(self.weights_, gram_matrix).T
        Y_predicted = self.inference_model.predict(Y_weights, y_lengths)
        return Y_predicted
class InferenceFitParameters:
    """Parameter bundle passed to the inference model's fit method.

    Grouping the fit arguments in a single object lets
    ``inference_model.fit(parameters)`` access only the fields it needs,
    instead of receiving unused positional arguments.

    Attributes
    ----------
    weights : array, shape = [n_samples, n_samples]
        Learned weights, where n_samples is the number of training samples.
    gram_matrix : array, shape = [n_samples, n_samples]
        Gram matrix of the training samples.
    Y_train : array, shape = [n_samples, ]
        Training strings.
    y_lengths : array, shape = [n_samples]
        Length of each training string in Y_train.
    """
    def __init__(self, weights, gram_matrix, Y, y_lengths):
        self.weights = weights
        self.gram_matrix = gram_matrix
        self.Y_train = Y
        self.y_lengths = y_lengths | {
"repo_name": "a-ro/preimage",
"path": "preimage/learners/structured_krr.py",
"copies": "1",
"size": "3990",
"license": "bsd-2-clause",
"hash": 7025225755281436000,
"line_mean": 35.9537037037,
"line_max": 115,
"alpha_frac": 0.622556391,
"autogenerated": false,
"ratio": 4.109165808444902,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5231722199444903,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
import numpy
from sklearn.base import BaseEstimator
class PolynomialKernel(BaseEstimator):
    """Polynomial kernel.

    Attributes
    ----------
    degree : int
        Degree of the polynomial.
    bias : float
        Constant term added to the dot product before exponentiation.
    is_normalized : bool
        True if the kernel should be normalized, False otherwise.
    """
    def __init__(self, degree=2, bias=1., is_normalized=True):
        self.degree = degree
        self.bias = bias
        self.is_normalized = is_normalized
    def __call__(self, X_one, X_two):
        """Compute the similarity of all the vectors in X_one with all the vectors in X_two.

        Parameters
        ----------
        X_one : array, shape=[n_samples, n_features]
            Vectors, where n_samples is the number of samples in X_one and n_features is the number of features.
        X_two : array, shape=[n_samples, n_features]
            Vectors, where n_samples is the number of samples in X_two and n_features is the number of features.

        Returns
        -------
        gram_matrix : array, shape = [n_samples_x1, n_samples_x2]
            Similarity of each vector of X_one with each vector of X_two, where n_samples_x1 is the number of
            samples in X_one and n_samples_x2 is the number of samples in X_two.
        """
        X_one = numpy.array(X_one)
        X_two = numpy.array(X_two)
        # (x . y + bias) ** degree for every pair, computed as one matrix product.
        gram_matrix = (numpy.dot(X_one, X_two.T) + self.bias) ** self.degree
        if self.is_normalized:
            gram_matrix = self._normalize_gram_matrix(X_one, X_two, gram_matrix)
        return gram_matrix
    def _normalize_gram_matrix(self, X_one, X_two, gram_matrix):
        # Divide each entry k(x, y) by sqrt(k(x, x) * k(y, y)) so the
        # self-similarity of every vector becomes 1.
        x_one_diagonal = self._compute_element_wise_similarity(X_one)
        x_two_diagonal = self._compute_element_wise_similarity(X_two)
        gram_matrix = ((gram_matrix / numpy.sqrt(x_one_diagonal)).T / numpy.sqrt(x_two_diagonal)).T
        return gram_matrix
    def _compute_element_wise_similarity(self, X):
        # k(x, x) for each row x of X, returned as a column vector.
        x_x_similarity = ((X * X).sum(axis=1) + self.bias) ** self.degree
        x_x_similarity = x_x_similarity.reshape(-1, 1)
        return x_x_similarity | {
"repo_name": "a-ro/preimage",
"path": "preimage/kernels/polynomial.py",
"copies": "1",
"size": "2100",
"license": "bsd-2-clause",
"hash": -3924141016036158500,
"line_mean": 36.5178571429,
"line_max": 117,
"alpha_frac": 0.6114285714,
"autogenerated": false,
"ratio": 3.5413153456998314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46527439170998314,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
import numpy
from preimage.datasets.loader import load_amino_acids_and_descriptors
from preimage.kernels._generic_string import element_wise_generic_string_kernel, generic_string_kernel_with_sigma_c
from preimage.kernels._generic_string import element_wise_generic_string_kernel_with_sigma_c
from preimage.datasets.amino_acid_file import AminoAcidFile
from preimage.utils.position import compute_position_weights_matrix
from preimage.utils.alphabet import transform_strings_to_integer_lists
def element_wise_kernel(X, sigma_position, n, alphabet):
    """Compute, for each string of X, its similarity with itself in the Generic String kernel.

    Only the position penalty and n-grams of length exactly n are taken into
    account; no n-gram similarity term (no sigma_c).

    Parameters
    ----------
    X : array, shape = [n_samples]
        Strings, where n_samples is the number of examples in X.
    sigma_position : float
        Controls the penalty incurred when two n-grams are not sharing the same position.
    n : int
        N-gram length.
    alphabet : list
        List of letters.

    Returns
    -------
    kernel : array, shape = [n_samples]
        Similarity of each string with itself in the GS kernel.
    """
    strings = numpy.array(X)
    string_lengths = numpy.fromiter((len(s) for s in strings), dtype=numpy.int64, count=len(strings))
    # Number of n-grams in the longest string; the position-weight matrix
    # only needs to cover that many starting positions.
    largest_n_gram_count = numpy.max(string_lengths) - n + 1
    position_weights = compute_position_weights_matrix(largest_n_gram_count, sigma_position)
    encoded_strings = transform_strings_to_integer_lists(strings, alphabet)
    return element_wise_generic_string_kernel(encoded_strings, string_lengths, position_weights, n)
class GenericStringKernel:
    """Generic String Kernel.

    Computes the similarity between two strings by comparing each of their l-gram of length 1 to n. Each l-gram
    comparison yields a score that depends on the similarity of their respective amino acids (letters) and a shifting
    contribution term that decays exponentially rapidly with the distance between the starting positions of the two
    substrings. The sigma_position parameter controls the shifting contribution term. The sigma_amino_acid parameter
    controls the amount of penalty incurred when the encoding vectors differ as measured by the squared Euclidean
    distance between these two vectors. The GS kernel outputs the sum of all the l-gram-comparison scores.

    Attributes
    ----------
    amino_acid_file_name : string
        Name of the file containing the amino acid substitution matrix.
    sigma_position : float
        Controls the penalty incurred when two n-grams are not sharing the same position.
    sigma_amino_acid : float
        Controls the penalty incurred when the encoding vectors of two amino acids differ.
    n : int
        N-gram length.
    is_normalized : bool
        True if the kernel should be normalized, False otherwise.

    Notes
    -----
    See http://graal.ift.ulaval.ca/bioinformatics/gs-kernel/ for the original code developed by Sebastien Giguere [1]_.

    References
    ----------
    .. [1] Sebastien Giguere, Mario Marchand, Francois Laviolette, Alexandre Drouin, and Jacques Corbeil. "Learning a
       peptide-protein binding affinity predictor with kernel ridge regression." BMC bioinformatics 14, no. 1 (2013):
       82.
    """
    def __init__(self, amino_acid_file_name=AminoAcidFile.blosum62_natural, sigma_position=1.0, sigma_amino_acid=1.0,
                 n=2, is_normalized=True):
        self.amino_acid_file_name = amino_acid_file_name
        self.sigma_position = sigma_position
        self.sigma_amino_acid = sigma_amino_acid
        self.n = n
        self.is_normalized = is_normalized
        # Descriptors are L2-normalized once at construction time.
        self.alphabet, self.descriptors = self._load_amino_acids_and_normalized_descriptors()
    def __call__(self, X1, X2):
        """Compute the similarity of all the strings of X1 with all the strings of X2 in the Generic String Kernel.

        Parameters
        ----------
        X1 : array, shape=[n_samples, ]
            Strings, where n_samples is the number of samples in X1.
        X2 : array, shape=[n_samples, ]
            Strings, where n_samples is the number of samples in X2.

        Returns
        -------
        gram_matrix : array, shape = [n_samples_x1, n_samples_x2]
            Similarity of each string of X1 with each string of X2, n_samples_x1 is the number of samples in X1 and
            n_samples_x2 is the number of samples in X2.
        """
        X1 = numpy.array(X1)
        X2 = numpy.array(X2)
        amino_acid_similarity_matrix = self.get_alphabet_similarity_matrix()
        # When X1 == X2 the Gram matrix is symmetric, which the C kernel can
        # exploit, and its diagonal can be reused for normalization.
        is_symmetric = bool(X1.shape == X2.shape and numpy.all(X1 == X2))
        max_length, x1_lengths, x2_lengths = self._get_lengths(X1, X2)
        position_matrix = self.get_position_matrix(max_length)
        X1_int = transform_strings_to_integer_lists(X1, self.alphabet)
        X2_int = transform_strings_to_integer_lists(X2, self.alphabet)
        gram_matrix = generic_string_kernel_with_sigma_c(X1_int, x1_lengths, X2_int, x2_lengths, position_matrix,
                                                         amino_acid_similarity_matrix, self.n, is_symmetric)
        gram_matrix = self._normalize(gram_matrix, X1_int, x1_lengths, X2_int, x2_lengths, position_matrix,
                                      amino_acid_similarity_matrix, is_symmetric)
        return gram_matrix
    def get_position_matrix(self, max_length):
        """Compute the position similarity weights.

        Parameters
        ----------
        max_length : int
            Maximum position.

        Returns
        -------
        position_matrix : array, shape = [max_length, max_length]
            Similarity of each position with all the other positions.
        """
        position_matrix = compute_position_weights_matrix(max_length, self.sigma_position)
        return position_matrix
    def get_alphabet_similarity_matrix(self):
        """Compute the alphabet similarity weights.

        Returns
        -------
        similarity_matrix : array, shape = [len(alphabet), len(alphabet)]
            Similarity of each amino acid (letter) with all the other amino acids.
        """
        distance_matrix = numpy.zeros((len(self.alphabet), len(self.alphabet)))
        # NOTE(review): redundant — the matrix is already all zeros.
        numpy.fill_diagonal(distance_matrix, 0)
        # Pairwise squared Euclidean distances between descriptor vectors.
        for index_one, descriptor_one in enumerate(self.descriptors):
            for index_two, descriptor_two in enumerate(self.descriptors):
                distance = descriptor_one - descriptor_two
                squared_distance = numpy.dot(distance, distance)
                distance_matrix[index_one, index_two] = squared_distance
        # Gaussian (RBF) transform of the squared distances.
        distance_matrix /= 2. * (self.sigma_amino_acid ** 2)
        return numpy.exp(-distance_matrix)
    def _load_amino_acids_and_normalized_descriptors(self):
        # L2-normalize each descriptor so that dot(d, d) == 1.
        amino_acids, descriptors = load_amino_acids_and_descriptors(self.amino_acid_file_name)
        # NOTE(review): numpy.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float is the safe spelling here.
        normalization = numpy.array([numpy.dot(descriptor, descriptor) for descriptor in descriptors],
                                    dtype=numpy.float)
        normalization = normalization.reshape(-1, 1)
        descriptors /= numpy.sqrt(normalization)
        return amino_acids, descriptors
    def _get_lengths(self, X1, X2):
        # Lengths of every string plus the overall maximum (sizes the position matrix).
        x1_lengths = numpy.array([len(x) for x in X1], dtype=numpy.int64)
        x2_lengths = numpy.array([len(x) for x in X2], dtype=numpy.int64)
        max_length = max(numpy.max(x1_lengths), numpy.max(x2_lengths))
        return max_length, x1_lengths, x2_lengths
    def _normalize(self, gram_matrix, X1, x1_lengths, X2, x2_lengths, position_matrix, similarity_matrix, is_symmetric):
        # Divide each entry k(x, y) by sqrt(k(x, x) * k(y, y)); in the
        # symmetric case the self-similarities are just the diagonal.
        if self.is_normalized:
            if is_symmetric:
                x1_norm = gram_matrix.diagonal()
                x2_norm = x1_norm
            else:
                x1_norm = element_wise_generic_string_kernel_with_sigma_c(X1, x1_lengths, position_matrix,
                                                                          similarity_matrix, self.n)
                x2_norm = element_wise_generic_string_kernel_with_sigma_c(X2, x2_lengths, position_matrix,
                                                                          similarity_matrix, self.n)
            gram_matrix = ((gram_matrix / numpy.sqrt(x2_norm)).T / numpy.sqrt(x1_norm)).T
        return gram_matrix
    def element_wise_kernel(self, X):
        """Compute the similarity of each string of X with itself in the Generic String kernel.

        Parameters
        ----------
        X : array, shape = [n_samples]
            Strings, where n_samples is the number of examples in X.

        Returns
        -------
        kernel : array, shape = [n_samples]
            Similarity of each string with itself in the GS kernel, where n_samples is the number of examples in X.
        """
        X = numpy.array(X)
        X_int = transform_strings_to_integer_lists(X, self.alphabet)
        x_lengths = numpy.array([len(x) for x in X], dtype=numpy.int64)
        max_length = numpy.max(x_lengths)
        similarity_matrix = self.get_alphabet_similarity_matrix()
        position_matrix = self.get_position_matrix(max_length)
        kernel = element_wise_generic_string_kernel_with_sigma_c(X_int, x_lengths, position_matrix, similarity_matrix,
                                                                 self.n)
        return kernel | {
"repo_name": "a-ro/preimage",
"path": "preimage/kernels/generic_string.py",
"copies": "1",
"size": "9418",
"license": "bsd-2-clause",
"hash": 2481450509282818000,
"line_mean": 46.8121827411,
"line_max": 120,
"alpha_frac": 0.6403695052,
"autogenerated": false,
"ratio": 3.842513259893921,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9971838554849384,
"avg_score": 0.0022088420489076014,
"num_lines": 197
} |
__author__ = 'amelie'
import numpy
from preimage.features.string_feature_space import build_feature_space_without_positions
class NGramFeatureSpace:
    """Output feature space for the N-Gram Kernel.

    Creates a sparse matrix representation of the n-grams in each training string. This is used to compute the weights
    of the graph during the inference phase.

    Attributes
    ----------
    feature_space : sparse matrix, shape = [n_samples, len(alphabet)**n]
        Sparse matrix representation of the n-grams in each training string, where n_samples is the number of training
        samples.
    """
    def __init__(self, alphabet, n, Y, is_normalized):
        """Create the output feature space for the N-Gram Kernel.

        Parameters
        ----------
        alphabet : list
            List of letters.
        n : int
            N-gram length.
        Y : array, [n_samples, ]
            The training strings.
        is_normalized : bool
            True if the feature space should be normalized, False otherwise.
        """
        self.feature_space = build_feature_space_without_positions(alphabet, n, Y)
        self._normalize(is_normalized, self.feature_space)
    def _normalize(self, is_normalized, feature_space):
        # Scale each row so its L2 norm becomes 1. The per-row factor is
        # expanded to per-nonzero via the indptr row boundaries (assumes CSR
        # layout from the feature-space builder — TODO confirm).
        if is_normalized:
            y_normalization = self._get_y_normalization(feature_space)
            data_normalization = y_normalization.repeat(numpy.diff(feature_space.indptr))
            feature_space.data *= data_normalization
    def _get_y_normalization(self, feature_space):
        # 1 / sqrt(sum of squared entries) for each row.
        y_normalization = (feature_space.multiply(feature_space)).sum(axis=1)
        y_normalization = 1. / numpy.sqrt(numpy.array((y_normalization.reshape(1, -1))[0]))
        return y_normalization
    def compute_weights(self, y_weights):
        """Compute the inference graph weights.

        Parameters
        ----------
        y_weights : array, [n_samples]
            Weight of each training example.

        Returns
        -------
        n_gram_weights : [len(alphabet)**n]
            Weight of each n-gram.
        """
        # Temporarily scale the stored data in place, sum the weighted rows,
        # then restore the original data so the matrix is left unchanged.
        data_copy = numpy.copy(self.feature_space.data)
        self.feature_space.data *= self._repeat_each_y_weight_by_y_column_count(y_weights)
        n_gram_weights = numpy.array(self.feature_space.sum(axis=0))[0]
        self.feature_space.data = data_copy
        return n_gram_weights
    def _repeat_each_y_weight_by_y_column_count(self, y_weights):
        # Expand one weight per row into one weight per stored nonzero.
        return y_weights.repeat(numpy.diff(self.feature_space.indptr)) | {
"repo_name": "a-ro/preimage",
"path": "preimage/features/n_gram_feature_space.py",
"copies": "1",
"size": "2516",
"license": "bsd-2-clause",
"hash": -6160463989368870000,
"line_mean": 35.4782608696,
"line_max": 118,
"alpha_frac": 0.6323529412,
"autogenerated": false,
"ratio": 4.064620355411955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5196973296611954,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
import numpy
from preimage.features.string_feature_space import build_feature_space_with_positions
from preimage.utils.position import compute_position_weights
from preimage.kernels.generic_string import element_wise_kernel
class GenericStringFeatureSpace:
    """Output feature space for the Generic String kernel with position weights.

    Creates a sparse matrix representation of the n-grams in each training string. The representation takes in account
    the positions of the n-grams in the strings. This is used to compute the weights of the graph during the inference
    phase. This doesn't take in account the similarity between the n-grams (no sigma_c).

    Attributes
    ----------
    n : int
        N-gram length.
    sigma_position : float
        Parameter of the Generic String Kernel controlling the penalty incurred when two n-grams are not sharing the
        same position.
    max_n_gram_count : int
        The number of n-grams in the training string of highest length.
    feature_space : sparse matrix, shape = [n_samples, max_n_gram_count * len(alphabet)**n]
        Sparse matrix representation of the n-grams in each training string, where n_samples is the number of training
        samples.
    """
    def __init__(self, alphabet, n, Y, sigma_position, is_normalized):
        """Create the output feature space for the Generic String kernel.

        Parameters
        ----------
        alphabet : list
            List of letters.
        n : int
            N-gram length.
        Y : array, [n_samples, ]
            The training strings.
        sigma_position : float
            Parameter of the Generic String Kernel controlling the penalty incurred when two n-grams are not sharing
            the same position.
        is_normalized : bool
            True if the feature space should be normalized, False otherwise.
        """
        self.n = int(n)
        self.sigma_position = sigma_position
        self._alphabet_n_gram_count = len(alphabet) ** n
        self.feature_space = build_feature_space_with_positions(alphabet, self.n, Y)
        self.max_n_gram_count = self._get_max_n_gram_count(self._alphabet_n_gram_count, self.feature_space)
        self._normalize(self.feature_space, self.n, Y, sigma_position, is_normalized, alphabet)
    def _get_max_n_gram_count(self, alphabet_n_gram_count, feature_space):
        # Columns are grouped per position: one group of len(alphabet)**n
        # columns for each n-gram starting position.
        n_columns = feature_space.shape[1]
        max_n_gram_count = int(n_columns / alphabet_n_gram_count)
        return max_n_gram_count
    def _normalize(self, feature_space, n, Y, sigma_position, is_normalized, alphabet):
        # Scale each row by 1 / sqrt(k(y, y)) where k is the element-wise GS
        # kernel (position penalty only). The per-row factor is expanded to
        # per-nonzero via the indptr row boundaries (assumes CSR layout —
        # TODO confirm against the feature-space builder).
        if is_normalized:
            y_y_similarity = element_wise_kernel(Y, sigma_position, n, alphabet)
            y_normalization = 1. / numpy.sqrt(y_y_similarity)
            data_normalization = y_normalization.repeat(numpy.diff(feature_space.indptr))
            feature_space.data *= data_normalization
    def _get_n_gram_count_in_each_y(self, n, Y):
        # NOTE(review): not called anywhere in this class — possibly dead code
        # or kept for subclasses; verify before removing.
        y_n_gram_counts = numpy.array([len(y) - n + 1 for y in Y])
        return y_n_gram_counts
    def compute_weights(self, y_weights, y_length):
        """Compute the inference graph weights.

        Parameters
        ----------
        y_weights : array, [n_samples]
            Weight of each training example.
        y_length : int
            Length of the string to predict.

        Returns
        -------
        gs_weights : [len(alphabet)**n, y_n_gram_count * len(alphabet)**n]
            Weight of each n-gram at each position.
        """
        y_n_gram_count = y_length - self.n + 1
        # Temporarily scale the stored data in place, sum the weighted rows,
        # then restore the original data so the matrix is left unchanged.
        data_copy = numpy.copy(self.feature_space.data)
        self.feature_space.data *= self._repeat_each_y_weight_by_y_column_count(y_weights)
        weighted_degree_weights = numpy.array(self.feature_space.sum(axis=0))[0].reshape(self.max_n_gram_count, -1)
        self.feature_space.data = data_copy
        gs_weights = self._transform_in_gs_weights(y_n_gram_count, weighted_degree_weights)
        return gs_weights
    def _transform_in_gs_weights(self, y_n_gram_count, weighted_degree_weights):
        # For each output position i, blend the per-position weights with the
        # position-decay profile centered at i.
        gs_weights = numpy.empty((y_n_gram_count, self._alphabet_n_gram_count))
        for i in range(y_n_gram_count):
            position_weights = compute_position_weights(i, self.max_n_gram_count, self.sigma_position).reshape(-1, 1)
            gs_weights[i, :] = (weighted_degree_weights * position_weights).sum(axis=0)
        return gs_weights
    def _repeat_each_y_weight_by_y_column_count(self, y_weights):
        # Expand one weight per row into one weight per stored nonzero.
        return y_weights.repeat(numpy.diff(self.feature_space.indptr)) | {
"repo_name": "a-ro/preimage",
"path": "preimage/features/gs_feature_space.py",
"copies": "1",
"size": "4589",
"license": "bsd-2-clause",
"hash": -3927799469521612000,
"line_mean": 44,
"line_max": 120,
"alpha_frac": 0.6513401613,
"autogenerated": false,
"ratio": 3.817803660565724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4969143821865724,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
import numpy
from preimage.features.string_feature_space import build_feature_space_with_positions
class WeightedDegreeFeatureSpace:
    """Output feature space for the Weighted Degree kernel.

    Creates a sparse matrix representation of the n-grams in each training string. The representation takes in account
    the positions of the n-grams in the strings. This is used to compute the weights of the graph during the inference
    phase.

    Attributes
    ----------
    n : int
        N-gram length.
    max_n_gram_count : int
        Number of n-grams in the training string of highest length.
    feature_space : sparse matrix, shape = [n_samples, max_n_gram_count * len(alphabet)**n]
        Sparse matrix representation of the n-grams in each training string, where n_samples is the number of training
        samples.
    """
    def __init__(self, alphabet, n, Y, is_normalized):
        """Create the output feature space for the Weighted Degree kernel.

        Parameters
        ----------
        alphabet : list
            List of letters.
        n : int
            N-gram length.
        Y : array, [n_samples, ]
            The training strings.
        is_normalized : bool
            True if the feature space should be normalized, False otherwise.
        """
        self.n = int(n)
        self._alphabet_n_gram_count = len(alphabet) ** n
        self.feature_space = build_feature_space_with_positions(alphabet, self.n, Y)
        self._normalize(is_normalized, self.feature_space)
        self.max_n_gram_count = self._get_max_n_gram_count(self._alphabet_n_gram_count, self.feature_space)
    def _get_max_n_gram_count(self, alphabet_n_gram_count, feature_space):
        # Columns are grouped per position: one group of len(alphabet)**n
        # columns for each n-gram starting position.
        n_columns = feature_space.shape[1]
        max_n_gram_count = int(n_columns / alphabet_n_gram_count)
        return max_n_gram_count
    def _normalize(self, is_normalized, feature_space):
        # Scale each row by 1 / sqrt(row sum). The per-row factor is expanded
        # to per-nonzero via the indptr row boundaries (assumes CSR layout —
        # TODO confirm against the feature-space builder).
        if is_normalized:
            y_normalization = 1. / numpy.sqrt(numpy.array(feature_space.sum(axis=1).reshape(1, -1))[0])
            data_normalization = y_normalization.repeat(numpy.diff(feature_space.indptr))
            feature_space.data *= data_normalization
    def compute_weights(self, y_weights, y_length):
        """Compute the inference graph weights.

        Parameters
        ----------
        y_weights : array, [n_samples]
            Weight of each training example.
        y_length : int
            Length of the string to predict.

        Returns
        -------
        weighted_degree_weights : [len(alphabet)**n, y_n_gram_count * len(alphabet)**n]
            Weight of each n-gram at each position.
        """
        y_n_gram_count = y_length - self.n + 1
        # Temporarily scale the stored data in place, sum the weighted rows,
        # then restore the original data so the matrix is left unchanged.
        data_copy = numpy.copy(self.feature_space.data)
        self.feature_space.data *= self._repeat_each_y_weight_by_y_column_count(y_weights)
        weights_vector = numpy.array(self.feature_space.sum(axis=0))[0]
        self.feature_space.data = data_copy
        weighted_degree_weights = self._get_weight_for_each_graph_partition(y_n_gram_count, weights_vector)
        return weighted_degree_weights
    def _get_weight_for_each_graph_partition(self, y_n_gram_count, weights_vector):
        # Reshape to [position, n-gram]; truncate to the requested length, or
        # pad with zero-weight positions when the target string is longer than
        # any training string.
        weights_matrix = weights_vector.reshape(self.max_n_gram_count, -1)
        if y_n_gram_count <= self.max_n_gram_count:
            weights_matrix = weights_matrix[0:y_n_gram_count, :]
        else:
            zero_weight_partitions = numpy.zeros((y_n_gram_count - self.max_n_gram_count, self._alphabet_n_gram_count))
            weights_matrix = numpy.concatenate((weights_matrix, zero_weight_partitions), axis=0)
        return weights_matrix
    def _repeat_each_y_weight_by_y_column_count(self, y_weights):
        # Expand one weight per row into one weight per stored nonzero.
        return y_weights.repeat(numpy.diff(self.feature_space.indptr)) | {
"repo_name": "a-ro/preimage",
"path": "preimage/features/weighted_degree_feature_space.py",
"copies": "1",
"size": "3801",
"license": "bsd-2-clause",
"hash": 4884380261159694000,
"line_mean": 41.7191011236,
"line_max": 119,
"alpha_frac": 0.6400947119,
"autogenerated": false,
"ratio": 3.7522211253701876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48923158372701875,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
import numpy
from preimage.inference.euler import EulerianPath
from preimage.features.n_gram_feature_space import NGramFeatureSpace
from preimage.models.model import Model
from preimage.inference.graph_builder import GraphBuilder
from preimage.utils.alphabet import get_n_gram_to_index, get_n_grams
class EulerianPathModel(Model):
    """Pre-image model that reconstructs strings via Eulerian paths.

    Uses an NGramFeatureSpace to turn example weights into n-gram weights and
    delegates string reconstruction to EulerianPath.find_eulerian_path. When
    the target lengths are unknown, per-n-gram thresholds learned in fit()
    decide which n-grams participate in the path.
    """
    def __init__(self, alphabet, n, is_using_length=True, seed=42):
        Model.__init__(self, alphabet, n, is_using_length)
        self._graph_builder = GraphBuilder(alphabet, n)
        self._is_normalized = False
        self._is_merging_path = True
        self._seed = seed
        self._n_grams = list(get_n_grams(alphabet, n))
        self._n_gram_to_index = get_n_gram_to_index(alphabet, n)
        # Set by _find_thresholds() when lengths are not used.
        self._thresholds_ = None
    def fit(self, inference_parameters):
        Model.fit(self, inference_parameters)
        self._feature_space_ = NGramFeatureSpace(self._alphabet, self._n, inference_parameters.Y_train,
                                                 self._is_normalized)
        # Thresholds are only needed when predicted lengths are unknown.
        if not self._is_using_length:
            Y_weights = numpy.dot(inference_parameters.weights, inference_parameters.gram_matrix).T
            self._find_thresholds(Y_weights)
    def _find_thresholds(self, Y_weights):
        # For each n-gram, find the weight value such that exactly its training
        # occurrence count of examples score at or above it.
        n_examples = Y_weights.shape[0]
        Y_n_gram_weights = self._get_n_gram_weights(Y_weights, n_examples)
        n_gram_counts = self._feature_space_.compute_weights(numpy.ones(n_examples))
        # NOTE(review): numpy.int was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin int is the safe spelling here.
        n_gram_counts = numpy.array(n_gram_counts, dtype=numpy.int)
        self._thresholds_ = self._find_weights_where_sum_weights_above_is_n_gram_count(n_gram_counts, Y_n_gram_weights)
    def _get_n_gram_weights(self, Y_weights, n_training_examples):
        # One row of n-gram weights per training example.
        Y_n_gram_weights = numpy.empty((n_training_examples, len(self._alphabet) ** self._n))
        for y_index, y_weight in enumerate(Y_weights):
            Y_n_gram_weights[y_index] = self._feature_space_.compute_weights(y_weight)
        return Y_n_gram_weights
    def _find_weights_where_sum_weights_above_is_n_gram_count(self, n_gram_counts, Y_n_gram_weights):
        thresholds = numpy.zeros(len(self._alphabet) ** self._n)
        for n_gram_index, n_gram_count in enumerate(n_gram_counts):
            if n_gram_count > 0:
                n_gram_weights = Y_n_gram_weights[:, n_gram_index]
                # After partitioning the negated weights, position n_gram_count
                # holds the (n_gram_count + 1)-th largest weight: the first one
                # strictly below the top n_gram_count values.
                threshold_index = numpy.argpartition(-n_gram_weights, n_gram_count)[n_gram_count]
                thresholds[n_gram_index] = n_gram_weights[threshold_index]
        return thresholds
    def predict(self, Y_weights, y_lengths):
        # Dispatch on whether target lengths are available; without lengths the
        # learned thresholds bound the Eulerian path instead.
        if self._is_using_length:
            self._verify_y_lengths_is_not_none_when_use_length(y_lengths)
            Y_predictions = self._predict_with_length(Y_weights, y_lengths)
        else:
            Y_predictions = self._predict_without_length(Y_weights)
        return Y_predictions
    def _predict_with_length(self, Y_weights, y_lengths):
        Y_predictions = []
        eulerian_path = EulerianPath(self._alphabet, self._n, self._min_length_, self._is_merging_path)
        for y_weights, y_length in zip(Y_weights, y_lengths):
            n_gram_weights = self._feature_space_.compute_weights(y_weights)
            y_predicted = eulerian_path.find_eulerian_path(n_gram_weights, y_length=y_length)
            Y_predictions.append(y_predicted)
        return Y_predictions
    def _predict_without_length(self, Y_weights):
        Y_predictions = []
        eulerian_path = EulerianPath(self._alphabet, self._n, self._min_length_, self._is_merging_path)
        for y_weights in Y_weights:
            n_gram_weights = self._feature_space_.compute_weights(y_weights)
            y_predicted = eulerian_path.find_eulerian_path(n_gram_weights, thresholds=self._thresholds_)
            Y_predictions.append(y_predicted)
        return Y_predictions | {
"repo_name": "a-ro/preimage",
"path": "preimage/models/eulerian_path_model.py",
"copies": "1",
"size": "3854",
"license": "bsd-2-clause",
"hash": 4846979557600620000,
"line_mean": 48.4230769231,
"line_max": 119,
"alpha_frac": 0.6502335236,
"autogenerated": false,
"ratio": 3.4166666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45669001902666667,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
import numpy
from preimage.utils.alphabet import get_index_to_n_gram
from preimage.exceptions.shape import InvalidShapeError
from preimage.exceptions.n_gram import InvalidYLengthError, InvalidMinLengthError
class GraphBuilder:
"""Graph builder for the pre-image of multiple string kernels.
Solves the pre-image problem of string kernels with constant norms (Hamming, Weighted Degree). For string kernel
where the norm is not constant, it builds a graph that can be used to compute the bounds of partial solutions in
a branch and bound search. The graph is constructed by dynamic programming.
Attributes
----------
alphabet : list
List of letters.
n : int
N-gram length.
"""
def __init__(self, alphabet, n):
self.alphabet = alphabet
self.n = int(n)
self._n_gram_count = len(self.alphabet) ** self.n
self._entering_edges = self._get_entering_edge_indexes(self._n_gram_count, alphabet, n)
self._index_to_n_gram = get_index_to_n_gram(alphabet, self.n)
self._n_gram_indexes = 0 if n == 1 else numpy.arange(0, self._n_gram_count)
def _get_entering_edge_indexes(self, n_gram_count, alphabet, n):
if n == 1:
entering_edges = numpy.array([numpy.arange(0, len(alphabet))])
else:
step_size = len(self.alphabet) ** (n - 1)
entering_edges = [numpy.tile(numpy.arange(i, n_gram_count, step_size), len(alphabet))
for i in range(step_size)]
entering_edges = numpy.array(entering_edges).reshape(n_gram_count, len(alphabet))
return entering_edges
def build_graph(self, graph_weights, y_length):
"""Build the graph for the bound computation of the branch and bound search.
Parameters
----------
graph_weights : array, shape=[len(alphabet)**n] or [n_partitions, len(alphabet)**n]
Weight of each n-gram at each position, where n_partitions is the number of n_gram in y_length. If all
positions have the same weight (n-gram kernel), the array has the shape=[len(alphabet)**n].
y_length : int
Length of the string to predict.
Returns
-------
graph: array, [n_partitions, len(alphabet)**n]
Array representation of the graph. graph[i, j] represents the maximum value of a string of length i + n
ending with the jth n-gram.
"""
n_partitions = y_length - self.n + 1
self._verify_graph_weights_and_y_length(graph_weights, n_partitions)
graph = self._initialize_graph(n_partitions, graph_weights)
self._build_graph(n_partitions, graph, graph_weights)
return graph
def _build_graph(self, n_partitions, graph, graph_weights):
for i in range(1, n_partitions):
graph[i, :] = numpy.max(graph[i - 1, self._entering_edges], axis=1) + self._get_weights(i, graph_weights)
def find_max_string(self, graph_weights, y_length):
"""Construct the graph and find the string of maximum value.
Solves the pre-image of string kernels with constant norms (Hamming, Weighted Degree).
Parameters
----------
graph_weights : array, shape=[len(alphabet)**n] or [n_partitions, len(alphabet)**n]
Weight of each n-gram at each position, where n_partitions is the number of n_gram in y_length. If all
positions have the same weight (n-gram kernel), the array has the shape=[len(alphabet)**n].
y_length : int
Length of the string to predict.
Returns
-------
y: string
The predicted string.
"""
n_partitions = y_length - self.n + 1
self._verify_graph_weights_and_y_length(graph_weights, n_partitions)
graph = self._initialize_graph(2, graph_weights)
predecessors = numpy.empty((n_partitions - 1, self._n_gram_count), dtype=numpy.int)
self._build_graph_with_predecessors(n_partitions, graph, graph_weights, predecessors)
partition_index, n_gram_index = self._get_max_string_end_indexes(graph, n_partitions)
max_string = self._build_max_string(partition_index, n_gram_index, predecessors)
return max_string
def _build_graph_with_predecessors(self, n_partitions, graph, graph_weights, predecessors):
for i in range(1, n_partitions):
max_entering_edge_indexes = numpy.argmax(graph[0, self._entering_edges], axis=1)
predecessors[i - 1, :] = self._entering_edges[self._n_gram_indexes, max_entering_edge_indexes]
graph[1, :] = graph[0, predecessors[i - 1, :]] + self._get_weights(i, graph_weights)
graph[0, :] = graph[1, :]
def _get_max_string_end_indexes(self, graph, n_partitions):
n_gram_index = numpy.argmax(graph[0, :])
partition_index = n_partitions - 2
return partition_index, n_gram_index
def find_max_string_in_length_range(self, graph_weights, min_y_length, max_y_length, is_normalized):
"""Construct the graph and find the string of maximum value in a given length range.
Solves the pre-image of string kernels with constant norms (Hamming, Weighted Degree) when the length of the
string to predict is unknown.
Parameters
----------
graph_weights : array, shape=[len(alphabet)**n] or [n_partitions, len(alphabet)**n]
Weight of each n-gram at each position, where n_partitions is the number of n_gram in y_length. If all
positions have the same weight (n-gram kernel), the array has the shape=[len(alphabet)**n].
min_y_length : int
Minimum length of the string to predict.
max_y_length : int
Maximum length of the string to predict.
is_normalized : bool
True if it solves the pre-image of the normalized kernel, False otherwise.
(They have a different optimisation problem).
Returns
-------
y: string
The predicted string.
"""
min_partition_index, n_partitions = self._get_min_max_partition(graph_weights, max_y_length, min_y_length)
graph = self._initialize_graph(n_partitions, graph_weights)
predecessors = numpy.empty((n_partitions - 1, self._n_gram_count), dtype=numpy.int)
self._build_complete_graph_with_predecessors(n_partitions, graph, graph_weights, predecessors)
end_indexes = self._get_max_string_end_indexes_in_range(graph, min_partition_index, n_partitions, is_normalized)
max_string = self._build_max_string(end_indexes[0], end_indexes[1], predecessors)
return max_string
def _get_min_max_partition(self, graph_weights, max_y_length, min_y_length):
n_partitions = max_y_length - self.n + 1
min_y_length = max(self.n, min_y_length)
self._verify_graph_weights_and_y_length(graph_weights, n_partitions)
self._verify_min_max_length(min_y_length, max_y_length)
min_partition_index = min_y_length - self.n
return min_partition_index, n_partitions
def _initialize_graph(self, n_partitions, graph_weights):
graph = numpy.empty((n_partitions, self._n_gram_count))
graph[0, :] = self._get_weights(0, graph_weights)
return graph
def _build_complete_graph_with_predecessors(self, n_partitions, graph, graph_weights, predecessors):
for i in range(1, n_partitions):
max_entering_edge_indexes = numpy.argmax(graph[i - 1, self._entering_edges], axis=1)
predecessors[i - 1, :] = self._entering_edges[self._n_gram_indexes, max_entering_edge_indexes]
graph[i, :] = graph[i - 1, predecessors[i - 1, :]] + self._get_weights(i, graph_weights)
def _get_weights(self, i, graph_weights):
if graph_weights.ndim == 1:
partition_weights = graph_weights
else:
partition_weights = graph_weights[i, :]
return partition_weights
def _get_max_string_end_indexes_in_range(self, graph, min_partition, n_partitions, is_normalized):
norm = [n_gram_count for n_gram_count in range(min_partition + 1, n_partitions + 1)]
norm = numpy.array(norm).reshape(-1, 1)
if is_normalized:
graph[min_partition:, :] *= 1. / numpy.sqrt(norm)
end_indexes = numpy.unravel_index(numpy.argmax(graph[min_partition:, :]), graph[min_partition:, :].shape)
else:
graph[min_partition:, :] = norm - 2 * graph[min_partition:, :]
end_indexes = numpy.unravel_index(numpy.argmin(graph[min_partition:, :]), graph[min_partition:, :].shape)
partition_index = end_indexes[0] + min_partition - 1
return partition_index, end_indexes[1]
def _build_max_string(self, partition_index, n_gram_index, predecessors):
max_string = self._index_to_n_gram[n_gram_index]
best_index = n_gram_index
for i in range(partition_index, -1, -1):
best_index = predecessors[i, best_index]
max_string = self._index_to_n_gram[best_index][0] + max_string
return max_string
def _verify_graph_weights_and_y_length(self, graph_weights, n_partitions):
if n_partitions <= 0:
raise InvalidYLengthError(self.n, n_partitions + self.n - 1)
valid_shapes = [(self._n_gram_count,), (n_partitions, self._n_gram_count)]
if graph_weights.shape not in valid_shapes:
raise InvalidShapeError('graph_weights', graph_weights.shape, valid_shapes)
def _verify_min_max_length(self, min_length, max_length):
if min_length > max_length:
raise InvalidMinLengthError(min_length, max_length) | {
"repo_name": "a-ro/preimage",
"path": "preimage/inference/graph_builder.py",
"copies": "1",
"size": "9729",
"license": "bsd-2-clause",
"hash": 8672618099265572000,
"line_mean": 48.8974358974,
"line_max": 120,
"alpha_frac": 0.6354198787,
"autogenerated": false,
"ratio": 3.6397306397306397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.477515051843064,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
import unittest2
import numpy
import numpy.testing
from mock import patch, Mock
from preimage.models.generic_string_model import GenericStringModel
from preimage.learners.structured_krr import InferenceFitParameters
def branch_and_bound_side_effect(node_creator, y_length, alphabet, max_time):
    """Stand-in for branch_and_bound: returns a canned (string, value) pair per length."""
    canned_solutions = {1: ('a', 1), 2: ('ba', 3)}
    return canned_solutions[y_length]
class TestGenericStringModel(unittest2.TestCase):
    """Unit tests for GenericStringModel with every collaborator mocked.

    The feature space, graph builder, node-creator factory and both
    branch-and-bound searches are replaced through mock.patch patchers that
    are started in setUp; tearDown stops them all so the patched module
    attributes do not leak into tests run afterwards.
    """

    def setUp(self):
        self.setup_feature_space()
        self.setup_fit_parameters()
        self.setup_graph_builder()
        self.setup_branch_and_bound()
        self.alphabet = ['a', 'b', 'c']
        self.sigma_position = 0.5
        self.model_with_length = GenericStringModel(self.alphabet, n=1, is_using_length=True,
                                                    sigma_position=self.sigma_position)
        self.model_no_length = GenericStringModel(self.alphabet, n=1, is_using_length=False,
                                                  sigma_position=self.sigma_position)
        self.Y_weights = numpy.array([[1], [0]])
        self.y_lengths = [1, 2]

    def tearDown(self):
        # Bug fix: the patchers started in the setup_* helpers were never
        # stopped, so the patches stayed active for the rest of the test run.
        patch.stopall()

    def setup_fit_parameters(self):
        self.max_train_length = 2
        self.min_train_length = 1
        self.weights = numpy.array([[1, 2]])
        self.gram_matrix = numpy.array([[1, 0], [0, 1]])
        self.fit_parameters = InferenceFitParameters(self.weights, self.gram_matrix, Y=['a', 'ab'],
                                                     y_lengths=[1, 2])

    def setup_feature_space(self):
        self.n_gram_weights = numpy.array([0, 1, 0], dtype=numpy.float64)
        self.feature_space_mock = Mock()
        self.feature_space_mock.compute_weights.return_value = self.n_gram_weights
        self.feature_space_patch = patch('preimage.models.generic_string_model.GenericStringFeatureSpace')
        self.feature_space_patch.start().return_value = self.feature_space_mock

    def setup_graph_builder(self):
        self.graph = numpy.array([[0, 1, 1], [2, 3, 1]], dtype=numpy.float64)
        self.graph_builder_mock = Mock()
        self.graph_builder_mock.build_graph.return_value = self.graph
        # Typo fix: this attribute used to be misspelled "graph_builder_path".
        self.graph_builder_patch = patch('preimage.models.generic_string_model.GraphBuilder')
        self.graph_builder_patch.start().return_value = self.graph_builder_mock

    def setup_branch_and_bound(self):
        self.Y_test_with_length = ['a', 'ba']
        self.Y_test_no_length = ['bb', 'aaa']
        self.bound_factory_patch = patch('preimage.models.generic_string_model.get_gs_node_creator')
        self.bound_factory_patch.start().return_value = Mock()
        self.branch_and_bound_patch = patch('preimage.models.generic_string_model.branch_and_bound')
        self.branch_and_bound_patch.start().side_effect = branch_and_bound_side_effect
        self.branch_and_bound_no_length_patch = patch('preimage.models.generic_string_model.branch_and_bound_no_length')
        # side_effect list: consumed one item per predict call (lengths 1, 2).
        self.branch_and_bound_no_length_patch.start().side_effect = [['bb', 1], ['aaa', 2]]

    def test_model_with_length_predict_returns_expected_y(self):
        self.model_with_length.fit(self.fit_parameters)
        Y = self.model_with_length.predict(self.Y_weights, y_lengths=self.y_lengths)
        numpy.testing.assert_array_equal(Y, self.Y_test_with_length)

    def test_model_with_length_predict_sends_correct_parameters_to_feature_space(self):
        self.model_with_length.fit(self.fit_parameters)
        self.model_with_length.predict(self.Y_weights[0:1], y_lengths=self.y_lengths[0:1])
        self.feature_space_mock.compute_weights.assert_called_with(self.Y_weights[0], self.y_lengths[0])

    def test_model_with_length_predict_sends_correct_parameters_to_graph_builder(self):
        self.model_with_length.fit(self.fit_parameters)
        self.model_with_length.predict(self.Y_weights[0:1], y_lengths=self.y_lengths[0:1])
        self.graph_builder_mock.build_graph.assert_called_with(self.n_gram_weights, self.y_lengths[0])

    def test_model_no_length_predict_returns_expected_y(self):
        self.model_no_length.fit(self.fit_parameters)
        Y = self.model_no_length.predict(self.Y_weights, y_lengths=self.y_lengths)
        numpy.testing.assert_array_equal(Y, self.Y_test_no_length)

    def test_model_no_length_predict_sends_correct_parameters_to_feature_space(self):
        self.model_no_length.fit(self.fit_parameters)
        self.model_no_length.predict(self.Y_weights[0:1], y_lengths=self.y_lengths[0:1])
        self.feature_space_mock.compute_weights.assert_called_with(self.Y_weights[0], self.max_train_length)

    def test_model_no_length_predict_sends_correct_parameters_to_graph_builder(self):
        self.model_no_length.fit(self.fit_parameters)
        self.model_no_length.predict(self.Y_weights[0:1], y_lengths=self.y_lengths[0:1])
        self.graph_builder_mock.build_graph.assert_called_with(self.n_gram_weights, self.max_train_length)
# Allow running this test module directly with the unittest2 runner.
if __name__ == '__main__':
    unittest2.main() | {
"repo_name": "a-ro/preimage",
"path": "preimage/tests/models/test_generic_string_model.py",
"copies": "1",
"size": "4989",
"license": "bsd-2-clause",
"hash": -2732621493150583000,
"line_mean": 45.2037037037,
"line_max": 120,
"alpha_frac": 0.664461816,
"autogenerated": false,
"ratio": 3.2952443857331573,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9444013700022467,
"avg_score": 0.0031385003421379844,
"num_lines": 108
} |
__author__ = 'amelie'
import unittest2
import numpy.testing
from mock import patch
from preimage.inference.graph_builder import GraphBuilder
from preimage.exceptions.shape import InvalidShapeError
from preimage.exceptions.n_gram import InvalidYLengthError, InvalidMinLengthError
class TestGraphBuilder(unittest2.TestCase):
    """Tests for GraphBuilder: graph construction, maximum-string search with a
    fixed length or a length range, and argument validation.

    Several tests start self.index_to_n_gram_patch to control how n-gram
    indexes are decoded back into strings; tearDown stops every started
    patcher so the patch cannot leak into later tests.
    """

    def setUp(self):
        self.setup_strings()
        self.setup_weights()
        self.setup_graph()

    def tearDown(self):
        # Bug fix: index_to_n_gram_patch is started inside individual tests
        # but was never stopped, leaving the patch active for the remainder
        # of the test run.
        patch.stopall()

    def setup_strings(self):
        self.alphabet = ['a', 'b', 'c']
        self.two_letter_alphabet = ['a', 'b']
        self.y_max_one_gram_length_one = 'c'
        self.y_max_two_gram_length_two = 'cc'
        self.y_max_one_gram_length_two = 'ca'
        self.y_max_one_gram_length_three = 'caa'
        self.y_max_one_gram_length_three_same_weight = 'ccc'
        self.y_max_two_gram_length_three = 'acc'
        self.index_to_n_gram_patch = patch('preimage.inference.graph_builder.get_index_to_n_gram')
        self.index_to_one_gram = {0: 'a', 1: 'b', 2: 'c'}
        self.index_to_two_gram = {0: 'aa', 1: 'ab', 2: 'ac', 3: 'ba', 4: 'bb', 5: 'bc', 6: 'ca', 7: 'cb', 8: 'cc'}

    def setup_weights(self):
        self.weights_one_gram = numpy.array([1, 2, 3])
        self.weights_two_gram = numpy.arange(9)
        self.weights_matrix_one_gram_length_one = numpy.array([[1, 2, 3]])
        self.weights_matrix_one_gram_length_two = numpy.array([[1, 2, 3], [3, 2, 0]])
        self.weights_matrix_small_last_weights = numpy.array([[1, 2, 3], [3, 2, 0], [1.3, 1, 1]])
        self.weights_matrix_big_last_weights = numpy.array([[1, 2, 3], [3, 2, 0], [1.5, 1, 1]])
        self.weights_matrix_one_gram_length_two_zero_weights = numpy.array([[1, 2, 3], [0, 0, 0]])
        self.weights_matrix_two_gram_length_two = numpy.array([numpy.arange(9)])
        self.weights_matrix_two_gram_length_three = numpy.array([numpy.arange(8, -1, -1), numpy.arange(9)])
        self.weights_matrix_three_gram_length_four = numpy.array([numpy.arange(8), numpy.zeros(8)])

    def setup_graph(self):
        self.graph_one_gram_two_partitions = numpy.array([[1, 2, 3], [4, 5, 6]])
        self.graph_one_gram_three_partitions = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        self.graph_one_gram_two_partitions_2d_weights = numpy.array([[1, 2, 3], [6, 5, 3]])
        self.graph_two_gram_two_partitions = numpy.array([numpy.arange(9), [6, 7, 8, 10, 11, 12, 14, 15, 16]])
        self.graph_two_gram_two_partitions_2d_weights = numpy.array([numpy.arange(8, -1, -1),
                                                                     [8, 9, 10, 10, 11, 12, 12, 13, 14]])
        self.graph_three_gram_two_partitions_2d_weights = numpy.array([numpy.arange(8), [4, 4, 5, 5, 6, 6, 7, 7]])

    def test_one_gram_length_one_build_graph_returns_graph_with_one_partition(self):
        graph_builder = GraphBuilder(self.alphabet, n=1)
        graph = graph_builder.build_graph(self.weights_one_gram, y_length=1)
        numpy.testing.assert_array_equal(graph, [self.weights_one_gram])

    def test_two_gram_length_two_build_graph_returns_graph_with_one_partition(self):
        graph_builder = GraphBuilder(self.alphabet, n=2)
        graph = graph_builder.build_graph(self.weights_two_gram, y_length=2)
        numpy.testing.assert_array_equal(graph, [self.weights_two_gram])

    def test_one_gram_length_one_2d_weights_build_graph_returns_graph_with_one_partition(self):
        graph_builder = GraphBuilder(self.alphabet, n=1)
        graph = graph_builder.build_graph(self.weights_matrix_one_gram_length_one, y_length=1)
        numpy.testing.assert_array_equal(graph, self.weights_matrix_one_gram_length_one)

    def test_two_gram_length_two_2d_weights_build_graph_returns_graph_with_one_partition(self):
        graph_builder = GraphBuilder(self.alphabet, n=2)
        graph = graph_builder.build_graph(self.weights_matrix_two_gram_length_two, y_length=2)
        numpy.testing.assert_array_equal(graph, self.weights_matrix_two_gram_length_two)

    def test_one_gram_length_two_build_graph_returns_graph_with_two_partition(self):
        graph_builder = GraphBuilder(self.alphabet, n=1)
        graph = graph_builder.build_graph(self.weights_one_gram, y_length=2)
        numpy.testing.assert_array_equal(graph, self.graph_one_gram_two_partitions)

    def test_one_gram_length_three_build_graph_returns_graph_with_three_partition(self):
        graph_builder = GraphBuilder(self.alphabet, n=1)
        graph = graph_builder.build_graph(self.weights_one_gram, y_length=3)
        numpy.testing.assert_array_equal(graph, self.graph_one_gram_three_partitions)

    def test_one_gram_length_two_2d_weights_build_graph_returns_graph_with_two_partition(self):
        graph_builder = GraphBuilder(self.alphabet, n=1)
        graph = graph_builder.build_graph(self.weights_matrix_one_gram_length_two, y_length=2)
        numpy.testing.assert_array_equal(graph, self.graph_one_gram_two_partitions_2d_weights)

    def test_two_gram_length_three_build_graph_returns_graph_with_two_partition(self):
        graph_builder = GraphBuilder(self.alphabet, n=2)
        graph = graph_builder.build_graph(self.weights_two_gram, y_length=3)
        numpy.testing.assert_array_equal(graph, self.graph_two_gram_two_partitions)

    def test_two_gram_length_three_2d_weights_build_graph_returns_graph_with_two_partition(self):
        graph_builder = GraphBuilder(self.alphabet, n=2)
        graph = graph_builder.build_graph(self.weights_matrix_two_gram_length_three, y_length=3)
        numpy.testing.assert_array_equal(graph, self.graph_two_gram_two_partitions_2d_weights)

    def test_three_gram_length_four_2d_weights_build_graph_returns_graph_with_two_partition(self):
        graph_builder = GraphBuilder(self.two_letter_alphabet, n=3)
        graph = graph_builder.build_graph(self.weights_matrix_three_gram_length_four, y_length=4)
        numpy.testing.assert_array_equal(graph, self.graph_three_gram_two_partitions_2d_weights)

    def test_one_gram_length_one_find_max_string_returns_n_gram_with_max_weight(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_one_gram
        graph_builder = GraphBuilder(self.alphabet, n=1)
        y_max = graph_builder.find_max_string(self.weights_matrix_one_gram_length_one, y_length=1)
        numpy.testing.assert_array_equal(y_max, self.y_max_one_gram_length_one)

    def test_two_gram_length_two_find_max_string_returns_n_gram_with_max_weight(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_two_gram
        graph_builder = GraphBuilder(self.alphabet, n=2)
        y_max = graph_builder.find_max_string(self.weights_matrix_two_gram_length_two, y_length=2)
        numpy.testing.assert_array_equal(y_max, self.y_max_two_gram_length_two)

    def test_one_gram_length_two_find_max_string_returns_expected_string(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_one_gram
        graph_builder = GraphBuilder(self.alphabet, n=1)
        y_max = graph_builder.find_max_string(self.weights_matrix_one_gram_length_two, y_length=2)
        numpy.testing.assert_array_equal(y_max, self.y_max_one_gram_length_two)

    def test_one_gram_length_three_same_weights_find_max_string_returns_expected_string(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_one_gram
        graph_builder = GraphBuilder(self.alphabet, n=1)
        y_max = graph_builder.find_max_string(self.weights_one_gram, y_length=3)
        numpy.testing.assert_array_equal(y_max, self.y_max_one_gram_length_three_same_weight)

    def test_two_gram_length_three_find_max_string_returns_expected_string(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_two_gram
        graph_builder = GraphBuilder(self.alphabet, n=2)
        y_max = graph_builder.find_max_string(self.weights_matrix_two_gram_length_three, y_length=3)
        numpy.testing.assert_array_equal(y_max, self.y_max_two_gram_length_three)

    def test_one_gram_one_length_in_range_find_max_string_returns_expected_string(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_one_gram
        graph_builder = GraphBuilder(self.alphabet, n=1)
        y_max = graph_builder.find_max_string_in_length_range(self.weights_matrix_one_gram_length_two, min_y_length=2,
                                                              max_y_length=2, is_normalized=False)
        numpy.testing.assert_array_equal(y_max, self.y_max_one_gram_length_two)

    def test_one_gram_one_length_in_range_normalized_find_max_string_returns_expected_string(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_one_gram
        graph_builder = GraphBuilder(self.alphabet, n=1)
        y_max = graph_builder.find_max_string_in_length_range(self.weights_matrix_one_gram_length_two, min_y_length=2,
                                                              max_y_length=2, is_normalized=True)
        numpy.testing.assert_array_equal(y_max, self.y_max_one_gram_length_two)

    def test_one_gram_two_lengths_in_range_find_max_string_returns_length_two_string(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_one_gram
        graph_builder = GraphBuilder(self.alphabet, n=1)
        y_max = graph_builder.find_max_string_in_length_range(self.weights_matrix_one_gram_length_two, min_y_length=1,
                                                              max_y_length=2, is_normalized=False)
        numpy.testing.assert_array_equal(y_max, self.y_max_one_gram_length_two)

    def test_one_gram_three_lengths_in_range_find_max_string_returns_length_two_string(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_one_gram
        graph_builder = GraphBuilder(self.alphabet, n=1)
        y_max = graph_builder.find_max_string_in_length_range(self.weights_matrix_small_last_weights, min_y_length=1,
                                                              max_y_length=3, is_normalized=True)
        numpy.testing.assert_array_equal(y_max, self.y_max_one_gram_length_two)

    def test_one_gram_three_lengths_in_range_find_max_string_returns_length_three_string(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_one_gram
        graph_builder = GraphBuilder(self.alphabet, n=1)
        y_max = graph_builder.find_max_string_in_length_range(self.weights_matrix_big_last_weights, min_y_length=1,
                                                              max_y_length=3, is_normalized=True)
        numpy.testing.assert_array_equal(y_max, self.y_max_one_gram_length_three)

    def test_zero_weights_for_length_two_find_max_string_without_length_returns_length_one_string(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_one_gram
        graph_builder = GraphBuilder(self.alphabet, n=1)
        y_max = graph_builder.find_max_string_in_length_range(self.weights_matrix_one_gram_length_two_zero_weights,
                                                              min_y_length=1, max_y_length=2, is_normalized=False)
        numpy.testing.assert_array_equal(y_max, self.y_max_one_gram_length_one)

    def test_normalized_zero_weights_for_length_two_find_max_string_without_length_returns_length_one_string(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_one_gram
        graph_builder = GraphBuilder(self.alphabet, n=1)
        y_max = graph_builder.find_max_string_in_length_range(self.weights_matrix_one_gram_length_two_zero_weights,
                                                              min_y_length=1, max_y_length=2, is_normalized=True)
        numpy.testing.assert_array_equal(y_max, self.y_max_one_gram_length_one)

    def test_two_gram_two_lengths_find_max_string_without_length_returns_length_three_string(self):
        self.index_to_n_gram_patch.start().return_value = self.index_to_two_gram
        graph_builder = GraphBuilder(self.alphabet, n=2)
        y_max = graph_builder.find_max_string_in_length_range(self.weights_matrix_two_gram_length_three,
                                                              min_y_length=2, max_y_length=3, is_normalized=False)
        numpy.testing.assert_array_equal(y_max, self.y_max_two_gram_length_three)

    def test_wrong_graph_weights_partition_count_build_graph_raises_shape_error(self):
        graph_builder = GraphBuilder(self.alphabet, n=1)
        with self.assertRaises(InvalidShapeError):
            graph_builder.build_graph(self.weights_matrix_one_gram_length_one, y_length=2)

    def test_wrong_graph_weights_n_gram_count_build_graph_raises_shape_error(self):
        graph_builder = GraphBuilder(self.alphabet, n=2)
        with self.assertRaises(InvalidShapeError):
            graph_builder.build_graph(self.weights_one_gram, y_length=2)

    def test_y_length_smaller_than_n_build_graph_raises_length_error(self):
        graph_builder = GraphBuilder(self.alphabet, n=2)
        with self.assertRaises(InvalidYLengthError):
            graph_builder.build_graph(self.weights_two_gram, y_length=1)

    def test_wrong_graph_weights_partition_count_find_max_string_raises_shape_error(self):
        graph_builder = GraphBuilder(self.alphabet, n=1)
        with self.assertRaises(InvalidShapeError):
            graph_builder.find_max_string(self.weights_matrix_one_gram_length_one, y_length=2)

    def test_wrong_graph_weights_n_gram_count_find_max_string_raises_shape_error(self):
        graph_builder = GraphBuilder(self.alphabet, n=2)
        with self.assertRaises(InvalidShapeError):
            graph_builder.find_max_string(self.weights_one_gram, y_length=2)

    def test_y_length_smaller_than_n_find_max_string_raises_length_error(self):
        graph_builder = GraphBuilder(self.alphabet, n=2)
        with self.assertRaises(InvalidYLengthError):
            graph_builder.find_max_string(self.weights_two_gram, y_length=1)

    def test_wrong_graph_weights_partition_count_find_max_string_in_range_raises_shape_error(self):
        graph_builder = GraphBuilder(self.alphabet, n=1)
        with self.assertRaises(InvalidShapeError):
            graph_builder.find_max_string_in_length_range(self.weights_matrix_one_gram_length_one, 2, 2, True)

    def test_wrong_graph_weights_n_gram_count_find_max_string_in_range_raises_shape_error(self):
        graph_builder = GraphBuilder(self.alphabet, n=2)
        with self.assertRaises(InvalidShapeError):
            graph_builder.find_max_string_in_length_range(self.weights_one_gram, 2, 2, True)

    def test_y_length_smaller_than_n_find_max_string_in_range_raises_length_error(self):
        graph_builder = GraphBuilder(self.alphabet, n=2)
        with self.assertRaises(InvalidYLengthError):
            graph_builder.find_max_string_in_length_range(self.weights_two_gram, 1, 1, True)

    def test_min_length_larger_than_max_length_find_max_string_in_range_raises_length_error(self):
        graph_builder = GraphBuilder(self.alphabet, n=2)
        with self.assertRaises(InvalidMinLengthError):
            graph_builder.find_max_string_in_length_range(self.weights_two_gram, 3, 2, True)
# Allow running this test module directly with the unittest2 runner.
if __name__ == '__main__':
    unittest2.main() | {
"repo_name": "a-ro/preimage",
"path": "preimage/tests/inference/test_graph_builder.py",
"copies": "1",
"size": "15256",
"license": "bsd-2-clause",
"hash": -516558613194166900,
"line_mean": 50.5439189189,
"line_max": 118,
"alpha_frac": 0.6672128998,
"autogenerated": false,
"ratio": 3.173044925124792,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43402578249247914,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
import unittest2
import numpy.testing
from preimage.kernels.polynomial import PolynomialKernel
class TestPolynomialKernel(unittest2.TestCase):
    """Checks the gram matrices produced by PolynomialKernel for different
    degree, bias and normalization settings."""

    def setUp(self):
        # One example [1, 2], and a pair of examples used by the cross tests.
        self.X_one = [[1, 2]]
        self.X_two = [[1, 0], [1, 3]]

    def test_degree_one_x_one_x_one_polynomial_kernel_returns_expected_value(self):
        linear_kernel = PolynomialKernel(degree=1, bias=0, is_normalized=False)
        numpy.testing.assert_array_equal(linear_kernel(self.X_one, self.X_one), [[5.]])

    def test_degree_two_x_one_x_one_polynomial_kernel_returns_expected_value(self):
        quadratic_kernel = PolynomialKernel(degree=2, bias=0, is_normalized=False)
        numpy.testing.assert_array_equal(quadratic_kernel(self.X_one, self.X_one), [[25.]])

    def test_degree_one_bias_one_x_one_x_one_polynomial_kernel_returns_expected_value(self):
        biased_kernel = PolynomialKernel(degree=1, bias=1, is_normalized=False)
        numpy.testing.assert_array_equal(biased_kernel(self.X_one, self.X_one), [[6.]])

    def test_x_one_x_one_normalized_polynomial_kernel_returns_expected_value(self):
        normalized_kernel = PolynomialKernel(degree=1, bias=0, is_normalized=True)
        numpy.testing.assert_array_equal(normalized_kernel(self.X_one, self.X_one), [[1.]])

    def test_degree_one_x_one_x_two_normalized_polynomial_kernel_returns_expected_value(self):
        normalized_kernel = PolynomialKernel(degree=1, bias=0, is_normalized=True)
        numpy.testing.assert_almost_equal(normalized_kernel(self.X_one, self.X_two),
                                          [[0.447213595, 0.989949494]])
# Allow running this test module directly with the unittest2 runner.
if __name__ == '__main__':
    unittest2.main() | {
"repo_name": "a-ro/preimage",
"path": "preimage/tests/kernels/test_polynomial.py",
"copies": "1",
"size": "2192",
"license": "bsd-2-clause",
"hash": 103531104209910180,
"line_mean": 38.1607142857,
"line_max": 103,
"alpha_frac": 0.6797445255,
"autogenerated": false,
"ratio": 2.9986320109439126,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9164215439271923,
"avg_score": 0.0028322194343977634,
"num_lines": 56
} |
__author__ = 'amelie'
import unittest2
import numpy.testing
from preimage.utils import alphabet
from preimage.exceptions.n_gram import InvalidNGramLengthError
class TestAlphabet(unittest2.TestCase):
    """Tests for the n-gram helper functions of preimage.utils.alphabet."""

    def setUp(self):
        self.a_b_alphabet = ['a', 'b']
        self.abc_alphabet = ['a', 'b', 'c']
        self.two_grams = ['aa', 'ab', 'ba', 'bb']
        self.one_gram_to_index = {'a': 0, 'b': 1}
        self.two_gram_to_index = {'aa': 0, 'ab': 1, 'ba': 2, 'bb': 3}
        self.index_to_one_gram = {0: 'a', 1: 'b'}
        self.index_to_two_gram = {0: 'aa', 1: 'ab', 2: 'ba', 3: 'bb'}
        self.cab = ['cab']
        self.cab_aa = ['cab', 'aa']
        self.cab_int = [[2, 0, 1]]
        # Shorter strings are right-padded with -1 in the integer encoding.
        self.cab_aa_int = [[2, 0, 1], [0, 0, -1]]

    def test_integer_n_is_zero_get_n_grams_raises_value_error(self):
        # n=0.5 exercises the rejection of non-integer n-gram lengths.
        with self.assertRaises(ValueError):
            alphabet.get_n_grams(self.a_b_alphabet, n=0.5)

    def test_get_one_grams_returns_alphabet(self):
        n_grams = alphabet.get_n_grams(self.a_b_alphabet, n=1)
        numpy.testing.assert_array_equal(n_grams, self.a_b_alphabet)

    def test_get_two_grams_returns_expected_two_grams(self):
        n_grams = alphabet.get_n_grams(self.a_b_alphabet, n=2)
        numpy.testing.assert_array_equal(n_grams, self.two_grams)

    def test_get_one_gram_to_index_returns_expected_dict(self):
        n_gram_to_index = alphabet.get_n_gram_to_index(self.a_b_alphabet, n=1)
        self.assertDictEqual(n_gram_to_index, self.one_gram_to_index)

    def test_get_two_gram_to_index_returns_expected_dict(self):
        n_gram_to_index = alphabet.get_n_gram_to_index(self.a_b_alphabet, n=2)
        self.assertDictEqual(n_gram_to_index, self.two_gram_to_index)

    def test_n_zero_get_n_gram_to_index_raises_value_error(self):
        with self.assertRaises(InvalidNGramLengthError):
            alphabet.get_n_gram_to_index(self.a_b_alphabet, n=0)

    def test_get_index_to_one_gram_returns_expected_dict(self):
        index_to_n_gram = alphabet.get_index_to_n_gram(self.a_b_alphabet, n=1)
        self.assertDictEqual(index_to_n_gram, self.index_to_one_gram)

    def test_get_index_to_two_gram_returns_expected_dict(self):
        # Bug fix: this test previously duplicated the one-gram case (n=1,
        # index_to_one_gram); it now actually exercises the two-gram mapping.
        index_to_n_gram = alphabet.get_index_to_n_gram(self.a_b_alphabet, n=2)
        self.assertDictEqual(index_to_n_gram, self.index_to_two_gram)

    def test_n_zero_get_index_to_n_gram_raises_value_error(self):
        with self.assertRaises(InvalidNGramLengthError):
            alphabet.get_index_to_n_gram(self.a_b_alphabet, n=0)

    def test_one_string_transform_strings_to_integer_returns_integer_string(self):
        Y_int = alphabet.transform_strings_to_integer_lists(self.cab, self.abc_alphabet)
        numpy.testing.assert_array_equal(Y_int, self.cab_int)

    def test_two_strings_different_length_transform_strings_to_integer_returns_integer_strings(self):
        Y_int = alphabet.transform_strings_to_integer_lists(self.cab_aa, self.abc_alphabet)
        numpy.testing.assert_array_equal(Y_int, self.cab_aa_int)
# Allow running this test module directly with the unittest2 runner.
if __name__ == '__main__':
    unittest2.main() | {
"repo_name": "a-ro/preimage",
"path": "preimage/tests/utils/test_alphabet.py",
"copies": "1",
"size": "3079",
"license": "bsd-2-clause",
"hash": 4831348079894199000,
"line_mean": 38.4871794872,
"line_max": 101,
"alpha_frac": 0.646638519,
"autogenerated": false,
"ratio": 2.869524697110904,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4016163216110904,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amelie'
class InvalidNGramError(ValueError):
    """Raised when an n-gram cannot be produced from the current alphabet."""

    def __init__(self, n, n_gram):
        self.n = n
        self.n_gram = n_gram

    def __str__(self):
        return '%s is not a possible %d_gram for this alphabet' % (self.n_gram, self.n)
class InvalidNGramLengthError(ValueError):
    """Raised when the n-gram length *n* is not strictly greater than *min_n*."""

    def __init__(self, n, min_n=0):
        self.n = n
        self.min_n = min_n

    def __str__(self):
        return 'n must be greater than %d. Got: n=%d' % (self.min_n, self.n)
class InvalidYLengthError(ValueError):
    """Raised when the requested string length is shorter than the n-gram size."""

    def __init__(self, n, y_length):
        self.n = n
        self.y_length = y_length

    def __str__(self):
        return 'y_length must be >= n. Got: y_length=%d, n=%d' % (self.y_length, self.n)
class InvalidMinLengthError(ValueError):
    """Raised when the minimum string length exceeds the maximum."""

    def __init__(self, min_length, max_length):
        self.min_length = min_length
        self.max_length = max_length

    def __str__(self):
        return 'min_length must be <= max_length. Got: min_length=%d, max_length=%d' % (self.min_length,
                                                                                        self.max_length)
class NoThresholdsError(ValueError):
    """Raised when neither a string length nor thresholds are provided."""

    def __str__(self):
        return 'thresholds must be provided when y_length is None'
class NoYLengthsError(ValueError):
    """Raised when y_lengths is missing although is_using_length is True."""

    def __str__(self):
        return "y_lengths can't be None if is_using_length = True"
"repo_name": "a-ro/preimage",
"path": "preimage/exceptions/n_gram.py",
"copies": "1",
"size": "1580",
"license": "bsd-2-clause",
"hash": 6513612622599046000,
"line_mean": 28.2777777778,
"line_max": 105,
"alpha_frac": 0.5981012658,
"autogenerated": false,
"ratio": 3.4273318872017353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45254331530017355,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI import RxObject
class RxGUIObject(RxObject):
    """ The main RxGUI class. Used as a superclass for all RxGUI classes"""

    def __init__(self, name, parent):
        """
        @param parent: RxGUIObject parent object
        @param name: str name of the REXI object
        """
        RxObject.__init__(self, name)
        self._parent = parent
        self.__children = list()

    def get_children(self):
        """
        get a list of the children of the REXI GUI object. Children are of type RxGUIObject.
        @return : list list of child objects
        """
        return self.__children

    def add_child(self, child):
        """
        append a RxGUIObject to the Children list.
        @param child: RxGUIObject
        """
        self.__children.append(child)

    def delete_child(self, child):
        """
        remove a child from the Children list.
        @param child: RxGUIObject child to be removed
        @return: bool True if a child is found and removed, False on fail.
        """
        # Bug fix: the previous code called self.__children.remove(i) with the
        # loop *index*, which searches the list for the integer value i and
        # raises ValueError instead of deleting the matched child.
        for i in range(len(self.__children)):
            if self.__children[i] == child:
                del self.__children[i]
                return True
        return False
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/RxGUIObject.py",
"copies": "1",
"size": "1243",
"license": "apache-2.0",
"hash": -1302139562896896800,
"line_mean": 30.1,
"line_max": 92,
"alpha_frac": 0.5695897023,
"autogenerated": false,
"ratio": 4.102310231023102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5171899933323102,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import Color, RxGUIObject
class Border(RxGUIObject):
    """
    Drawable border for any drawable RxGUIObject.
    """

    def __init__(self, name, color=None, style="solid", width="1px"):
        """
        @param name: str name of the REXI object
        @param color: Color color of the border
        @param style: str style of the border, acceptable values:
            none, hidden, dotted, dashed, solid, double, groove, ridge, inset, outset, inherit
        @param width: str width of the border, acceptable values:
            thin, medium, thick, N%/px/cm/etc., inherit
        """
        RxGUIObject.__init__(self, name, None)
        self._style = style
        self._width = width
        if color is not None:
            self._color = Color.Color('borderColor', color)
        else:
            # Default to a black border when no colour is supplied.
            self._color = Color.Color('borderColor')
            self._color.color_by_name('Black')

    def get(self):
        """
        @return : str CSS code defining the border
        """
        border_parts = (self._width, self._style, self._color.get())
        return "border: %s %s %s ;" % border_parts
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/Border.py",
"copies": "1",
"size": "1108",
"license": "apache-2.0",
"hash": -4780893838523832000,
"line_mean": 34.7741935484,
"line_max": 90,
"alpha_frac": 0.583032491,
"autogenerated": false,
"ratio": 3.7306397306397305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48136722216397304,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import Event
class KeyEvent(Event):
    """
    definition for an event, being called upon keyboard event
    """
    def __init__(self, parent, sender, key_name, actions, event_type="keypress", modifiers=""):
        """
        @param parent: RxGUIObject parent object
        @param sender: str name of the object sending the event
        @param key_name: str name of the keyboard key; any key of the
        key-code table in this class, e.g. 'Backspace', 'Tab', 'Enter',
        'a' ... 'z', '0' ... '9', 'F1' ... 'F12', 'Num0' ... 'Num9',
        'ScrollLock', ',', '.', '/', '`', '[', ']', '\\', '"'
        @param actions: list list of str, methods to be called on this event
        @param event_type: str type of event. Acceptable values: keydown, keypress, keyup
        @param modifiers: list list of str, keyboard modifiers (alt, shift and/or ctrl)
        """
        Event.__init__(self, parent, sender, modifiers, actions, event_type)
        # JavaScript key codes (compared against event.which), keyed by a
        # human-readable key name.
        # BUG FIX vs the original: a second assignment to '/' (code 220,
        # which is the backslash key) used to overwrite the correct 191
        # entry, and '"' was mapped to 220 as well; backslash is now its
        # own '\\' entry and '"' uses its real code 222.
        self.__key_numbers = {
            'Backspace': 8, 'Tab': 9, 'Enter': 13, 'Shift': 16, 'Ctrl': 17,
            'Alt': 18, 'PauseBreak': 19, 'CapsLock': 20, 'Esc': 27,
            'PageUp': 33, 'PageDown': 34, 'End': 35, 'Home': 36,
            'Left': 37, 'Up': 38, 'Right': 39, 'Down': 40,
            'Insert': 45, 'Delete': 46,
            '0': 48, '1': 49, '2': 50, '3': 51, '4': 52,
            '5': 53, '6': 54, '7': 55, '8': 56, '9': 57,
            'colon': 59, 'equals': 61,
            'a': 65, 'b': 66, 'c': 67, 'd': 68, 'e': 69, 'f': 70,
            'g': 71, 'h': 72, 'i': 73, 'j': 74, 'k': 75, 'l': 76,
            'm': 77, 'n': 78, 'o': 79, 'p': 80, 'q': 81, 'r': 82,
            's': 83, 't': 84, 'u': 85, 'v': 86, 'w': 87, 'x': 88,
            'y': 89, 'z': 90,
            'Meta': 91, 'Super': 91, 'Win': 91, 'RightClick': 93,
            'Num0': 96, 'Num1': 97, 'Num2': 98, 'Num3': 99, 'Num4': 100,
            'Num5': 101, 'Num6': 102, 'Num7': 103, 'Num8': 104, 'Num9': 105,
            'Num*': 106, 'Num+': 107, 'Num-': 109, 'Num.': 110, 'Num/': 111,
            'F1': 112, 'F2': 113, 'F3': 114, 'F4': 115, 'F5': 116,
            'F6': 117, 'F7': 118, 'F8': 119, 'F9': 120, 'F10': 121,
            'F11': 122, 'F12': 123,
            'NumLock': 144, 'ScrollLock': 145,
            ',': 188, '.': 190, '/': 191, '`': 192,
            '[': 219, ']': 221, '\\': 220, '"': 222,
        }
        self._key = self.__key_numbers[key_name]
    def set_key(self, key_name):
        """
        set the key which triggers the event
        @param key_name: str name of the keyboard key; any key of the
        key-code table of this class (see __init__)
        """
        self._key = self.__key_numbers[key_name]
    def get(self):
        """
        setup the key event: build the jQuery handler, register it with the
        parent and return an empty string (events emit no HTML of their own)
        """
        functions = ""
        for s in self._actions:
            functions += s + "; \n"
        self._javascript = """
        $("#%s").%s(function(event){
            if (event.which == %d) {
                event.preventDefault();
                %s
            }
        }
        )
        """ % (self._sender, self._type, self._key, functions)
        self._parent.append_javascript(self._javascript)
        return ""
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/KeyEvent.py",
"copies": "1",
"size": "6916",
"license": "apache-2.0",
"hash": -3538725533437029000,
"line_mean": 41.6975308642,
"line_max": 115,
"alpha_frac": 0.4764314633,
"autogenerated": false,
"ratio": 3.218241042345277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9188496542249955,
"avg_score": 0.001235192679064289,
"num_lines": 162
} |
__author__ = 'amentis'
from RxAPI.RxGUI import Event
class MouseEvent(Event):
    """
    definition for an event, being called upon a mouse-button event
    """
    def __init__(self, parent, sender, button_name, modifiers, actions, event_type="click"):
        """
        @param parent: RxGUIObject parent object
        @param sender: str name of the object sending the event
        @param button_name: str name of the mouse button. Acceptable values: left, middle, right
        @param modifiers: list list of str, keyboard modifiers (alt, shift and/or ctrl)
        @param actions: list list of str, methods to be called on this event
        @param event_type: str type of event (jQuery mouse event name,
        e.g. click, dblclick, mousedown, mouseup -- TODO confirm full set)
        """
        Event.__init__(self, parent, sender, modifiers, actions, event_type)
        # BUG FIX: the original assigned the ``dict`` *type* itself
        # (``self.__button_numbers = dict``), so the following item
        # assignments raised TypeError.  Use a real dictionary.
        # Values match jQuery's event.which for mouse buttons.
        self.__button_numbers = {'left': 1, 'middle': 2, 'right': 3}
        self._button = self.__button_numbers[button_name]
    def set_button(self, button_name):
        """
        set the button which triggers the event
        @param button_name: str name of the mouse button. Acceptable values: left, middle, right
        """
        self._button = self.__button_numbers[button_name]
    def get(self):
        """
        setup the mouse event: build the jQuery handler, register it with the
        parent and return an empty string (events emit no HTML of their own)
        """
        functions = ""
        # BUG FIX: the original iterated ``self._actions.values`` -- actions
        # is a list, which has no ``values`` attribute.
        for s in self._actions:
            functions += s
        self._javascript = """
        $("#%s").%s(function(event){
            if (event.which == %d) {
                event.preventDefault();
                %s
            }
        }
        )
        """ % (self._sender, self._type, self._button, functions)
        # register the handler with the parent, mirroring KeyEvent.get()
        self._parent.append_javascript(self._javascript)
        return ""
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/MouseEvent.py",
"copies": "1",
"size": "1659",
"license": "apache-2.0",
"hash": -4978891421162307000,
"line_mean": 36.7272727273,
"line_max": 96,
"alpha_frac": 0.5647980711,
"autogenerated": false,
"ratio": 4.232142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5296940928242857,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import *
class Console(Screen):
    """
    A simple full-screen command-line user interface
    """
    def __init__(self):
        Screen.__init__(self, "REXI Console")
        # widget tree: one window holding an output view above an input line
        self._window = Window(self, 'consoleWindow')
        self._output = TextView(self._window, 'output')
        self._input = TextEdit(self._window, 'input')
        # geometry: fixed window, output takes 90% of the height
        self._window.set_size('800px', '600px')
        self._input.set_size('100%', '10%')
        self._output.set_size('100%', '90%')
        # lavender screen background
        self._screen_bckg_color = Color("screenBackgroundColor")
        self._screen_bckg_color.color_by_name("Lavender")
        self.set_background_color(self._screen_bckg_color)
        self._window.center()
        # both text fields share a translucent gray background
        self._fields_bckg_color = Color("outputBackgroundColor")
        self._fields_bckg_color.color_by_rgba(200, 200, 200, 0.7)
        for field in (self._output, self._input):
            field.set_background_color(self._fields_bckg_color)
        self._output.set_border(Border("OutputBorder", Color("OutputBorderColor", "Black")))
        self._input.set_border(Border("InputBorder", Color("InputBorderColor", "Black")))
        # pressing Enter echoes the input into the output view, scrolls the
        # output to its bottom and clears the input field
        self._on_enter = KeyEvent(
            self, "input", "Enter",
            (
                "output.append_text(%s.get_text() + \"<br />\")" % self._input.get_name(),
                "$(\"#output\").scrollTop($(\"#output\")[0].scrollHeight)",
                "input.clear_text()"
            ))
    def get(self):
        """
        @return: str complete HTML page of the console screen
        """
        return Screen.get(self)
"repo_name": "amentis/Rexi",
"path": "UI/Console.py",
"copies": "1",
"size": "1625",
"license": "apache-2.0",
"hash": -5375768058629626000,
"line_mean": 44.1666666667,
"line_max": 112,
"alpha_frac": 0.5495384615,
"autogenerated": false,
"ratio": 3.676470588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47260090497352936,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import RxGUIObject
class Color(RxGUIObject):
    """
    color definition for wherever such is needed
    """
    def __init__(self, name, color="Black"):
        """
        @param name: str name of the REXI object
        @param color: str color value -- either one of the named HTML colors
        from the table below (AliceBlue ... YellowGreen), or any other CSS
        color string, which is stored verbatim
        """
        RxGUIObject.__init__(self, name, None)
        # Named HTML colors mapped to their hex codes.
        # BUG FIX vs the original: 'Chocolate' and 'Gold' carried a doubled
        # '#' ("##D2691E", "##FFD700"), which is invalid CSS; 'CadetBlue'
        # (documented in color_by_name) was missing from the table.
        self.__color_names = {
            'AliceBlue': "#F0F8FF", 'AntiqueWhite': "#FAEBD7",
            'Aqua': "#00FFFF", 'Aquamarine': "#7FFFD4",
            'Azure': "#F0FFFF", 'Beige': "#F5F5DC",
            'Bisque': "#FFE4C4", 'Black': "#000000",
            'BlanchedAlmond': "#FFEBCD", 'Blue': "#0000FF",
            'BlueViolet': "#8A2BE2", 'Brown': "#A52A2A",
            'BurlyWood': "#DEB887", 'CadetBlue': "#5F9EA0",
            'Chartreuse': "#7FFF00", 'Chocolate': "#D2691E",
            'Coral': "#FF7F50", 'CornflowerBlue': "#6495ED",
            'Cornsilk': "#FFF8DC", 'Crimson': "#DC143C",
            'Cyan': "#00FFFF", 'DarkBlue': "#00008B",
            'DarkCyan': "#008B8B", 'DarkGoldenRod': "#B8860B",
            'DarkGray': "#A9A9A9", 'DarkGreen': "#006400",
            'DarkKhaki': "#BDB76B", 'DarkMagenta': "#8B008B",
            'DarkOliveGreen': "#556B2F", 'DarkOrange': "#FF8C00",
            'DarkOrchid': "#9932CC", 'DarkRed': "#8B0000",
            'DarkSalmon': "#E9967A", 'DarkSeaGreen': "#8FBC8F",
            'DarkSlateBlue': "#483D8B", 'DarkSlateGray': "#2F4F4F",
            'DarkTurquoise': "#00CED1", 'DarkViolet': "#9400D3",
            'DeepPink': "#FF1493", 'DeepSkyBlue': "#00BFFF",
            'DimGray': "#696969", 'DodgerBlue': "#1E90FF",
            'FireBrick': "#B22222", 'FloralWhite': "#FFFAF0",
            'ForestGreen': "#228B22", 'Fuchsia': "#FF00FF",
            'Gainsboro': "#DCDCDC", 'GhostWhite': "#F8F8FF",
            'Gold': "#FFD700", 'GoldenRod': "#DAA520",
            'Gray': "#808080", 'Green': "#008000",
            'GreenYellow': "#ADFF2F", 'HoneyDew': "#F0FFF0",
            'HotPink': "#FF69B4", 'IndianRed': "#CD5C5C",
            'Indigo': "#4B0082", 'Ivory': "#FFFFF0",
            'Khaki': "#F0E68C", 'Lavender': "#E6E6FA",
            'LavenderBlush': "#FFF0F5", 'LawnGreen': "#7CFC00",
            'LemonChiffon': "#FFFACD", 'LightBlue': "#ADD8E6",
            'LightCoral': "#F08080", 'LightCyan': "#E0FFFF",
            'LightGoldenRodYellow': "#FAFAD2", 'LightGray': "#D3D3D3",
            'LightGreen': "#90EE90", 'LightPink': "#FFB6C1",
            'LightSalmon': "#FFA07A", 'LightSeaGreen': "#20B2AA",
            'LightSkyBlue': "#87CEFA", 'LightSlateGray': "#778899",
            'LightSteelBlue': "#B0C4DE", 'LightYellow': "#FFFFE0",
            'Lime': "#00FF00", 'LimeGreen': "#32CD32",
            'Linen': "#FAF0E6", 'Magenta': "#FF00FF",
            'Maroon': "#800000", 'MediumAquaMarine': "#66CDAA",
            'MediumBlue': "#0000CD", 'MediumOrchid': "#BA55D3",
            'MediumPurple': "#9370DB", 'MediumSeaGreen': "#3CB371",
            'MediumSlateBlue': "#7B68EE", 'MediumSpringGreen': "#00FA9A",
            'MediumTurquoise': "#48D1CC", 'MediumVioletRed': "#C71585",
            'MidnightBlue': "#191970", 'MintCream': "#F5FFFA",
            'MistyRose': "#FFE4E1", 'Moccasin': "#FFE4B5",
            'NavajoWhite': "#FFDEAD", 'Navy': "#000080",
            'OldLace': "#FDF5E6", 'Olive': "#808000",
            'OliveDrab': "#6B8E23", 'Orange': "#FFA500",
            'OrangeRed': "#FF4500", 'Orchid': "#DA70D6",
            'PaleGoldenRod': "#EEE8AA", 'PaleGreen': "#98FB98",
            'PaleTurquoise': "#AFEEEE", 'PaleVioletRed': "#DB7093",
            'PapayaWhip': "#FFEFD5", 'PeachPuff': "#FFDAB9",
            'Peru': "#CD853F", 'Pink': "#FFC0CB",
            'Plum': "#DDA0DD", 'PowderBlue': "#B0E0E6",
            'Purple': "#800080", 'Red': "#FF0000",
            'RosyBrown': "#BC8F8F", 'RoyalBlue': "#4169E1",
            'SaddleBrown': "#8B4513", 'Salmon': "#FA8072",
            'SandyBrown': "#F4A460", 'SeaGreen': "#2E8B57",
            'SeaShell': "#FFF5EE", 'Sienna': "#A0522D",
            'Silver': "#C0C0C0", 'SkyBlue': "#87CEEB",
            'SlateBlue': "#6A5ACD", 'SlateGray': "#708090",
            'Snow': "#FFFAFA", 'SpringGreen': "#00FF7F",
            'SteelBlue': "#4682B4", 'Tan': "#D2B48C",
            'Teal': "#008080", 'Thistle': "#D8BFD8",
            'Tomato': "#FF6347", 'Turquoise': "#40E0D0",
            'Violet': "#EE82EE", 'Wheat': "#F5DEB3",
            'White': "#FFFFFF", 'WhiteSmoke': "#F5F5F5",
            'Yellow': "#FFFF00", 'YellowGreen': "#9ACD32",
        }
        if color in self.__color_names:
            self._result = self.__color_names[color]
        else:
            # unknown name: assume it is already a usable CSS color value
            self._result = color
    def color_by_name(self, name):
        """
        sets the Color color to an HTML color by using an HTML name
        @param name: str color name -- any key of the named-color table of
        this class (AliceBlue, AntiqueWhite, ... CadetBlue, ... YellowGreen)
        @raise KeyError: if the name is not in the table
        """
        self._result = self.__color_names[name]
    def color_by_rgb(self, r, g, b):
        """
        sets the Color color by provided Red Green and Blue values
        @param r: int Red
        @param g: int Green
        @param b: int Blue
        """
        # BUG FIX: the original format string was missing the closing ')'
        self._result = "rgb({0:d}, {1:d}, {2:d})".format(r, g, b)
    def color_by_rgba(self, r, g, b, a):
        """
        sets the Color color by provided Red Green Blue and Alpha values
        @param r: int Red
        @param g: int Green
        @param b: int Blue
        @param a: float alpha
        """
        self._result = "rgba({0:d}, {1:d}, {2:d}, {3:1.1f})".format(r, g, b, a)
    def get(self):
        """
        @return: str CSS code defining the color
        """
        return self._result
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/Color.py",
"copies": "1",
"size": "12217",
"license": "apache-2.0",
"hash": 1780007010630904600,
"line_mean": 53.3022222222,
"line_max": 111,
"alpha_frac": 0.6044855529,
"autogenerated": false,
"ratio": 2.892282196969697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39967677498696963,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import RxGUIObject
class Font(RxGUIObject):
    """
    text font definition for wherever such is needed
    """
    def __init__(self, parent, name,
                 family="Arial, sans-serif", style="normal", size="medium", variant="normal",
                 weight="normal", line_height="normal"):
        """
        @param parent: RxGUIObject parent object
        @param name: str name of the REXI object
        @param family: str font-family - list of font names, ending with a base name
        acceptable values: list of any fonts + one these bases: serif, sans-serif, monospace
        @param style: str font-style acceptable values: normal, italic, oblique, inherit
        @param size: str font-size acceptable values:
        xx-small, x-small, small, medium, large, x-large, xx-large, smaller, larger,
        N%, Npx/cm,etc., inherit
        @param variant: str font-variant acceptable values: normal, small-caps, inherit
        @param weight: str font-weight acceptable values:
        normal, bold, bolder, lighter, 100, 200, 300 ... 900, inherit
        @param line_height: str acceptable values: normal, number multiplied with current
        font size, Npx,cm,etc., %, inherit
        """
        RxGUIObject.__init__(self, name, parent)
        # each CSS font component is stored verbatim, exactly as given
        self._style = style
        self._variant = variant
        self._weight = weight
        self._size = size
        self._line_height = line_height
        self._family = family
    def get(self):
        """
        @return: str CSS font shorthand (style variant weight size/line-height family)
        """
        components = (self._style, self._variant, self._weight,
                      self._size, self._line_height, self._family)
        return """%s %s %s %s/%s %s""" % components
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/Font.py",
"copies": "1",
"size": "1721",
"license": "apache-2.0",
"hash": 3252251198082808300,
"line_mean": 41,
"line_max": 100,
"alpha_frac": 0.6095293434,
"autogenerated": false,
"ratio": 3.9563218390804598,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.506585118248046,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import RxGUIObject, RxDynamic
from RxAPI import RxObject
class Event(RxDynamic, RxGUIObject):
    """
    Superclass for different dynamic events
    """
    def __init__(self, parent, sender, modifiers, actions, event_type):
        """
        @param parent: RxGUIObject parent object
        @param sender: str name of the object sending the event
        @param modifiers: list list of str, keyboard modifiers (alt, shift and/or ctrl)
        @param actions: list list of str, methods to be called on this event
        @param event_type: str type of event (types defined in children)
        """
        RxGUIObject.__init__(self, "__event__", parent)
        # BUG FIX: the original called RxDynamic.__init__(RxObject(self)),
        # which initialised a throwaway wrapper object instead of this
        # instance; every other class in the package calls
        # RxDynamic.__init__(self).
        RxDynamic.__init__(self)
        self._parent.add_child(self)
        # modifier parsing is shared with set_modifiers instead of being
        # duplicated inline as in the original
        self.set_modifiers(modifiers)
        self._actions = actions
        self._sender = sender
        self._type = event_type
    def set_sender(self, sender):
        """
        set the sender of the event
        @param sender: str name of the object sending the event
        """
        self._sender = sender
    def get_sender(self):
        """
        @return : str name of the object sending the event
        """
        return self._sender
    def add_action(self, function):
        """
        append action to the list of actions to be done on this event
        @param function: str action - method to be called on this event
        """
        self._actions.append(function)
    def get_action(self, number):
        """
        @param number: int index of the action
        @return : str action in the specified index of the list of actions
        """
        return self._actions[number]
    def get_all_actions(self):
        """
        @return : list list of str, all off the actions in the event object
        """
        return self._actions
    def remove_action(self, number):
        """
        remove the action on this index of the list of actions
        @param number: int index of the action to be removed
        """
        self._actions.pop(number)
    def remove_all_actions(self):
        """
        empty the list of actions
        """
        self._actions.clear()
    def get_modifiers(self):
        """
        @return : str string showing which modifier keys are being used for this event
        """
        # BUG FIX: the original used the '{0:s}' format spec on bool values,
        # which raises TypeError in Python 3; plain '{0}' formats them fine.
        return u"ctrl: {0}, alt: {1}, shift: {2}".format(self._ctrl, self._alt, self._shift)
    def set_modifiers(self, modifiers):
        """
        set modifier keys usage in calling this event
        @param modifiers: str string of the modifier keys to be used, separated by spaces
        """
        self._alt = "alt" in modifiers
        self._shift = "shift" in modifiers
        self._ctrl = "ctrl" in modifiers
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/Event.py",
"copies": "1",
"size": "3191",
"license": "apache-2.0",
"hash": 7075634123362006000,
"line_mean": 29.4,
"line_max": 98,
"alpha_frac": 0.5596991539,
"autogenerated": false,
"ratio": 4.359289617486339,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5418988771386338,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import RxGUIObject, RxDynamic
class Screen(RxGUIObject, RxDynamic):
    """
    The main holder for GUI elements. Represents the entire HTML body
    """
    def __init__(self, title, body=""):
        """
        @param title: str page title
        @param body: str initial HTML body content
        """
        RxGUIObject.__init__(self, "screen", None)
        RxDynamic.__init__(self)
        self.__title = title
        self.__body = body
        self.__css = ""
    def get(self):
        """
        render the page: append every child's HTML to the body, then wrap
        the accumulated body, CSS and JavaScript in the page template
        @return: str complete HTML 5 page
        """
        self.__body += "".join(child.get() for child in self.get_children())
        return """<!DOCTYPE HTML>
        <html>
        <head>
        <title> %s</title>
        <script src="jquery-2.0.3.min.js"></script>
        <script> $(document).ready(function(){
        %s
        });</script>
        <style> %s </style>
        </head>
        <body> %s </body>
        </html>
        """ % (self.__title, self._javascript, self.__css, self.__body)
    def get_title(self):
        """
        @return : str page title
        """
        return self.__title
    def set_title(self, title):
        """
        set the title of the Screen object - page
        @param title: str page title
        """
        self.__title = title
    def get_css(self):
        """
        @return: str complete CSS code accumulated for the page
        """
        return self.__css
    def add_css(self, css):
        """
        append CSS to this Screen
        @param css: str CSS to be appended
        """
        self.__css += css
    def get_body(self):
        """
        @return: str HTML body content
        """
        return self.__body
    def set_background_color(self, color):
        """
        set the background color of the Screen
        @param color: Color
        """
        self.__css += """
        html body {background-color: %s ;}
        """ % color.get()
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/Screen.py",
"copies": "1",
"size": "2033",
"license": "apache-2.0",
"hash": 8306816733274683000,
"line_mean": 24.1111111111,
"line_max": 71,
"alpha_frac": 0.4904082636,
"autogenerated": false,
"ratio": 4.107070707070707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5097478970670707,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import StylableObject, RxDynamic
class Button(StylableObject, RxDynamic):
    """A button for starting actions on push"""
    def __init__(self, parent, name, value="Button"):
        """
        @param parent: RxGUIObject parent REXI object
        @param name: str name of the REXI object
        @param value: str text label of the button
        """
        StylableObject.__init__(self, name, parent)
        RxDynamic.__init__(self)
        self._parent.add_child(self)
        # register the matching client-side Button object with the parent
        self._parent.append_javascript("var %s = new Button(\"%s\"); \n" % (self.get_name(), self.get_name()))
        self.__value = value
    def get_value(self):
        """
        @return str text label of the button
        """
        return self.__value
    def set_value(self, value):
        """
        @param value: str text label for the button
        """
        self.__value = value
    def get(self):
        """
        render the button: push its CSS and JavaScript to the parent and
        return its HTML
        @return: str HTML of the button
        """
        style = " "
        if self._style_internal_enabled:
            style += self._style_internal
        style += """
        #%s {color: %s;font: %s; %s background-color: %s; }
        """ % (self.get_name(), self._text_color.get(), self._font.get(), self._border.get(),
               self._background_color.get())
        style += self._css
        self._parent.add_css(style)
        self._parent.append_javascript(self.get_javascript())
        return """
        <button id="{0}" class="Button" type=\"button\">{1}</button>
        """.format(self.get_name(), self.__value)
    # Client-side counterpart of this class.
    # BUG FIXES vs the original JavaScript: a second, duplicate definition of
    # set_size (which assigned style.width twice, losing the height) has been
    # removed; get_value now returns its result; get_colors used the invalid
    # expression $(...).background-color (a subtraction in JS) and now reads
    # style.backgroundColor, matching the style access used elsewhere.
    javascript_class = """
    function Button (name) {
        this.name = name;
        this.set_size = function(width, height) {
            $(\"#\" + this.name).style.width = width;
            $(\"#\" + this.name).style.height = height;
        };
        this.get_font = function() {
            return $(\"#\" + this.name).style.font;
        };
        this.get_colors = function() {
            return [$(\"#\" + this.name).style.color, $(\"#\" + this.name).style.backgroundColor];
        };
        this.get_value = function () {
            return $(\"#\" + this.name).html();
        };
        this.set_value = function (text) {
            $(\"#\" + this.name).text(text).button(\"Refresh\");
        };
    }
    """
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/Button.py",
"copies": "1",
"size": "2387",
"license": "apache-2.0",
"hash": 3722104164739765000,
"line_mean": 30.4210526316,
"line_max": 110,
"alpha_frac": 0.5295349811,
"autogenerated": false,
"ratio": 3.5206489675516224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4550183948651622,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import StylableObject, RxDynamic
class Window(StylableObject, RxDynamic):
    """
    an element holder to organize elements
    """
    def __init__(self, parent, name):
        """
        @param parent: RxGUIObject parent object
        @param name: str name of the REXI object
        """
        StylableObject.__init__(self, name, parent)
        RxDynamic.__init__(self)
        self.__body = ""
        self.__css = ""
        self._parent.add_child(self)
    def get(self):
        """
        render the window: append every child's HTML to the body, hand the
        accumulated JavaScript and CSS to the parent, and return the div
        @return: str HTML code of the Window
        """
        self.__body += "".join(child.get() for child in self.get_children())
        self._parent.append_javascript(self.get_javascript())
        self._parent.add_css(self.get_css())
        return """
        <div id="{0}">
        {1}
        </div>""".format(self.get_name(), self.__body)
    def get_body(self):
        """
        @return: str HTML body of the window
        """
        return self.__body
    def get_css(self):
        """
        @return: str CSS of the Window
        """
        return self.__css
    def add_css(self, css):
        """
        add CSS code to the window object
        @param css: str CSS code to be appended
        """
        self.__css += css
    def center(self):
        """
        orient all the elements in the window to the center
        """
        self.__css += "#%s {margin-left: auto; margin-right: auto;}" % self.get_name()
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/Window.py",
"copies": "1",
"size": "1493",
"license": "apache-2.0",
"hash": -7121578131655640000,
"line_mean": 25.2105263158,
"line_max": 86,
"alpha_frac": 0.5217682518,
"autogenerated": false,
"ratio": 3.9083769633507854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4930145215150785,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import StylableObject, RxDynamic, TextContainer
class TextEdit(StylableObject, RxDynamic, TextContainer):
    """A GUI field for working with user-inputted multi-line text"""
    def __init__(self, parent, name, text=" "):
        """
        @param parent: RxGUIObject parent REXI object
        @param name: str name of the REXI object
        @param text: str value of the text input field
        """
        StylableObject.__init__(self, name, parent)
        RxDynamic.__init__(self)
        TextContainer.__init__(self, text)
        self._parent.add_child(self)
        # register the matching client-side object (see javascript_class)
        self._parent.append_javascript("var %s = new TextEdit(\"%s\"); \n" % (self.get_name(), self.get_name()))
    def get(self):
        """
        @return: str HTML of the text edit field
        """
        style = " "
        if self._style_internal_enabled:
            style += self._style_internal
        # per-widget CSS rule built from the inherited style attributes
        style += """
        #%s {color: %s;font: %s; %s background-color: %s; }
        """ % (self.get_name(), self._text_color.get(), self._font.get(), self._border.get(),
               self._background_color.get())
        style += self._css
        self._parent.add_css(style)
        self._parent.append_javascript(self.get_javascript())
        return """
        <textarea id="{0}" class="TextEdit">{1}</textarea>
        """.format(self.get_name(), self._text)
    # Client-side companion class shared by all TextEdit instances.
    # NOTE(review): set_size is defined twice below and the second copy
    # assigns width twice (height is dropped); get_text is also defined
    # twice, the first copy returning nothing — confirm which versions are
    # intended before relying on them.
    javascript_class="""
    function TextEdit (name) {
        this.name = name;
        this.set_size = function(width, height) {
            $(\"#\" + this.name).style.width = width;
            $(\"#\" + this.name).style.height = height;
        };
        this.get_font = function() {
            return $(\"#\" + this.name).style.font;
        };
        this.get_colors = function() {
            return [$(\"#\" + this.name).style.color, $(\"#\" + this.name).background-color];
        };
        this.get_text = function () {
            $(\"#\" + this.name).html();
        };
        this.set_size = function (width, height) {
            $(\"#\" + this.name).style.width = width;
            $(\"#\" + this.name).style.width = height;
        };
        this.get_text = function () {
            return $(\"#\" + this.name).val();
        };
        this.set_text = function (text) {
            $(\"#\" + this.name).val(text);
        };
        this.append_text = function (text) {
            $(\"#\" + this.name).val($(\"#\" + this.name).val() + text);
        };
        this.prepend_text = function (text) {
            $(\"#\" + this.name).val(text + $(\"#\" + this.name).val());
        };
        this.clear_text = function () {
            $(\"#\" + this.name).val(\"\");
        };
    }
    """
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/TextEdit.py",
"copies": "1",
"size": "2573",
"license": "apache-2.0",
"hash": -6768855730866793000,
"line_mean": 32,
"line_max": 112,
"alpha_frac": 0.5254566654,
"autogenerated": false,
"ratio": 3.3502604166666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9327968064067225,
"avg_score": 0.009549803599888329,
"num_lines": 78
} |
__author__ = 'amentis'
from RxAPI.RxGUI import StylableObject, RxDynamic, TextContainer
class TextView(StylableObject, RxDynamic, TextContainer):
    """A read-only GUI element that displays multi-line text in a <p> tag."""
    def __init__(self, parent, name, text=" "):
        """
        @param parent: RxGUIObject parent REXI object
        @param name: str name of the REXI object
        @param text: str value of the text input field
        """
        StylableObject.__init__(self, name, parent)
        RxDynamic.__init__(self)
        TextContainer.__init__(self, text)
        self._parent.add_child(self)
        # register the matching client-side object (see javascript_class)
        self._parent.append_javascript("var %s = new TextView(\"%s\"); \n" % (self.get_name(), self.get_name()))
    def get(self):
        """
        @return: str HTML of the text edit field
        """
        style = " "
        if self._style_internal_enabled:
            style += self._style_internal
        # per-widget CSS rule; overflow:auto adds scrollbars when needed
        style += """
        #%s {color: %s;font: %s; %s background-color: %s; overflow: auto;}
        """ % (self.get_name(), self._text_color.get(), self._font.get(), self._border.get(),
               self._background_color.get())
        style += self._css
        self._parent.add_css(style)
        self._parent.append_javascript(self.get_javascript())
        return """
        <p id=\"{0}\" class=\"TextView\">{1}</p>
        """ .format(self.get_name(), self._text)
    # Client-side companion class shared by all TextView instances.
    # NOTE(review): get_font below lacks a return statement, and the second
    # set_size assigns width twice (height is dropped) — confirm intended.
    javascript_class = """
    function TextView (name) {
        this.name = name;
        this.set_size = function(width, height) {
            $(\"#\" + this.name).style.width = width;
            $(\"#\" + this.name).style.height = height;
        };
        this.get_font = function() {
            $(\"#\" + this.name).style.font;
        };
        this.get_colors = function() {
            return [$(\"#\" + this.name).style.color, $(\"#\" + this.name).background-color];
        };
        this.get_text = function () {
            return $(\"#\" + this.name).html();
        };
        this.set_size = function (width, height) {
            $(\"#\" + this.name).style.width = width;
            $(\"#\" + this.name).style.width = height;
        };
        this.set_text = function (text) {
            $(\"#\" + this.name).text(text);
        };
        this.append_text = function (text) {
            $(\"#\" + this.name).html($(\"#\" + this.name).html() + text);
        };
        this.prepend_text = function (text) {
            $(\"#\" + this.name).text(text + $(\"#\" + this.name).text());
        };
        this.clear_text = function () {
            $(\"#\" + this.name).text(\"\");
        };
    }
    """
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/TextView.py",
"copies": "1",
"size": "2434",
"license": "apache-2.0",
"hash": -2118369161051558000,
"line_mean": 31.9054054054,
"line_max": 112,
"alpha_frac": 0.5225965489,
"autogenerated": false,
"ratio": 3.316076294277929,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43386728431779287,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import LineEdit
class PasswordEdit(LineEdit):
    """
    password input field

    Identical to LineEdit except it renders <input type="password"> and
    registers a PasswordEdit JavaScript class.
    """
    def __init__(self, parent, name, text=" "):
        """
        @param parent: RxGUIObject parent REXI object
        @param name: str name of the REXI object
        @param text: str value of the line edit field
        """
        LineEdit.__init__(self, parent, name, text)
    def get(self):
        """
        @return: str HTML of the password edit field
        """
        style = " "
        if self._style_internal_enabled:
            style += self._style_internal
        # per-widget CSS rule built from the inherited style attributes
        style += """
        #%s {color: %s;font: %s; %s background-color: %s; }
        """ % (self.get_name(), self._text_color.get(), self._font.get(), self._border.get(),
               self._background_color.get())
        style += self._css
        self._parent.add_css(style)
        self._parent.append_javascript(self.get_javascript())
        # note: reuses the "LineEdit" CSS class on purpose (same look)
        return """
        <input type=\"password\" id="{0}" class="LineEdit" value=\"{1}\" />
        """.format(self.get_name(), self._text)
    # Client-side companion class.
    # NOTE(review): get_text is defined twice below (the first copy returns
    # nothing) and the second set_size assigns width twice, dropping the
    # height — confirm against the other widgets' JS.
    javascript_class = """
    function PasswordEdit (name) {
        this.name = name;
        this.set_size = function(width, height) {
            $(\"#\" + this.name).style.width = width;
            $(\"#\" + this.name).style.height = height;
        };
        this.get_font = function() {
            return $(\"#\" + this.name).style.font;
        };
        this.get_colors = function() {
            return [$(\"#\" + this.name).style.color, $(\"#\" + this.name).background-color];
        };
        this.get_text = function () {
            $(\"#\" + this.name).html();
        };
        this.set_size = function (width, height) {
            $(\"#\" + this.name).style.width = width;
            $(\"#\" + this.name).style.width = height;
        };
        this.get_text = function () {
            return $(\"#\" + this.name).val();
        };
        this.set_text = function (text) {
            $(\"#\" + this.name).val(text);
        };
        this.append_text = function (text) {
            $(\"#\" + this.name).val($(\"#\" + this.name).val() + text);
        };
        this.prepend_text = function (text) {
            $(\"#\" + this.name).val(text + $(\"#\" + this.name).val());
        };
        this.clear_text = function () {
            $(\"#\" + this.name).val(\"\");
        };
    }
    """
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/PasswordEdit.py",
"copies": "1",
"size": "2488",
"license": "apache-2.0",
"hash": 2143450686837399300,
"line_mean": 32.1866666667,
"line_max": 105,
"alpha_frac": 0.463022508,
"autogenerated": false,
"ratio": 3.675036927621861,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4638059435621861,
"avg_score": null,
"num_lines": null
} |
__author__ = 'amentis'
from RxAPI.RxGUI import RxDynamic, StylableObject, TextContainer
class LineEdit(StylableObject, RxDynamic, TextContainer):
    """
    text input field of one line
    """
    def __init__(self, parent, name, text=" "):
        """
        @param parent: RxGUIObject parent REXI object
        @param name: str name of the REXI object
        @param text: str value of the line edit field
        """
        StylableObject.__init__(self, name, parent)
        RxDynamic.__init__(self)
        TextContainer.__init__(self, text)
        # NOTE(review): a single-space string as the "no limit" default looks
        # odd but is preserved for compatibility — confirm intended.
        self._max_length = ' '
        self._parent.add_child(self)
        # BUG FIX: this previously instantiated "new TextEdit(...)" — a
        # copy-paste from TextEdit.  The JS class registered below is
        # "function LineEdit", so instantiate LineEdit.
        self._parent.append_javascript("var %s = new LineEdit(\"%s\"); \n" % (self.get_name(), self.get_name()))
    def set_max_length(self, max_length):
        """
        set maximum length of the text in the field
        @param max_length: int number of symbols
        """
        self._max_length = max_length
    def get_max_length(self):
        """
        @return: int maximum number of symbols that fit in the field
        """
        return self._max_length
    def get(self):
        """
        @return: str HTML of the text edit field
        """
        style = " "
        if self._style_internal_enabled:
            style += self._style_internal
        # per-widget CSS rule built from the inherited style attributes
        style += """
        #%s {color: %s;font: %s; %s background-color: %s; }
        """ % (self.get_name(), self._text_color.get(), self._font.get(), self._border.get(),
               self._background_color.get())
        style += self._css
        self._parent.add_css(style)
        self._parent.append_javascript(self.get_javascript())
        return """
        <input type=\"text\" id="{0}" class="LineEdit" value=\"{1}\" />
        """.format(self.get_name(), self._text)
    # Client-side companion class.
    # NOTE(review): get_text is defined twice below (the first copy returns
    # nothing) and the second set_size assigns width twice, dropping the
    # height — left as-is to match the other widgets' JS; confirm and fix
    # all widgets together.
    javascript_class = """
    function LineEdit (name) {
        this.name = name;
        this.set_size = function(width, height) {
            $(\"#\" + this.name).style.width = width;
            $(\"#\" + this.name).style.height = height;
        };
        this.get_font = function() {
            return $(\"#\" + this.name).style.font;
        };
        this.get_colors = function() {
            return [$(\"#\" + this.name).style.color, $(\"#\" + this.name).background-color];
        };
        this.get_text = function () {
            $(\"#\" + this.name).html();
        };
        this.set_size = function (width, height) {
            $(\"#\" + this.name).style.width = width;
            $(\"#\" + this.name).style.width = height;
        };
        this.get_text = function () {
            return $(\"#\" + this.name).val();
        };
        this.set_text = function (text) {
            $(\"#\" + this.name).val(text);
        };
        this.append_text = function (text) {
            $(\"#\" + this.name).val($(\"#\" + this.name).val() + text);
        };
        this.prepend_text = function (text) {
            $(\"#\" + this.name).val(text + $(\"#\" + this.name).val());
        };
        this.clear_text = function () {
            $(\"#\" + this.name).val(\"\");
        };
    }
    """
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/LineEdit.py",
"copies": "1",
"size": "3124",
"license": "apache-2.0",
"hash": 4382366400535514600,
"line_mean": 32.2446808511,
"line_max": 112,
"alpha_frac": 0.4942381562,
"autogenerated": false,
"ratio": 3.6241299303944317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.959927848640236,
"avg_score": 0.0038179200384145565,
"num_lines": 94
} |
__author__ = 'amentis'
from RxAPI.RxGUI import StylableObject, RxDynamic, TextContainer
class Label(StylableObject, RxDynamic, TextContainer):
    """
    label object containing simple text
    """
    def __init__(self, parent, name, text=None):
        """
        @param parent: RxGUIObject parent object
        @param name: str name of the REXI object
        @param text: str text to be shown in the label (falls back to name)
        """
        StylableObject.__init__(self, name, parent)
        RxDynamic.__init__(self)
        # an empty/None text defaults to the widget's name
        TextContainer.__init__(self, text or name)
        self._parent.add_child(self)
    def get(self):
        """
        @return: str HTML code of the label
        """
        if self._style_internal_enabled:
            self._css += self._style_internal
        self._parent.add_css(self._css)
        self._parent.append_javascript(self.get_javascript())
        template = """
        <span id=\"%s\" class=\"label\"> %s </span>
        """
        return template % (self.get_name(), self.get_text())
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/Label.py",
"copies": "1",
"size": "1040",
"license": "apache-2.0",
"hash": -7601019350115092000,
"line_mean": 29.6176470588,
"line_max": 64,
"alpha_frac": 0.5682692308,
"autogenerated": false,
"ratio": 3.8095238095238093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9874964986025166,
"avg_score": 0.0005656108597285068,
"num_lines": 34
} |
__author__ = 'amentis'
class TextContainer:
    """
    superclass for objects containing text

    Stores a single text value and offers basic mutation helpers.
    """
    def __init__(self, text):
        """
        @param text: initial value of the text object
        """
        self._text = text
    def set_text(self, text):
        """
        replace the stored text with a new value
        @param text: str new value of the text object
        """
        self._text = text
    def get_text(self):
        """
        @return: the current value of the text object
        """
        return self._text
    def append_text(self, text):
        """
        add text after the current value
        @param text: text to be appended
        """
        self._text = self._text + text
    def prepend_text(self, text):
        """
        add text before the current value
        @param text: text to be prepended
        """
        self._text = "%s%s" % (text, self._text)
    def clear_text(self):
        """
        reset the stored text to the empty string
        """
        self._text = ""
"repo_name": "amentis/Rexi",
"path": "RxAPI/RxGUI/TextContainer.py",
"copies": "1",
"size": "1032",
"license": "apache-2.0",
"hash": -6466052319356276000,
"line_mean": 21.9555555556,
"line_max": 65,
"alpha_frac": 0.511627907,
"autogenerated": false,
"ratio": 4.410256410256411,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000966183574879227,
"num_lines": 45
} |
__author__ = 'amertis'
from django.db import models
__all__ = ["OpeniContext","Group","GroupFriend","LocationVisit","OpeniContextAwareModel",]
class OpeniContext(models.Model):
    """
    Flat context record for the OPENi platform.

    Every attribute is a nullable free-form TextField; related fields are
    namespaced by prefix (location_*, address_*, device_*, application_*,
    personalization_*).  NOTE(review): numeric and time values are stored
    as text — confirm callers parse them where needed.
    """
    objectid = models.TextField()
    # id is missing because it is the default
    time_created = models.TextField(null=True)
    time_edited = models.TextField(null=True)
    time_deleted = models.TextField(null=True)
    duration_time_started = models.TextField(null=True)
    duration_time_ended = models.TextField(null=True)
    # #the docs say location includes: coordinates,physical address, free-form location name
    location_latitude = models.TextField(null=True)
    location_longitude = models.TextField(null=True)
    location_height = models.TextField(null=True)
    #
    address_street = models.TextField(null=True)
    address_number = models.TextField(null=True)
    address_apartment = models.TextField(null=True)
    address_city = models.TextField(null=True)
    address_locality = models.TextField(null=True)
    address_country = models.TextField(null=True)
    address_zip = models.TextField(null=True)
    current_location_latitude = models.TextField(null=True)
    current_location_longitude = models.TextField(null=True)
    current_location_height = models.TextField(null=True)
    # #use date
    current_location_time = models.TextField(null=True)
    rating_value = models.TextField(null=True)
    # #happy,sad
    mood_value = models.TextField(null=True)
    # #3G,LTE
    device_wireless_network_type = models.TextField(null=True)
    # #good,bad,excellent
    device_wireless_channel_quality = models.TextField(null=True)
    # #...
    device_accelerometers = models.TextField(null=True)
    # #.... length of cell id... up to 30
    device_cell_log = models.TextField(null=True)
    # # sms recipients phones list up to 30
    device_sms_log = models.TextField(null=True)
    # # sms phonecalls list up to 30 comma separated
    device_call_log = models.TextField(null=True)
    #
    # # a csv of openI appIds (up to 30) ---
    device_running_applications = models.TextField(null=True)
    #
    # # installed openI apps
    device_installed_applications = models.TextField(null=True)
    # # tov
    device_screen_state = models.TextField(null=True)
    # # low,medium,full
    device_battery_status = models.TextField(null=True)
    #
    # #red,'#fbfbfb' from W3C
    application_background_color = models.TextField(null=True)
    #
    application_format = models.TextField(null=True)
    application_font = models.TextField(null=True)
    application_color = models.TextField(null=True)
    application_background = models.TextField(null=True)
    application_text = models.TextField(null=True)
    application_box = models.TextField(null=True)
    application_classification = models.TextField(null=True)
    application_text_copy = models.TextField(null=True)
    #
    #
    personalization_age_range = models.TextField(null=True)
    # # 3iso
    personalization_country = models.TextField(null=True)
    personalization_postal_code = models.TextField(null=True)
    personalization_region = models.TextField(null=True)
    personalization_town = models.TextField(null=True)
    # #string...two values
    personalization_roaming = models.TextField(null=True)
    # #string... two values
    personalization_opt_out = models.TextField(null=True)
    #
    personalization_carrier = models.TextField(null=True)
    # #Tablet,Smartphone,PDA
    personalization_handset = models.TextField(null=True)
    # #remove
    personalization_user_ids = models.TextField(null=True)
    # #UDID,an UUID
    personalization_device_id = models.TextField(null=True)
    #
    personalization_application_id = models.TextField(null=True)
    # #Samsung S4
    personalization_device_type = models.TextField(null=True)
    # #Android,IOS, Windows Mobile
    personalization_device_os = models.TextField(null=True)
    # #Any male ,
    personalization_gender = models.TextField(null=True)
    personalization_has_children = models.TextField(null=True)
    # #
    personalization_ethnicity = models.TextField(null=True)
    #
    personalization_income = models.TextField(null=True)
    # #Any, Exactly 1 Exactly 2 Exactly 3 Exactly 4, 2 or fewer, 2 or more, 3 or more, 4 or more , 5 or more
    personalization_household_size = models.TextField(null=True)
    personalization_education = models.TextField(null=True)
    personalization_interests = models.TextField(null=True)
    # #Any, Those who received, Those who interacted with For Past Campaigns, Those who interacted with For Launched Campaigns
    personalization_customer_tag = models.TextField(null=True)
    # #Greek,English iso code
    personalization_users_language = models.TextField(null=True)
    class Meta:
        app_label = "OPENiapp"
class Group(models.Model):
    """A social group attached to one OpeniContext record."""
    group_id = models.TextField(null=True)
    group_name = models.TextField(null=True)
    group_type = models.TextField(null=True)
    # owning context record
    context = models.ForeignKey(OpeniContext)
    class Meta:
        app_label = "OPENiapp"
class GroupFriend(models.Model):
    """A person belonging to a Group (all attributes free-form text)."""
    person_id = models.TextField(null=True)
    person_object_type = models.TextField(null=True)
    person_url = models.TextField(null=True)
    person_service = models.TextField(null=True)
    person_to_id = models.TextField(null=True)
    person_time_person_added = models.TextField(null=True)
    person_target_id = models.TextField(null=True)
    # group this person belongs to
    group = models.ForeignKey(Group)
    class Meta:
        app_label = "OPENiapp"
class LocationVisit(models.Model):
    """A visited location (coordinates + comment) tied to an OpeniContext."""
    location_visits_latitude = models.TextField(null=True)
    location_visits_longitude = models.TextField(null=True)
    location_visits_height = models.TextField(null=True)
    location_visits_visit = models.TextField(null=True)
    location_visits_comment = models.TextField(null=True)
    # owning context record
    context = models.ForeignKey(OpeniContext)
    class Meta:
        app_label = "OPENiapp"
class OpeniContextAwareModel(models.Model):
    """
    Abstract base for models that carry an optional OpeniContext.

    Deleting an instance also deletes its context row (the FK itself uses
    DO_NOTHING, so the cascade is done here explicitly).
    """
    context = models.ForeignKey(OpeniContext, null=True, on_delete=models.DO_NOTHING)
    class Meta:
        abstract = True
        app_label = "OPENiapp"
    def delete(self):
        # BUG FIX: context is nullable (null=True); the old code called
        # self.context.delete() unconditionally and crashed with
        # AttributeError on instances without a context.
        if self.context is not None:
            self.context.delete()
        super(OpeniContextAwareModel, self).delete()
| {
"repo_name": "OPENi-ict/ntua_demo",
"path": "openiPrototype/openiPrototype/APIS/Context/models.py",
"copies": "1",
"size": "6233",
"license": "apache-2.0",
"hash": 8203760856477506000,
"line_mean": 38.4493670886,
"line_max": 126,
"alpha_frac": 0.7166693406,
"autogenerated": false,
"ratio": 3.5434906196702673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47601599602702677,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ameyapandilwar'
import sys
import operator
import happybase
import hdf5_getters as GETTERS
from pyspark import SparkContext
# HBase table layout and the HDF5 metadata field names extracted per song.
COLUMN_FAMILY_NAME = 'cf'
ARTIST_HOTTTNESSS_COLUMNID = 'artist_hotttnesss'
ARTIST_ID_COLUMNID = 'artist_id'
ARTIST_NAME_COLUMNID = 'artist_name'
DANCEABILITY_COLUMNID = 'danceability'
RELEASE_COLUMNID = 'release'
SONG_HOTTTNESSS_COLUMNID = 'song_hotttnesss'
SONG_ID_COLUMNID = 'song_id'
TEMPO_COLUMNID = 'tempo'
TITLE_COLUMNID = 'title'
TRACK_ID_COLUMNID = 'track_id'
YEAR_COLUMNID = 'year'
ARTIST_FAMILIARITY_COLUMNID = "artist_familiarity"
ARTIST_LOCATION_COLUMNID = "artist_location"
DURATION_COLUMNID = "duration"
ENERGY_COLUMNID = "energy"
LOUDNESS_COLUMNID = "loudness"
def read_and_save(h5File):
    """
    Extract the metadata fields of interest from one Million Song Dataset
    HDF5 file and return them as a single tab-separated record line.

    @param h5File: path to an MSD .h5 file
    @return: str tab-separated record (built by saveDataInHbase)
    """
    # NOTE(review): no try/finally — the handle leaks if a getter raises.
    h5 = GETTERS.open_h5_file_read(h5File)
    artistHotttnesss = GETTERS.get_artist_hotttnesss(h5)
    artistId = GETTERS.get_artist_id(h5)
    artistName = GETTERS.get_artist_name(h5)
    danceability = GETTERS.get_danceability(h5)
    release = GETTERS.get_release(h5)
    songHotttnesss = GETTERS.get_song_hotttnesss(h5)
    songId = GETTERS.get_song_id(h5)
    tempo = GETTERS.get_tempo(h5)
    title = GETTERS.get_title(h5)
    trackId = GETTERS.get_track_id(h5)
    year = GETTERS.get_year(h5)
    artistFamiliarity = GETTERS.get_artist_familiarity(h5)
    artistLocation = GETTERS.get_artist_location(h5)
    duration = GETTERS.get_duration(h5)
    energy = GETTERS.get_energy(h5)
    loudness = GETTERS.get_loudness(h5)
    h5.close()
    # NOTE(review): despite its name, saveDataInHbase only formats the
    # record; nothing is written to HBase here.
    return saveDataInHbase(h5File, artistHotttnesss, artistId, artistName, danceability, release, songHotttnesss,
                           songId, tempo, title, trackId, year, artistFamiliarity, artistLocation, duration, energy,
                           loudness)
def saveDataInHbase(h5FileName, artistHotttnesss, artistId, artistName, danceability, release, songHotttnesss,
                    songId, tempo, title, trackId, year, artistFamiliarity, artistLocation, duration, energy,
                    loudness):
    """
    Format one song's metadata as a tab-separated, newline-terminated line.

    Despite the name, nothing is written to HBase here: the caller collects
    the returned lines and writes them to a file.  h5FileName is accepted
    for interface compatibility but unused.

    @return: str tab-separated record ending with a newline
    """
    fields = [
        str(artistHotttnesss), artistId, artistName, str(danceability),
        release, str(songHotttnesss), songId, str(tempo), title,
        trackId, str(year), str(artistFamiliarity), artistLocation,
        str(duration), str(energy), str(loudness),
    ]
    return "\t".join(fields) + "\n"
def read_parition(files_iterator):
    """
    Map one Spark partition of HDF5 file paths to formatted record lines.

    @param files_iterator: iterable of HDF5 file paths in this partition
    @return: list of tab-separated record strings, one per file
    """
    print("system has initialized . . . ")
    return [read_and_save(h5_path) for h5_path in files_iterator]
if __name__ == '__main__':
    # argv[1]: text file listing HDF5 paths, one per line (trailing newline
    # produces an empty last entry, popped below).
    with open(sys.argv[1], 'r+') as fileList:
        fileListContent = fileList.read()
    listOfFiles = fileListContent.split('\n')
    # drop the empty entry caused by the trailing newline
    listOfFiles.pop()
    sc = SparkContext(appName="msd")
    # argv[2]: directory holding dependency archives shipped to executors
    sc.addPyFile(sys.argv[2] + "happybase.zip")
    sc.addPyFile(sys.argv[2] + "thrift.zip")
    sc.addPyFile(sys.argv[2] + "hdf5_getters.py")
    sc.addPyFile(sys.argv[2] + "tables.egg")
    filesRDD = sc.parallelize(listOfFiles)
    resultList = filesRDD.mapPartitions(read_parition).collect()
    # argv[3]: output file collecting all tab-separated records
    with open(sys.argv[3], "w+") as fileWriter:
        fileWriter.write("".join(resultList))
"repo_name": "Arulselvanmadhavan/Artist_Recognition_from_Audio_Features",
"path": "MRTasks/parsingTasks/writeMSDSubsetToFile_PySpark.py",
"copies": "1",
"size": "3243",
"license": "apache-2.0",
"hash": -7014637245730079000,
"line_mean": 34.2608695652,
"line_max": 116,
"alpha_frac": 0.667591736,
"autogenerated": false,
"ratio": 2.869911504424779,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4037503240424779,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amine Kerkeni'
import tornado.ioloop
import tornado.web
import tornado.httpserver
from tornado.web import url
import json
import requests
from sqlalchemy import create_engine,Table, MetaData, Column, Index, String, Integer, Text
import hashlib
import base64
import uuid
import config
import time
engine = create_engine(config.sql_connection_string)
def check_permission(password, username):
    """
    Validate a username/password pair against the users table.

    @param password: clear-text password submitted by the user
    @param username: email address identifying the user
    @return: True when the user exists and the salted SHA-512 hash matches
    """
    # SECURITY FIX: the previous version interpolated the email straight
    # into the SQL string ("... = '%s'" % username) — SQL injection.
    # Pass it as a DBAPI parameter instead (paramstyle %s; confirm the
    # configured driver uses pyformat/format style).
    user_details = engine.execute(
        "SELECT email, password, salt FROM users WHERE email = %s", (username,)
    ).fetchone()
    if user_details is None:
        return False
    t_sha = hashlib.sha512()
    t_sha.update(password + user_details.salt)
    hashed_password = base64.urlsafe_b64encode(t_sha.digest())
    # (old code had a dead "else: False" statement here — the comparison
    # result is now returned directly)
    return hashed_password == user_details.password
def check_token_validity(token, timeout):
    """
    Decide whether a timestamp token is still fresh.

    @param token: creation time as produced by time.time() (str or float),
                  or None meaning "no token issued yet"
    @param timeout: maximum allowed age in seconds
    @return: True when the token is absent or not older than timeout
    """
    if token is None:
        return True
    age = time.time() - float(token)
    return age <= float(timeout)
class BaseHandler(tornado.web.RequestHandler):
    """Base handler: resolves the logged-in user from the secure cookie."""
    def get_current_user(self):
        # Tornado calls this to populate self.current_user for @authenticated.
        return self.get_secure_cookie("user")
class LoginHandler(BaseHandler):
    """
    User login handler
    """
    def get(self):
        """Render the login form, showing an error message when present."""
        try:
            error_message = self.get_argument("error")
        except Exception:
            # the "error" query parameter is optional
            error_message = ""
        self.render("login.html", error_message=error_message)
    def post(self):
        """Check the submitted credentials and start a session on success."""
        username = self.get_argument("email", "")
        password = self.get_argument("password", "")
        auth = check_permission(password, username)
        if auth:
            self.set_current_user(username)
            self.redirect(self.get_argument("next", u"/list"))
        else:
            # BUG FIX: error_msg was built but never appended to the
            # redirect, so the login page never displayed "Login incorrect".
            error_msg = u"?error=" + tornado.escape.url_escape("Login incorrect")
            self.redirect(u"/login" + error_msg)
    def set_current_user(self, user):
        """Store (or clear, when user is falsy) the secure session cookie."""
        if user:
            self.set_secure_cookie("user", tornado.escape.json_encode(user))
        else:
            self.clear_cookie("user")
class AssignmentListHandler(BaseHandler):
    """
    Lists the first ten coding assignments (id, title, level).
    """
    @tornado.web.authenticated
    def get(self,assignment_id=1):
        # NOTE(review): assignment_id is unused here, and the query is
        # hard-capped at LIMIT 0,10 — confirm pagination is intended later.
        assignments = engine.execute('SELECT id, title, level FROM coding_assignment LIMIT 0,10').fetchall()
        self.render("list.html",assignment_list=assignments)
class AssignmentHandler(BaseHandler):
    """
    Shows a timed coding assignment and accepts its submission.

    A per-user, per-assignment secure cookie stores the time the assignment
    was first opened; the submission window is 30 seconds (hard-coded in
    both get and post below).
    """
    @tornado.web.authenticated
    def get(self,assignment_id=1):
        user = self.get_secure_cookie("user")
        # sanitize the cookie value for use inside a cookie name
        user = user.replace('@', '_').replace('"', '')
        token_id = '%s_assignment_%s' % (user, assignment_id)
        token = self.get_secure_cookie(token_id)
        if check_token_validity(token, 30):
            # first visit: start the countdown now
            if token is None:
                self.set_secure_cookie(token_id, str(time.time()))
            try:
                # NOTE(review): assignment_id is %-interpolated into SQL; it
                # comes from the ([0-9]+) route regex so only digits reach
                # here, but a bound parameter would still be safer.
                assignment = engine.execute('SELECT title, details FROM coding_assignment WHERE id = %s'
                                            % assignment_id).fetchone()
                self.render("assignment.html", title=assignment.title, details=assignment.details,
                            assignment_id=assignment_id)
            except Exception as ex:
                # NOTE(review): failure only prints; the client gets an
                # empty response — confirm intended.
                print ex
        else:
            # token expired: force re-login
            self.render("login.html")
    @tornado.web.authenticated
    def post(self,assignment_id=1):
        user = self.get_secure_cookie("user")
        user = user.replace('@', '_').replace('"', '')
        token_id = '%s_assignment_%s' % (user, assignment_id)
        timer = self.get_secure_cookie(token_id)
        if timer is not None:
            user_code = self.get_argument("code_area", "")
            # NOTE(review): submitted code is only printed, never stored.
            print user_code
            if (time.time() - float(timer)) > 30:
                # submitted after the 30-second window
                self.clear_cookie(token_id)
                self.redirect("/login")
            else:
                self.clear_cookie(token_id)
                self.redirect("/list")
        else:
            self.redirect("/login")
class ExpiredAssignmentHandler(BaseHandler):
    """
    Placeholder for showing an expired assignment.
    NOTE(review): not yet implemented and not registered in Application's
    routes — confirm whether it should be wired up or removed.
    """
    @tornado.web.authenticated
    def get(self,assignment_id=1):
        pass
class DefaultSampleHandler(BaseHandler):
    """
    Returns the default starter code for an assignment/language pair as JSON.
    """
    @tornado.web.authenticated
    def get(self,assignment_id, language_id):
        try:
            # Both ids come from ([0-9]+) route regexes, so the %-interpolated
            # SQL only ever sees digits; bound parameters would still be safer.
            sample = engine.execute('SELECT initial_code FROM coding_assignment_default \
                                    WHERE coding_assignment_id = %s AND language = %s'
                                    % (assignment_id, language_id)).fetchone()
            self.set_header("Content-Type", "application/json")
            self.write(json.dumps({'sample': sample.initial_code}))
        except Exception as ex:
            # NOTE(review): errors are only printed; the client gets an
            # empty response body — confirm intended.
            print ex
class RextesterHandler(BaseHandler):
    """
    This handler will relay post messages to rextester API in order to avoid cross domain issues.
    Rextester API is an online service for compiling/running code
    """
    @tornado.web.authenticated
    def post(self):
        # Forward the user's code and language choice verbatim to the
        # external service, then relay its JSON response back.
        data = json.loads(self.request.body)
        code = data['Program']
        language = data['LanguageChoiceWrapper']
        headers = {'content-type': 'application/json'}
        payload = {u'LanguageChoiceWrapper': int(language), u'Program': code}
        # NOTE(review): synchronous requests.post blocks the IOLoop for the
        # duration of the remote call — confirm acceptable for this app.
        r = requests.post("http://rextester.com/rundotnet/api", data=json.dumps(payload), headers=headers)
        self.set_header("Content-Type", "application/json")
        self.write(r.json())
class LogoutHandler(BaseHandler):
    """
    User logout handler
    """
    def get(self):
        """Drop the session cookie and send the user to the next page."""
        self.clear_cookie("user")
        destination = self.get_argument("next", "/login")
        self.redirect(destination)
class Application(tornado.web.Application):
    """
    Main application class: wires URL routes to handlers and configures
    templates, static files, secure cookies and the login redirect.
    """
    def __init__(self):
        # assignment/sample ids are constrained to digits by the regexes
        handlers = [
            url(r'/assignment/([0-9]+)', AssignmentHandler, name='assignment'),
            url(r'/list', AssignmentListHandler, name='list'),
            url(r'/login', LoginHandler, name='login'),
            url(r'/logout', LogoutHandler, name="logout"),
            (r'/sample/([0-9]+)/([0-9]+)', DefaultSampleHandler),
            (r'/run', RextesterHandler),
        ]
        settings = {
            "template_path": 'templates',
            "static_path": 'static',
            # secret for signing secure cookies, kept in config.py
            "cookie_secret": config.cookie_secret,
            # @tornado.web.authenticated redirects here when not logged in
            "login_url": "/login"
        }
        tornado.web.Application.__init__(self, handlers, **settings)
if __name__ == '__main__':
    # Serve the application on port 5000 until the process is stopped.
    app = Application()
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(5000)
    tornado.ioloop.IOLoop.instance().start()
"repo_name": "minus--/GuerrillaInterview",
"path": "main_tornado.py",
"copies": "1",
"size": "6812",
"license": "mit",
"hash": -7987970593194466000,
"line_mean": 31.7548076923,
"line_max": 117,
"alpha_frac": 0.5955666471,
"autogenerated": false,
"ratio": 4.141033434650456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5236600081750457,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amine'
import requests
import json
import pickle
import hashlib
import base64
import uuid
import config
from sqlalchemy import create_engine,Table, MetaData, Column, Index, String, Integer, Text
engine = create_engine(config.sql_connection_string)
def rextester_run():
    """
    Manual smoke test: send a tiny Python program to the rextester API and
    dump the response.  Performs a live network call; nothing is returned.
    """
    headers = {'content-type': 'application/json'}
    # LanguageChoiceWrapper 5 selects the language on rextester's side
    # (presumably Python — confirm against the rextester API docs).
    data = {u'LanguageChoiceWrapper': 5, u'Program': u'print "Hello Amine"'}
    r = requests.post("http://rextester.com/rundotnet/api", data=json.dumps(data), headers=headers)
    print r
    print r.status_code
    print r.encoding
    print r.text
    print r.json()
# Module-level salt, regenerated on every import.  NOTE(review): hashes
# produced in a previous run can therefore never be re-validated with this
# value — confirm intended for this scratch/test script.
salt = base64.urlsafe_b64encode(uuid.uuid4().bytes)
print salt
def hash_password(msg):
    """
    Hash *msg* together with the module-level salt using SHA-512.

    @param msg: clear-text password
    @return: urlsafe-base64 encoded digest
    """
    digest = hashlib.sha512()
    digest.update(msg + salt)
    return base64.urlsafe_b64encode(digest.digest())
def validate_password(msg, hash):
    """
    Check whether *msg*, salted with the module-level salt, hashes to *hash*.

    @param msg: clear-text password to verify
    @param hash: urlsafe-base64 SHA-512 digest to compare against
    @return: True on match, False otherwise
    """
    digest = hashlib.sha512()
    digest.update(msg + salt)
    candidate = base64.urlsafe_b64encode(digest.digest())
    return candidate == hash
def save_password(email, password, user_salt):
    """
    Hash *password* and persist an (email, hash, salt) row.

    NOTE(review): user_salt is stored, but hash_password() salts with the
    module-level `salt` — rows saved here cannot be re-validated using the
    stored column.  Confirm which salt is intended.
    """
    try:
        hashed_password = hash_password(password)
        # SECURITY FIX: the previous string-formatted INSERT was injectable
        # via email; use DBAPI parameters instead (paramstyle %s; confirm
        # the configured driver uses format/pyformat style).
        engine.execute(
            "INSERT INTO users (email,password,salt) VALUES (%s, %s, %s)",
            (email, hashed_password, user_salt),
        )
    except Exception as ex:
        print(ex)
# Import-time smoke checks: round-trip one password, try a mismatch, then
# insert a sample row.  NOTE(review): this writes to the database every
# time the module is imported — confirm intended.
print validate_password('toto',hash_password('toto'))
print validate_password('tito',hash_password('toto'))
save_password('toto.toto@gmail.com','toto', salt)
"repo_name": "minus--/GuerrillaInterview",
"path": "test.py",
"copies": "1",
"size": "1573",
"license": "mit",
"hash": 3551477177038700500,
"line_mean": 26.6140350877,
"line_max": 99,
"alpha_frac": 0.6795931341,
"autogenerated": false,
"ratio": 3.4344978165938866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9591305408275703,
"avg_score": 0.004557108483636761,
"num_lines": 57
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
from codingame_solutions.utilities.graph import Graph
# MAIN
# Build an undirected adjacency graph of people from stdin, then find the
# smallest eccentricity (longest shortest-chain) over a set of candidate
# central people; that minus one is the propagation step count.
persons = Graph()
n = int(input())  # the number of adjacency relations
for i in range(n):
    # xi: the ID of a person which is adjacent to yi
    # yi: the ID of a person which is adjacent to xi
    xi, yi = [int(j) for j in input().split()]
    # store the relation in both directions (undirected graph)
    persons.add_edge((xi, yi))
    persons.add_edge((yi, xi))
#print(persons, file=sys.stderr)
# lets start with first person that has one neighbour
persons_with_1_neighbour = persons.get_vertices_with_n_edges(1)
print("persons_with_1_neighbour: " + str(persons_with_1_neighbour), file=sys.stderr)
paths = persons.find_all_paths_from_vertex(persons_with_1_neighbour[0])
print("paths: " + str(paths), file=sys.stderr)
longest_path_length = max([len(l) for l in paths])
# keep only the longest paths from that leaf
longest_paths = []
for path in paths:
    if len(path) == longest_path_length:
        longest_paths.append(path)
print("longest_paths: " + str(longest_paths), file=sys.stderr)
# candidate centers: the people common to every longest path
result = set(longest_paths[0])
for s in longest_paths[1:]:
    result.intersection_update(s)
print("result: " + str(result), file=sys.stderr)
# analyze all the common elements
paths = persons.find_all_paths_from_vertex(list(result)[-1])
longest_path_length = max([len(l) for l in paths])
# NOTE(review): the next assignment is dead code — it is immediately
# overwritten by the 9999 sentinel on the following line.
minimal_distance = longest_path_length
minimal_distance = 9999
for person_name in list(result):
    paths = persons.find_all_paths_from_vertex(person_name)
    longest_path_length = max([len(l) for l in paths])
    print("longest_path_length: " + str(longest_path_length), file=sys.stderr)
    if longest_path_length < minimal_distance:
        minimal_distance = longest_path_length
# other (much slower) solutions:
# iterate over every node of each path in longest_paths
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
# The minimal amount of steps required to completely propagate the advertisement
print(minimal_distance-1)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_Teads_Sponsored_Challenge.py",
"copies": "1",
"size": "1988",
"license": "mit",
"hash": 3755305737188193000,
"line_mean": 30.0625,
"line_max": 84,
"alpha_frac": 0.7097585513,
"autogenerated": false,
"ratio": 3.1757188498402558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43854774011402553,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
from enum import Enum
class Direction(Enum):
    """Compass direction of Bender's movement."""
    south = 1
    east = 2
    north = 3
    west = 4

    def get_as_string(self):
        """Return the direction as the upper-case word the puzzle expects."""
        names = {
            Direction.south: "SOUTH",
            Direction.north: "NORTH",
            Direction.east: "EAST",
            Direction.west: "WEST",
        }
        # fallback mirrors the original's unreachable else branch
        return names.get(self, "WRONG VALUE")
class Bender:
    """State of the Bender robot: position, heading, and mode flags."""

    def __init__(self, starting_point_x: int, starting_point_y: int):
        self.current_position = (starting_point_x, starting_point_y)
        self.current_direction = Direction.south
        # direction priorities in normal and in inverted mode
        self.__priorities = [Direction.south, Direction.east, Direction.north, Direction.west]
        self.__priorities_inverted = [Direction.west, Direction.north, Direction.east, Direction.south]
        self.flag_breaker_mode = False   # True: Bender can smash 'X' walls
        self.flag_inverted_mode = False  # True: priority order is reversed

    def change_direction2(self):
        """Rotate to the next direction in the active priority list.

        NOTE(review): apparently unused -- the game loop calls
        change_direction() instead.
        """
        if self.flag_inverted_mode:
            priorities = self.__priorities_inverted
        else:
            priorities = self.__priorities
        # BUG FIX: look the current direction up in the ACTIVE priority
        # list; the original always used self.__priorities, which picks
        # a wrong successor while in inverted mode.
        current_direction_index = priorities.index(self.current_direction)
        current_direction_index += 1
        if current_direction_index >= len(priorities):
            current_direction_index = 0
        self.current_direction = priorities[current_direction_index]

    def change_direction(self, cell_down, cell_up, cell_left, cell_right):
        """Pick the first passable neighbour cell in priority order.

        A cell blocks movement when it is '#', or 'X' while breaker
        mode is off.
        """
        print("cell_down" + str(cell_down), file=sys.stderr)
        print("cell_up" + str(cell_up), file=sys.stderr)
        print("cell_left" + str(cell_left), file=sys.stderr)
        print("cell_right" + str(cell_right), file=sys.stderr)
        if not self.flag_inverted_mode:
            # normal priorities: SOUTH, EAST, NORTH, WEST
            if cell_down != "#" and (cell_down != "X" or self.flag_breaker_mode):
                self.current_direction = Direction.south
            elif cell_right != "#" and (cell_right != "X" or self.flag_breaker_mode):
                self.current_direction = Direction.east
            elif cell_up != "#" and (cell_up != "X" or self.flag_breaker_mode):
                self.current_direction = Direction.north
            elif cell_left != "#" and (cell_left != "X" or self.flag_breaker_mode):
                self.current_direction = Direction.west
        else:
            # inverted priorities: WEST, NORTH, EAST, SOUTH
            if cell_left != "#" and (cell_left != "X" or self.flag_breaker_mode):
                self.current_direction = Direction.west
            elif cell_up != "#" and (cell_up != "X" or self.flag_breaker_mode):
                self.current_direction = Direction.north
            elif cell_right != "#" and (cell_right != "X" or self.flag_breaker_mode):
                self.current_direction = Direction.east
            elif cell_down != "#" and (cell_down != "X" or self.flag_breaker_mode):
                self.current_direction = Direction.south

    def set_direction(self, direction: str):
        """Set the heading from a modifier cell letter: S, N, E or W."""
        if direction == "S":
            self.current_direction = Direction.south
        elif direction == "N":
            self.current_direction = Direction.north
        elif direction == "E":
            self.current_direction = Direction.east
        elif direction == "W":
            self.current_direction = Direction.west

    def get_next_position(self):
        """Return the (x, y) cell one step ahead in the current
        direction, or (-1, -1) if the direction is not one of the four
        compass values."""
        next_x = -1
        next_y = -1
        if self.current_direction == Direction.south:
            next_x = self.current_position[0]
            next_y = self.current_position[1] + 1
        elif self.current_direction == Direction.north:
            next_x = self.current_position[0]
            next_y = self.current_position[1] - 1
        elif self.current_direction == Direction.east:
            next_x = self.current_position[0] + 1
            next_y = self.current_position[1]
        elif self.current_direction == Direction.west:
            next_x = self.current_position[0] - 1
            next_y = self.current_position[1]
        else:
            pass
        return next_x, next_y

    def move(self, x: int, y: int):
        """Place Bender on the given cell."""
        self.current_position = (x, y)

    def toggle_priorities(self):
        """Flip inverted mode (effect of the 'I' cell)."""
        self.flag_inverted_mode = not self.flag_inverted_mode

    def toggle_breaker_mode(self):
        """Flip breaker mode (effect of the 'B' cell)."""
        self.flag_breaker_mode = not self.flag_breaker_mode
class FuturamaCity:
    """Grid map of the city with a visited mask and teleport bookkeeping."""

    def __init__(self):
        self.map = []
        self.visited = []
        self.width = -1
        self.height = -1
        self.teleports_positions = []
        self.__load_from_input()
        self.__find_teleports()

    def __load_from_input(self):
        """Read the grid dimensions and rows from stdin."""
        self.height, self.width = [int(i) for i in input().split()]
        for _ in range(self.height):
            row = input()
            self.map.append(list(row))
            self.visited.append([False] * len(row))

    def get_map(self):
        """Return the grid as one newline-terminated string per row."""
        return "".join("".join(row) + "\n" for row in self.map)

    def find_starting_point(self):
        """Return (x, y) of the '@' start cell, or (-1, -1) if absent."""
        for y in range(self.height):
            for x in range(self.width):
                if self.map[y][x] == "@":
                    return x, y
        return -1, -1

    def __find_teleports(self):
        """Record the (x, y) position of every 'T' cell."""
        for y in range(self.height):
            for x in range(self.width):
                if self.map[y][x] == "T":
                    self.teleports_positions.append((x, y))

    def get_connected_teleport_position(self, used_teleport_x, used_teleport_y):
        """Return the position of the teleport paired with the one used."""
        first = self.teleports_positions[0]
        second = self.teleports_positions[1]
        if (used_teleport_x, used_teleport_y) == first:
            return second
        return first

    def get_cell(self, x: int, y: int):
        """Return the character stored at (x, y)."""
        return self.map[y][x]

    def remove_bear(self, x: int, y: int):
        """Clear cell (x, y)."""
        self.map[y][x] = " "

    def break_wall(self, x: int, y: int):
        """Replace a breakable 'X' wall at (x, y) with empty space."""
        self.map[y][x] = " "

    def mark_as_visited(self, x: int, y: int):
        """Flag cell (x, y) as stepped on."""
        self.visited[y][x] = True

    def was_visited(self, x: int, y: int):
        """Return whether cell (x, y) was stepped on before."""
        return self.visited[y][x]
# --- main: simulate Bender until he reaches '$' or provably loops ---
f = FuturamaCity()
print("Map:", file=sys.stderr)
print(f.get_map(), file=sys.stderr)

x, y = f.find_starting_point()
b = Bender(x, y)
print("Bender start: " + str(x) + ", " + str(y), file=sys.stderr)

moves = []
# counts consecutive steps onto already-visited cells (loop heuristic)
number_of_repeating_rounds = 0
flag_game_is_on = True
flag_loop = False
while flag_game_is_on:
    print("Bender: " + str(b.current_position[0]) + ", " + str(b.current_position[1]) + ", " + str(b.current_direction), file=sys.stderr)
    next_x, next_y = b.get_next_position()
    cell = f.get_cell(next_x, next_y)
    if cell == "#" or (cell == "X" and not b.flag_breaker_mode):
        # blocked: inspect the four neighbours and re-pick by priority
        cell_down = f.get_cell(b.current_position[0], b.current_position[1]+1)
        cell_up = f.get_cell(b.current_position[0], b.current_position[1]-1)
        cell_left = f.get_cell(b.current_position[0]-1, b.current_position[1])
        cell_right = f.get_cell(b.current_position[0]+1, b.current_position[1])
        b.change_direction(cell_down, cell_up, cell_left, cell_right)
    else:
        b.move(next_x, next_y)
        f.mark_as_visited(next_x, next_y)
        moves.append(b.current_direction)
        # apply the effect of the cell just entered
        if cell == "$":
            flag_game_is_on = False        # goal reached
        elif cell == "X" and b.flag_breaker_mode:
            f.break_wall(next_x, next_y)
        elif cell == "S" or cell == "N" or cell == "E" or cell == "W":
            b.set_direction(cell)          # direction modifier cell
        elif cell == "I":
            b.toggle_priorities()          # priority inverter cell
        elif cell == "B":
            b.toggle_breaker_mode()
        elif cell == "T":
            # jump to the paired teleport and mark it visited too
            after_teleport_x, after_teleport_y = f.get_connected_teleport_position(next_x, next_y)
            b.move(after_teleport_x, after_teleport_y)
            f.mark_as_visited(after_teleport_x, after_teleport_y)
        elif cell == " ":
            pass
        elif cell == "@":
            pass
        else:
            print("Wrong cell value!", file=sys.stderr)

        # heuristic loop detection: 500 consecutive revisits => LOOP.
        # NOTE(review): mark_as_visited was already called for this cell
        # above, so this test looks always-true -- verify intended order.
        if f.was_visited(next_x, next_y):
            number_of_repeating_rounds += 1
        else:
            number_of_repeating_rounds = 0
        if number_of_repeating_rounds >= 500:
            flag_game_is_on = False
            flag_loop = True

# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)

# emit LOOP, or one direction word per move taken
r = ""
if flag_loop:
    r = "LOOP"
else:
    for move in moves:
        r += move.get_as_string()
        r += "\n"
    r = r[:-1]
print(r)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_Bender_a_depressed_robot.py",
"copies": "1",
"size": "8875",
"license": "mit",
"hash": -3959486521595050500,
"line_mean": 31.8703703704,
"line_max": 137,
"alpha_frac": 0.56,
"autogenerated": false,
"ratio": 3.4082181259600612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4468218125960061,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
from enum import Enum
class Direction(Enum):
    """Side of a room through which Indy can enter or exit."""
    top = 1
    bottom = 2
    left = 3
    right = 4
    na = 5  # "not applicable" / no exit

    @staticmethod
    def get_from_text(text):
        """Map an input token (TOP/LEFT/RIGHT) to a Direction.

        Returns Direction.na (and logs to stderr) for any other token.
        FIX: declared as @staticmethod; the original was a plain method
        that only worked when accessed through the class object.
        """
        direction = Direction.na
        if text == "TOP":
            direction = Direction.top
        elif text == "LEFT":
            direction = Direction.left
        elif text == "RIGHT":
            direction = Direction.right
        else:
            print("Wrong direction text!", file=sys.stderr)
        return direction
class EntryExit:
    """One traversable connection of a room: the side entered from and
    the side it leads out of.

    NOTE(review): the parameter name `exit` shadows the builtin; kept
    for interface compatibility.
    """

    def __init__(self, entry: Direction, exit):
        self.exit = exit
        self.entry = entry
class Room:
    """One room of the grid; `paths` lists the entry->exit connections
    available for the room's type."""

    def __init__(self, type=0):
        self.type = type
        self.paths = []
        self.__create_paths()

    def __create_paths(self):
        """Populate self.paths from a per-type connection table."""
        # (entry, exit) pairs for every room type; type 0 has none
        connections = {
            0: [],
            1: [(Direction.top, Direction.bottom),
                (Direction.left, Direction.bottom),
                (Direction.right, Direction.bottom)],
            2: [(Direction.left, Direction.right),
                (Direction.right, Direction.left)],
            3: [(Direction.top, Direction.bottom)],
            4: [(Direction.top, Direction.left),
                (Direction.right, Direction.bottom)],
            5: [(Direction.top, Direction.right),
                (Direction.left, Direction.bottom)],
            6: [(Direction.top, Direction.na),
                (Direction.left, Direction.right),
                (Direction.right, Direction.left)],
            7: [(Direction.top, Direction.bottom),
                (Direction.right, Direction.bottom)],
            8: [(Direction.left, Direction.bottom),
                (Direction.right, Direction.bottom)],
            9: [(Direction.top, Direction.bottom),
                (Direction.left, Direction.bottom)],
            10: [(Direction.top, Direction.left),
                 (Direction.left, Direction.na)],
            11: [(Direction.top, Direction.right),
                 (Direction.right, Direction.na)],
            12: [(Direction.right, Direction.bottom)],
            13: [(Direction.left, Direction.bottom)],
        }
        pairs = connections.get(self.type)
        if pairs is None:
            print("Wrong room type!", file=sys.stderr)
        else:
            for entry_side, exit_side in pairs:
                self.paths.append(EntryExit(entry_side, exit_side))

    def get_exit(self, entry):
        """Return the exit Direction when entering from `entry`
        (Direction.na when no path matches)."""
        exit = Direction.na
        for path in self.paths:
            if path.entry == entry:
                exit = path.exit
        return exit
class Board:
    """The grid of rooms, read from stdin at construction time."""

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.rooms = []  # rows of Room objects, indexed [y][x]
        self.__read_from_input()

    def __read_from_input(self):
        """Read `height` lines of room-type integers and build the rooms."""
        for i in range(self.height):
            row_of_rooms = []
            line = input()  # represents a line in the grid and contains W integers. Each integer represents one room of a given type.
            values = [int(x) for x in line.split()]
            for value in values:
                row_of_rooms.append(Room(value))
            self.rooms.append(row_of_rooms)

    def get_as_string(self):
        """Return a debug dump: dimensions plus each room's type."""
        r = ""
        r += "Width: " + str(self.width) + ", height: " + str(self.height) + "\n"
        for row_of_rooms in self.rooms:
            for room in row_of_rooms:
                r += str(room.type) + ", "
            r += "\n"
        return r

    def get_exit_of_room_entered(self, x: int, y: int, direction: Direction):
        """Return the exit side of room (x, y) when entered from `direction`."""
        return self.rooms[y][x].get_exit(direction)
class Indy:
    """Indiana's current cell and the side he entered it from."""

    def __init__(self):
        self.position_x = -1
        self.position_y = -1
        self.direction = Direction.na

    def set(self, position_x: int, position_y: int, direction: str):
        """Update position and convert the textual entry side to a Direction."""
        self.position_x = position_x
        self.position_y = position_y
        self.direction = Direction.get_from_text(direction)

    def get_next_position(self, board: Board):
        """Return the (x, y) cell Indy moves to next, following the exit
        of the room he currently occupies; (-1, -1) when there is no
        usable exit (Direction.na or top)."""
        next_direction = board.get_exit_of_room_entered(self.position_x, self.position_y, self.direction)
        if next_direction == Direction.bottom:
            next_position_x = self.position_x
            next_position_y = self.position_y + 1
        elif next_direction == Direction.right:
            next_position_x = self.position_x + 1
            next_position_y = self.position_y
        elif next_direction == Direction.left:
            next_position_x = self.position_x - 1
            next_position_y = self.position_y
        else:
            next_position_x = -1
            next_position_y = -1
            print("Wrong next direction: " + str(next_direction), file=sys.stderr)
        return next_position_x, next_position_y
# --- main: each turn, read Indy's cell and entry side, then print the
# cell he will fall into next ---
# w: number of columns.
# h: number of rows.
w, h = [int(i) for i in input().split()]
b = Board(w, h)
ex = int(input())  # the coordinate along the X axis of the exit (not useful for this first mission, but must be read).
print(b.get_as_string(), file=sys.stderr)
hero = Indy()
# game loop
while 1:
    # xi, yi: Indy's current cell; pos: side he entered from (TOP/LEFT/RIGHT)
    xi, yi, pos = input().split()
    xi = int(xi)
    yi = int(yi)
    hero.set(xi, yi, pos)
    next_xi, next_yi = hero.get_next_position(b)
    print("Next position: " + str(next_xi) + ", " + str(next_yi), file=sys.stderr)

    # Write an action using print
    # To debug: print("Debug messages...", file=sys.stderr)

    # One line containing the X Y coordinates of the room in which you believe Indy will be on the next turn.
    r = str(next_xi) + " " + str(next_yi)
    print(r)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_Indiana_Level_1.py",
"copies": "1",
"size": "6458",
"license": "mit",
"hash": 2331319417559143000,
"line_mean": 34.097826087,
"line_max": 134,
"alpha_frac": 0.5808299783,
"autogenerated": false,
"ratio": 3.7200460829493087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48008760612493084,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
import itertools
def find_connections2(word1, word2):
    """Return the length of the longest run where word1, starting at some
    position, matches a prefix of word2 and extends to the end of either
    word. Debug information is printed to stderr.

    NOTE(review): apparently superseded by find_connections() below.
    """
    longest_matched_part = ""
    # check from each letter of word1 if it can be matched to the beginning of word2
    for i in range(len(word1)):
        w1_match_index = i
        w2_match_index = 0
        # ATTENTION: it is important which of the conditions is checked first!
        # word1[w1_match_index] == word2[w2_match_index] requires indexes to be within appropriate ranges
        while w1_match_index < len(word1) and w2_match_index < len(word2) and word1[w1_match_index] == word2[w2_match_index]:
            w1_match_index += 1
            w2_match_index += 1
        # BUG FIX: the original appended word1[i] on every loop iteration,
        # so matched_part had the right length but one repeated character;
        # take the actually-matched slice instead.
        matched_part = word1[i:w1_match_index]
        if len(matched_part) > len(longest_matched_part):
            longest_matched_part = matched_part

    if len(longest_matched_part) > 0:
        print("Started from: " + str(i) + ", matched part: " + longest_matched_part, file=sys.stderr)
    else:
        print("No matches found", file=sys.stderr)

    return len(longest_matched_part)
def find_connections(word1, word2):
    """Return len(word2) when word2 occurs inside word1, otherwise the
    length of the longest suffix of word1 that is a prefix of word2.

    Used to decide how many characters two DNA fragments share when
    concatenated in this order. Debug output goes to stderr.
    """
    if word2 in word1:
        return len(word2)

    # a suffix of word1 longer than word2 can never be a prefix of word2,
    # so skip straight to the first candidate of feasible length
    # (e.g. for AGATTA and TA start at index 4)
    first_candidate = max(len(word1) - len(word2), 0)

    longest_matched_part = ""
    for start in range(first_candidate, len(word1)):
        suffix = word1[start:]
        # the first (longest) suffix that is a prefix of word2 wins
        if word2.startswith(suffix):
            longest_matched_part = suffix
            break

    if len(longest_matched_part) > 0:
        print("Started from: " + str(start) + ", matched part: " + longest_matched_part, file=sys.stderr)
    else:
        print("No matches found", file=sys.stderr)

    return len(longest_matched_part)
# --- main: brute-force the shortest common superstring of the fragments ---
sub_sequences = []
n = int(input())
for i in range(n):
    sub_sequences.append(input())

# try every ordering of the fragments
indexes = [i for i in range(0, len(sub_sequences))]
indexes_permutations = list(itertools.permutations(indexes, len(sub_sequences)))
print("Permutations: " + str(indexes_permutations), file=sys.stderr)

# upper bound: total length when nothing overlaps
length_of_dna_sequence = 0
for sub_sequence in sub_sequences:
    length_of_dna_sequence += len(sub_sequence)
minimal_length_of_dna_sequence = length_of_dna_sequence

for indexes_permutation in indexes_permutations:
    current_length_of_dna_sequence = length_of_dna_sequence
    # subtract the overlap of each adjacent pair in this ordering
    for index_1, index_2 in zip(indexes_permutation[:-1], indexes_permutation[1:]):
        word1 = sub_sequences[index_1]
        word2 = sub_sequences[index_2]
        print("Words: " + word1 + ", " + word2, file=sys.stderr)
        current_length_of_dna_sequence -= find_connections(word1, word2)
    if current_length_of_dna_sequence < minimal_length_of_dna_sequence:
        minimal_length_of_dna_sequence = current_length_of_dna_sequence

# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)

print(minimal_length_of_dna_sequence)

# AACCGG
# AACCTT
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/hard/hard_Genome_Sequencing.py",
"copies": "1",
"size": "3562",
"license": "mit",
"hash": -9175904877766347000,
"line_mean": 31.0900900901,
"line_max": 125,
"alpha_frac": 0.639809096,
"autogenerated": false,
"ratio": 3.398854961832061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9532668725968465,
"avg_score": 0.0011990663727192957,
"num_lines": 111
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
import numpy as np
from enum import Enum
class Direction(Enum):
    """Direction of Batman's last jump on the building grid."""
    up = 1
    down = 2
    left = 3
    right = 4
    mixed = 5  # both coordinates changed (diagonal jump)

    @staticmethod
    def get_opposite(direction):
        """Return the 180-degree opposite; mixed maps to itself.

        Prints an error (and returns None) for anything else, matching
        the original behaviour.
        """
        opposites = {
            Direction.up: Direction.down,
            Direction.down: Direction.up,
            Direction.right: Direction.left,
            Direction.left: Direction.right,
            Direction.mixed: Direction.mixed,
        }
        if direction in opposites:
            return opposites[direction]
        print("ERROR!!! This direction does not have implementation of this function!", file=sys.stderr)
class Movements(Enum):
    """Axis along which Batman is currently bisecting."""
    vertical = 1
    horizontal = 2
    mixed = 3

    @staticmethod
    def get_opposite(movement):
        """Swap vertical and horizontal; anything else stays mixed."""
        if movement == Movements.vertical:
            return Movements.horizontal
        if movement == Movements.horizontal:
            return Movements.vertical
        return Movements.mixed
class Building:
    """Candidate-window bookkeeping for the bomb-bisection search.

    Cells eliminated so far are marked -1 in `map`; the remaining
    candidates form the half-open rectangle
    [usable_x_min, usable_x_max) x [usable_y_min, usable_y_max).
    """

    def __init__(self, width, height):
        self.width = width
        self.height = height
        # candidate window, initially the whole building
        self.usable_x_min = 0
        self.usable_x_max = self.width
        self.usable_y_min = 0
        self.usable_y_max = self.height
        self.map = np.zeros((self.height, self.width))  # , dtype=np.uint8)

    def update_map(self, batman, bomb_distance):
        """Shrink the candidate window from the last jump and its feedback.

        bomb_distance is "SAME", "WARMER", "COLDER" or "UNKNOWN".
        Diagonal (mixed-direction) jumps leave the window untouched.
        """
        if bomb_distance == "SAME":
            # the bomb is equidistant: keep only the band halfway along the jump
            if batman.direction_current == Direction.down:
                distance_traveled = batman.y_current - batman.y_previous
                y_start = batman.y_previous + distance_traveled // 2
                y_end = batman.y_current - distance_traveled // 2 + 1
                self.map[self.usable_y_min:y_start, self.usable_x_min:self.usable_x_max] = -1
                self.usable_y_min = y_start
                self.map[y_end:self.usable_y_max, self.usable_x_min:self.usable_x_max] = -1
                self.usable_y_max = y_end
            elif batman.direction_current == Direction.up:
                distance_traveled = batman.y_previous - batman.y_current
                y_start = batman.y_current + distance_traveled // 2
                y_end = batman.y_previous - distance_traveled // 2 + 1
                self.map[self.usable_y_min:y_start, self.usable_x_min:self.usable_x_max] = -1
                self.usable_y_min = y_start
                self.map[y_end:self.usable_y_max, self.usable_x_min:self.usable_x_max] = -1
                self.usable_y_max = y_end
            elif batman.direction_current == Direction.left:
                distance_traveled = batman.x_previous - batman.x_current
                x_start = batman.x_current + distance_traveled // 2
                x_end = batman.x_previous - distance_traveled // 2 + 1
                self.map[self.usable_y_min:self.usable_y_max, self.usable_x_min:x_start] = -1
                self.usable_x_min = x_start
                self.map[self.usable_y_min:self.usable_y_max, x_end:self.usable_x_max] = -1
                self.usable_x_max = x_end
            elif batman.direction_current == Direction.right:
                distance_traveled = batman.x_current - batman.x_previous
                x_start = batman.x_previous + distance_traveled // 2
                x_end = batman.x_current - distance_traveled // 2 + 1
                self.map[self.usable_y_min:self.usable_y_max, self.usable_x_min:x_start] = -1
                self.usable_x_min = x_start
                self.map[self.usable_y_min:self.usable_y_max, x_end:self.usable_x_max] = -1
                self.usable_x_max = x_end
            else:
                # TODO: this is special case and can be treated accordingly
                # if last direction is one of up_right, up_left, down_right, down_left do not update the map
                pass
        # BUG FIX: the original tested the undefined name `bomb_dist` here
        # (the parameter is `bomb_distance`), a NameError / stale-global trap.
        elif bomb_distance == "WARMER":
            # we moved towards the bomb: drop the half nearer the old position
            if batman.direction_current == Direction.down:
                distance_traveled = batman.y_current - batman.y_previous
                y_start = batman.y_previous + distance_traveled // 2 + 1
                self.map[self.usable_y_min:y_start, self.usable_x_min:self.usable_x_max] = -1
                self.usable_y_min = y_start
            elif batman.direction_current == Direction.up:
                distance_traveled = batman.y_previous - batman.y_current
                y_end = batman.y_previous - distance_traveled // 2
                self.map[y_end:self.usable_y_max, self.usable_x_min:self.usable_x_max] = -1
                self.usable_y_max = y_end
            elif batman.direction_current == Direction.left:
                distance_traveled = batman.x_previous - batman.x_current
                x_end = batman.x_previous - distance_traveled // 2
                self.map[self.usable_y_min:self.usable_y_max, x_end:self.usable_x_max] = -1
                self.usable_x_max = x_end
            elif batman.direction_current == Direction.right:
                distance_traveled = batman.x_current - batman.x_previous
                x_start = batman.x_previous + distance_traveled // 2 + 1
                self.map[self.usable_y_min:self.usable_y_max, self.usable_x_min:x_start] = -1
                self.usable_x_min = x_start
            else:
                # if last direction is one of up_right, up_left, down_right, down_left do not update the map
                pass
        elif bomb_distance == "COLDER":
            # we moved away from the bomb: drop the half nearer the new position
            if batman.direction_current == Direction.down:
                distance_traveled = batman.y_current - batman.y_previous
                y_end = batman.y_current - distance_traveled // 2
                self.map[y_end:self.usable_y_max, self.usable_x_min:self.usable_x_max] = -1
                self.usable_y_max = y_end
            elif batman.direction_current == Direction.up:
                distance_traveled = batman.y_previous - batman.y_current
                y_start = batman.y_current + distance_traveled // 2 + 1
                self.map[self.usable_y_min:y_start, self.usable_x_min:self.usable_x_max] = -1
                self.usable_y_min = y_start
            elif batman.direction_current == Direction.left:
                distance_traveled = batman.x_previous - batman.x_current
                x_start = batman.x_current + distance_traveled // 2 + 1
                self.map[self.usable_y_min:self.usable_y_max, self.usable_x_min:x_start] = -1
                self.usable_x_min = x_start
            elif batman.direction_current == Direction.right:
                distance_traveled = batman.x_current - batman.x_previous
                x_end = batman.x_current - distance_traveled // 2
                self.map[self.usable_y_min:self.usable_y_max, x_end:self.usable_x_max] = -1
                self.usable_x_max = x_end
            else:
                # if last direction is one of up_right, up_left, down_right, down_left do not update the map
                pass
        else:  # UNKNOWN
            pass

        print("self.usable_x_min: " + str(self.usable_x_min), file=sys.stderr)
        print("self.usable_x_max: " + str(self.usable_x_max), file=sys.stderr)
        print("self.usable_y_min: " + str(self.usable_y_min), file=sys.stderr)
        print("self.usable_y_max: " + str(self.usable_y_max), file=sys.stderr)

    def check_if_only_one_column_left(self):
        """True when the candidate window is exactly one column wide."""
        return self.usable_x_max - self.usable_x_min == 1

    def check_if_only_one_row_left(self):
        """True when the candidate window is exactly one row tall."""
        return self.usable_y_max - self.usable_y_min == 1

    def check_if_only_one_cell_left(self):
        """True when the candidate window is a single cell (the bomb)."""
        usable_x_width = self.usable_x_max - self.usable_x_min
        usable_y_width = self.usable_y_max - self.usable_y_min
        return usable_x_width == 1 and usable_y_width == 1

    def find_next_position_new(self, current_x, current_y, movement):
        """Return the cell to jump to next: mirror the current coordinate
        across the candidate window on the probed axis (clamped to the
        window); for mixed movement jump to the window's far corner."""
        usable_y_midpoint = (self.usable_y_max + self.usable_y_min) // 2
        usable_x_midpoint = (self.usable_x_max + self.usable_x_min) // 2
        print("Midpoint x: " + str(usable_x_midpoint) + ", y: " + str(usable_y_midpoint), file=sys.stderr)
        if movement == Movements.vertical:
            if current_y < usable_y_midpoint:
                if current_y < self.usable_y_min:
                    new_y = self.usable_y_min
                else:
                    new_y = (self.usable_y_max - 1) - (current_y - self.usable_y_min)
            else:
                if current_y > self.usable_y_max:
                    new_y = self.usable_y_max - 1
                else:
                    new_y = max(self.usable_y_min + ((self.usable_y_max - 1) - current_y), self.usable_y_min)
            print("New y: " + str(new_y), file=sys.stderr)
            return current_x, new_y
        elif movement == Movements.horizontal:
            if current_x < usable_x_midpoint:
                new_x = (self.usable_x_max - 1) - (current_x - self.usable_x_min)
            else:
                new_x = self.usable_x_min + ((self.usable_x_max - 1) - current_x)
            print("New x: " + str(new_x), file=sys.stderr)
            return new_x, current_y
        else:
            return self.usable_x_max-1, self.usable_y_max-1
class Batman:
    """Batman's position history plus the bisection bookkeeping: which
    axis to probe next and the last feedback seen on each axis."""

    def __init__(self, x0, y0):
        self.x_current = x0
        self.y_current = y0
        self.x_previous = x0
        self.y_previous = y0
        # axis probed by the next jump; the cycle starts vertically
        self.movement = Movements.vertical
        self.direction_current = Direction.up
        # last WARMER/COLDER/SAME feedback received per axis
        self.vertical_distance = "UNKNOWN"
        self.horizontal_distance = "UNKNOWN"

    def update_movement(self, current_distance):
        """Record the feedback for the axis just probed and pick the axis
        of the next probe.

        NOTE(review): after a horizontal probe the cycle restarts
        vertically only when BOTH axes last reported WARMER; otherwise it
        degrades to mixed -- confirm this matches the intended strategy.
        """
        if current_distance != "UNKNOWN":
            if self.movement == Movements.vertical:
                self.vertical_distance = current_distance
                self.movement = Movements.horizontal
            elif self.movement == Movements.horizontal:
                self.horizontal_distance = current_distance
                if self.vertical_distance == "WARMER" and self.horizontal_distance == "WARMER":
                    self.movement = Movements.vertical
                else:
                    self.movement = Movements.mixed
            else:  # UNKNOWN
                # movement was mixed: restart the probe cycle vertically
                self.movement = Movements.vertical
        else:
            # no feedback yet (first turn): keep the current plan
            pass

    def set_movement(self, new_movement):
        """Force the probe axis."""
        self.movement = new_movement

    def set_position(self, x, y):
        """Record a jump to (x, y) and derive the jump direction."""
        self.x_previous = self.x_current
        self.y_previous = self.y_current
        self.x_current = x
        self.y_current = y
        self.__update_direction()

    def __update_direction(self):
        # this could be decided based on self.movement, but comparing the
        # coordinates directly is more general
        if self.x_current == self.x_previous:
            # vertical
            if self.y_current < self.y_previous:
                self.direction_current = Direction.up
            else:
                self.direction_current = Direction.down
        elif self.y_current == self.y_previous:
            # horizontal
            if self.x_current < self.x_previous:
                self.direction_current = Direction.left
            else:
                self.direction_current = Direction.right
        else:
            # mixed direction
            self.direction_current = Direction.mixed

    def get_as_string(self):
        """Return a multi-line debug dump of the full state."""
        r = "Batman: \n"
        r += "x: " + str(self.x_current) + ", y: " + str(self.y_current) + "\n"
        r += "x_p: " + str(self.x_previous) + ", y_p: " + str(self.y_previous) + "\n"
        r += "dir: " + str(self.direction_current) + "\n"
        r += "dis_vertical: " + str(self.vertical_distance) + ", dis_horizontal: " + str(self.horizontal_distance) + "\n"
        r += "movement: " + str(self.movement)
        return r
class BuildingOld:
def __init__(self, width, height):
self.width = width
self.height = height
print("Width: " + str(self.width) + ", height: " + str(self.height), file=sys.stderr)
self.map = np.zeros((self.height, self.width)) # , dtype=np.uint8)
print(self.map, file=sys.stderr)
self.odd_movement = False
def update_map(self, batman, bomb_distance):
if bomb_distance == "SAME":
if batman.direction_current == Direction.down:
distance_traveled = batman.y_current - batman.y_previous
y_start = batman.y_previous + distance_traveled // 2
y_end = batman.y_current - distance_traveled // 2 + 1
self.map[0:y_start, :] = -1
self.map[y_end:self.height, :] = -1
elif batman.direction_current == Direction.up:
distance_traveled = batman.y_previous - batman.y_current
y_start = batman.y_current + distance_traveled // 2
y_end = batman.y_previous - distance_traveled // 2 + 1
self.map[0:y_start, :] = -1
self.map[y_end:self.height, :] = -1
# this part is done only when the right row is chosen
elif batman.direction_current == Direction.left:
distance_traveled = batman.x_previous - batman.x_current
x_start = batman.x_current + distance_traveled // 2
x_end = batman.x_previous - distance_traveled // 2 + 1
self.map[batman.y_current, 0:x_start] = -1
self.map[batman.y_current, x_end:self.width] = -1
#print(self.map[batman.y_current], file=sys.stderr)
elif batman.direction_current == Direction.right:
distance_traveled = batman.x_current - batman.x_previous
x_start = batman.x_previous + distance_traveled // 2
x_end = batman.x_current - distance_traveled // 2 + 1
self.map[batman.y_current, 0:x_start] = -1
self.map[batman.y_current, x_end:self.width] = -1
#print(self.map[batman.y_current], file=sys.stderr)
else:
pass
elif bomb_dist == "WARMER":
if batman.direction_current == Direction.down:
distance_traveled = batman.y_current - batman.y_previous
y_start = batman.y_previous + distance_traveled // 2 + 1
self.map[0:y_start, :] = -1
elif batman.direction_current == Direction.up:
distance_traveled = batman.y_previous - batman.y_current
y_end = batman.y_previous - distance_traveled // 2
self.map[y_end:self.height, :] = -1
# this part is done only when the right row is chosen
elif batman.direction_current == Direction.left:
distance_traveled = batman.x_previous - batman.x_current
x_end = batman.x_previous - distance_traveled // 2
self.map[batman.y_current, x_end:self.width] = -1
#print(self.map[batman.y_current], file=sys.stderr)
elif batman.direction_current == Direction.right:
distance_traveled = batman.x_current - batman.x_previous
x_start = batman.x_previous + distance_traveled // 2 + 1
self.map[batman.y_current, 0:x_start] = -1
#print(self.map[batman.y_current], file=sys.stderr)
elif bomb_distance == "COLDER":
if batman.direction_current == Direction.down:
distance_traveled = batman.y_current - batman.y_previous
y_end = batman.y_current - distance_traveled // 2
self.map[y_end:self.height, :] = -1
elif batman.direction_current == Direction.up:
distance_traveled = batman.y_previous - batman.y_current
y_start = batman.y_current + distance_traveled // 2 + 1
self.map[0:y_start, :] = -1
# this part is done only when the right row is chosen
elif batman.direction_current == Direction.left:
distance_traveled = batman.x_previous - batman.x_current
x_start = batman.x_current + distance_traveled // 2 + 1
self.map[batman.y_current, 0:x_start] = -1
#print(self.map[batman.y_current], file=sys.stderr)
elif batman.direction_current == Direction.right:
distance_traveled = batman.x_current - batman.x_previous
x_end = batman.x_current - distance_traveled // 2
self.map[batman.y_current, x_end:self.width] = -1
#print(self.map[batman.y_current], file=sys.stderr)
def find_movements_based_on_distance(self, bat, bomb_distance):
    """Choose the next jump direction from the WARMER/COLDER/SAME feedback.

    WARMER keeps the current direction unless the opposite side has more
    unexplored (0) cells; COLDER reverses the direction; SAME keeps the
    previous pick.  If no free cell remains in the chosen direction, fall
    back to the first of (up, down, right, left) that still has free cells.

    Fixes a bug in the original: it read the module-level global ``batman``
    instead of the ``bat`` parameter (it only worked because the caller
    happened to pass that same global object).
    """
    direction = bat.direction_current
    free_cells_in_current_direction = self.__count_number_of_free_cells_in_that_direction(
        bat.x_current, bat.y_current, direction)
    free_cells_in_opposing_direction = self.__count_number_of_free_cells_in_that_direction(
        bat.x_current, bat.y_current, Direction.get_opposite(direction))
    print("Free cells in current direction: " + str(free_cells_in_current_direction), file=sys.stderr)
    print("Free cells in opposing direction: " + str(free_cells_in_opposing_direction), file=sys.stderr)
    if bomb_distance == "WARMER":
        # last time we moved in the right direction; only turn around if the
        # opposite side still has more room to explore
        if free_cells_in_opposing_direction > free_cells_in_current_direction:
            direction = Direction.get_opposite(bat.direction_current)
            print("Change direction in work!", file=sys.stderr)
    elif bomb_distance == "COLDER":
        direction = Direction.get_opposite(bat.direction_current)
    elif bomb_distance == "SAME":
        pass
    print("First direction guess: " + str(direction), file=sys.stderr)
    # check if the move is possible; otherwise take any direction with room
    if not self.__check_if_there_are_free_cells_in_that_direction(bat.x_current, bat.y_current, direction):
        for candidate in (Direction.up, Direction.down, Direction.right, Direction.left):
            if self.__check_if_there_are_free_cells_in_that_direction(bat.x_current, bat.y_current, candidate):
                return candidate
    return direction
def __count_number_of_free_cells_in_that_direction(self, current_x, current_y, direction):
    """Count unexplored (value 0) cells from (current_x, current_y) toward
    ``direction``, scanning the whole row/column on that side of the
    current cell (the cell itself is excluded)."""
    free_cells = 0
    if direction == Direction.up:
        print("Checking column above", file=sys.stderr)
        free_cells = sum(1 for y in range(0, current_y) if self.map[y][current_x] == 0)
    elif direction == Direction.down:
        print("Checking column below", file=sys.stderr)
        free_cells = sum(1 for y in range(current_y + 1, self.height) if self.map[y][current_x] == 0)
    elif direction == Direction.left:
        print("Checking row on the left", file=sys.stderr)
        free_cells = sum(1 for x in range(0, current_x) if self.map[current_y][x] == 0)
    elif direction == Direction.right:
        print("Checking row on the right", file=sys.stderr)
        free_cells = sum(1 for x in range(current_x + 1, self.width) if self.map[current_y][x] == 0)
    return free_cells
def __check_if_there_are_free_cells_in_that_direction(self, current_x, current_y, direction):
    """Return True if at least one unexplored (value 0) cell lies on the
    ``direction`` side of (current_x, current_y)."""
    if direction == Direction.up:
        print("Checking column above", file=sys.stderr)
        return any(self.map[y][current_x] == 0 for y in range(0, current_y))
    elif direction == Direction.down:
        print("Checking column below", file=sys.stderr)
        return any(self.map[y][current_x] == 0 for y in range(current_y + 1, self.height))
    elif direction == Direction.left:
        print("Checking row on the left", file=sys.stderr)
        return any(self.map[current_y][x] == 0 for x in range(0, current_x))
    elif direction == Direction.right:
        print("Checking row on the right", file=sys.stderr)
        return any(self.map[current_y][x] == 0 for x in range(current_x + 1, self.width))
    return False
# bomb_distance = UNKNOWN
def find_movements_first_round(self, current_x, current_y):
    """First turn (bomb distance is UNKNOWN): jump to the middle row.

    Returns ``(direction, target_row)``: down when starting in the upper
    half of the grid, up otherwise.  ``current_x`` is unused but kept for
    interface compatibility.
    """
    middle_row = self.height // 2
    if current_y < middle_row:
        return Direction.down, middle_row
    return Direction.up, middle_row
def find_next_position(self, current_x, current_y, direction):
    """Pick the next coordinate along ``direction``.

    Collects every still-unexplored (value 0) cell index on that side of
    the current cell and returns the truncated mean of those indices.
    When ``self.odd_movement`` is False, moving down jumps to the farthest
    candidate and moving left to the nearest one instead.

    NOTE(review): raises ZeroDivisionError when no candidate cell remains
    in that direction -- callers appear to guarantee at least one.
    """
    candidates = []
    if direction == Direction.up:
        candidates = [y for y in range(0, current_y) if self.map[y][current_x] == 0]
    elif direction == Direction.down:
        candidates = [y for y in range(current_y + 1, self.height) if self.map[y][current_x] == 0]
    elif direction == Direction.right:
        candidates = [x for x in range(current_x + 1, self.width) if self.map[current_y][x] == 0]
    elif direction == Direction.left:
        candidates = [x for x in range(0, current_x) if self.map[current_y][x] == 0]
    next_position = sum(candidates) / len(candidates)
    if not self.odd_movement:
        if direction == Direction.down:
            next_position = max(candidates)
        elif direction == Direction.left:
            next_position = min(candidates)
    return int(next_position)
class BatmanOld:
    """Batman state for the directional (legacy) variant: current and
    previous position plus current and previous movement direction."""

    def __init__(self, x0, y0):
        self.x_initial = x0
        self.y_initial = y0
        self.x_current = x0
        self.y_current = y0
        self.x_previous = x0
        self.y_previous = y0
        self.direction_current = Direction.up
        self.direction_previous = Direction.up

    def update(self, dx, dy):
        """Shift the position by (dx, dy); dy is subtracted from y."""
        self.x_previous, self.y_previous = self.x_current, self.y_current
        self.x_current += dx
        self.y_current -= dy

    def update_based_on_direction2(self, direction, new_pos):
        """Jump to absolute coordinate ``new_pos`` along ``direction`` and
        remember the previous position/direction."""
        self.x_previous, self.y_previous = self.x_current, self.y_current
        self.direction_previous = self.direction_current
        self.direction_current = direction
        if direction in (Direction.up, Direction.down):
            self.y_current = new_pos
        elif direction in (Direction.right, Direction.left):
            self.x_current = new_pos

    def get_new_position_based_on_direction(self, direction, distance):
        """Return the (x, y) reached after moving ``distance`` cells: up
        decreases y, down increases y, right increases x, left decreases x."""
        new_x, new_y = self.x_current, self.y_current
        if direction == Direction.up:
            new_y -= distance
        elif direction == Direction.down:
            new_y += distance
        elif direction == Direction.right:
            new_x += distance
        elif direction == Direction.left:
            new_x -= distance
        return new_x, new_y

    def get_as_string(self):
        """Multi-line debug dump of the current and previous state."""
        return ("Batman position: \n"
                "x: " + str(self.x_current) + ", y: " + str(self.y_current) + "\n"
                "x_p: " + str(self.x_previous) + ", y_p: " + str(self.y_previous) + "\n"
                "dir: " + str(self.direction_current) + ", dir_p: " + str(self.direction_previous))
if __name__ == '__main__':
    # w: width of the building, h: height of the building
    w, h = [int(i) for i in input().split()]
    print("w: " + str(w) + ", h: " + str(h), file=sys.stderr)
    n = int(input())  # maximum number of turns before game over.
    x0, y0 = [int(i) for i in input().split()]
    if w != 8000 and h != 8000:
        # neither dimension is 8000: use the Building/Batman strategy
        building = Building(w, h)
        batman = Batman(x0, y0)
        # game loop
        while 1:
            # current distance to the bomb compared to the previous one
            # (COLDER, WARMER, SAME or UNKNOWN)
            bomb_dist = input()
            batman.update_movement(bomb_dist)
            print(batman.get_as_string(), file=sys.stderr)
            building.update_map(batman, bomb_dist)
            if building.check_if_only_one_cell_left():
                batman.set_position(building.usable_x_min, building.usable_y_min)
            else:
                if building.check_if_only_one_column_left():
                    batman.set_movement(Movements.vertical)
                if building.check_if_only_one_row_left():
                    batman.set_movement(Movements.horizontal)
                batman_new_x, batman_new_y = building.find_next_position_new(
                    batman.x_current, batman.y_current, batman.movement)
                batman.set_position(batman_new_x, batman_new_y)
            print(str(batman.x_current) + " " + str(batman.y_current))
    else:
        # fall back to the legacy directional strategy
        building = BuildingOld(w, h)
        batman = BatmanOld(x0, y0)
        bomb_dist = input()
        # first round is special - bomb distance is UNKNOWN
        current_direction, new_pos = building.find_movements_first_round(batman.x_current, batman.y_current)
        batman.update_based_on_direction2(current_direction, new_pos)
        batman.direction_previous = current_direction
        print(str(batman.x_current) + " " + str(batman.y_current))
        # game loop
        while 1:
            previous_bomb_dist = bomb_dist
            bomb_dist = input()
            building.update_map(batman, bomb_dist)
            current_direction = building.find_movements_based_on_distance(batman, bomb_dist)
            print("Direction choosen: " + str(current_direction), file=sys.stderr)
            new_pos = building.find_next_position(batman.x_current, batman.y_current, current_direction)
            print("Distance available: " + str(new_pos), file=sys.stderr)
            batman.update_based_on_direction2(current_direction, new_pos)
            print(batman.get_as_string(), file=sys.stderr)
            print(str(batman.x_current) + " " + str(batman.y_current))
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/very_hard/very_hard_Triangulation.py",
"copies": "1",
"size": "29167",
"license": "mit",
"hash": -8817852553275873000,
"line_mean": 37.6830238727,
"line_max": 159,
"alpha_frac": 0.5648506874,
"autogenerated": false,
"ratio": 3.5586871644704736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4623537851870474,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
import numpy as np
class Building:
    """Occupancy grid for the heat-detector puzzle.

    ``map`` is a (height x width) int8 array where 0 marks windows that may
    still hold the bomb and 1 marks windows already ruled out.
    """

    # quadrants that can NOT contain the bomb for each reported direction;
    # each entry is an (up, left) pair selecting one quadrant to clear
    _QUADRANTS_TO_CLEAR = {
        "U": [(False, True), (False, False)],
        "UR": [(True, True), (False, True), (False, False)],
        "R": [(True, True), (False, True)],
        "DR": [(True, True), (True, False), (False, True)],
        "D": [(True, True), (True, False)],
        "DL": [(True, True), (True, False), (False, False)],
        "L": [(True, False), (False, False)],
        "UL": [(True, False), (False, True), (False, False)],
    }

    def __init__(self, width, height):
        self.width = width
        self.height = height
        print("Width: " + str(self.width) + ", height: " + str(self.height), file=sys.stderr)
        self.map = np.zeros((self.height, self.width), dtype="int8")
        print(self.map, file=sys.stderr)

    def __mark_quadrant_as_empty(self, current_x, current_y, up, left):
        """Set one quadrant, anchored (inclusively) at the current cell, to 1."""
        rows = slice(0, current_y + 1) if up else slice(current_y, self.height)
        cols = slice(0, current_x + 1) if left else slice(current_x, self.width)
        self.map[rows, cols] = 1

    def mark_part_as_empty(self, current_x, current_y, bomb_direction):
        """Rule out every quadrant that cannot contain the bomb, given the
        compass direction reported from the current window."""
        for up, left in self._QUADRANTS_TO_CLEAR.get(bomb_direction, []):
            self.__mark_quadrant_as_empty(current_x, current_y, up, left)
        print(self.map, file=sys.stderr)

    def find_movements(self, bomb_direction):
        """Translate a compass direction (U, UR, ..., UL) into unit steps
        ``(vertical, horizontal)``: vertical +1 means up, horizontal +1
        means right; 0 on an axis not mentioned in the direction."""
        vertical_movement = 0
        horizontal_movement = 0
        if "U" in bomb_direction:
            vertical_movement = 1
        if "D" in bomb_direction:
            vertical_movement = -1
        if "L" in bomb_direction:
            horizontal_movement = -1
        if "R" in bomb_direction:
            horizontal_movement = 1
        return vertical_movement, horizontal_movement

    def find_distance_available(self, current_x, current_y, vertical_movement, horizontal_movement):
        """Count contiguous not-ruled-out (0) cells along each movement
        axis, starting at and including the current cell; an axis with no
        movement reports 0."""
        distance_available_vertical = 0
        y = current_y
        while vertical_movement != 0 and 0 <= y < self.height and self.map[y][current_x] == 0:
            distance_available_vertical += 1
            y -= vertical_movement  # vertical +1 moves up, i.e. toward row 0
        distance_available_horizontal = 0
        x = current_x
        while horizontal_movement != 0 and 0 <= x < self.width and self.map[current_y][x] == 0:
            distance_available_horizontal += 1
            x += horizontal_movement
        return distance_available_vertical, distance_available_horizontal
# w: width of the building, h: height of the building
w, h = [int(i) for i in input().split()]
n = int(input())  # maximum number of turns before game over.
x0, y0 = [int(i) for i in input().split()]

building = Building(w, h)
x = x0
y = y0

# game loop: each turn rules out part of the grid and bisects the rest
while 1:
    # direction of the bombs from batman's current location
    # (U, UR, R, DR, D, DL, L or UL)
    bomb_dir = input()
    move_v, move_h = building.find_movements(bomb_dir)
    avail_v, avail_h = \
        building.find_distance_available(x, y, move_v, move_h)
    # clear the quadrants that cannot hold the bomb, then jump halfway
    # across the remaining free span on each axis
    building.mark_part_as_empty(x, y, bomb_dir)
    x += move_h * (avail_h // 2)
    y -= move_v * (avail_v // 2)
    # the location of the next window Batman should jump to
    print(str(x) + " " + str(y))
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_Heat_Detector.py",
"copies": "1",
"size": "6224",
"license": "mit",
"hash": -8544830736182106000,
"line_mean": 34.7701149425,
"line_max": 114,
"alpha_frac": 0.5729434447,
"autogenerated": false,
"ratio": 3.399235390496996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4472178835196996,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
import numpy as np
class Map:
    """Grid of characters read from stdin; 'O' cells form lakes.

    ``visited`` is a float matrix used as a seen-marker during flood fill.
    """

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.graphical_representation = []
        self.visited = np.zeros((self.height, self.width))
        self.__init_from_input()

    def __init_from_input(self):
        """Read ``height`` grid lines from stdin, one character per cell."""
        for _ in range(self.height):
            self.graphical_representation.append(list(input()))

    def __get_element(self, x, y):
        return self.graphical_representation[y][x]

    def __set_as_visited(self, x, y):
        self.visited[y][x] = 1

    def __is_visited(self, x, y):
        return bool(self.visited[y][x] == 1)

    def check_element_recursive(self, x, y):
        """Older flood-fill entry point.

        NOTE(review): despite the name, the inner calls go to the iterative
        ``check_element`` (not to itself); kept unchanged since the main
        loop only uses ``check_element``.
        """
        lake_size = 0
        if 0 <= x < self.width and 0 <= y < self.height:
            if self.__get_element(x, y) == "O" and not self.__is_visited(x, y):
                lake_size += 1
                self.__set_as_visited(x, y)
                lake_size += self.check_element(x, y - 1)
                lake_size += self.check_element(x - 1, y)
                lake_size += self.check_element(x + 1, y)
                lake_size += self.check_element(x, y + 1)
        return lake_size

    def check_element(self, x, y):
        """Iterative flood fill: size of the 'O' lake containing (x, y).

        Returns 0 when the cell is out of bounds, not water, or already
        visited; marks every counted cell in ``visited``.
        """
        lake_size = 0
        pending = [(x, y)]
        while pending:
            cx, cy = pending.pop(0)
            if not (0 <= cx < self.width and 0 <= cy < self.height):
                continue
            if self.__get_element(cx, cy) != "O" or self.__is_visited(cx, cy):
                continue
            lake_size += 1
            self.__set_as_visited(cx, cy)
            pending.extend([(cx, cy - 1), (cx - 1, cy), (cx + 1, cy), (cx, cy + 1)])
        return lake_size

    def clear_visited(self):
        """Reset the seen-markers before the next query."""
        self.visited = np.zeros((self.height, self.width))

    def set_visited(self):
        """Mark only the diagonal (i == j) cells as visited.

        NOTE(review): unused by the main loop; looks like leftover debug
        code, kept for interface compatibility.
        """
        for i in range(self.height):
            for j in range(self.width):
                if i == j:
                    self.visited[i][j] = True

    def get_graphical_representation_as_string(self):
        """Grid as text, one row per line."""
        r = ""
        for row in self.graphical_representation:
            r += "".join(row) + "\n"
        return r

    def get_visited_as_string(self):
        """Visited-markers as text, one row per line (debug helper)."""
        r = ""
        for row in self.visited:
            r += "".join(str(cell) for cell in row) + "\n"
        return r
l = int(input())
h = int(input())
m = Map(l, h)
print("Map: \n" + m.get_graphical_representation_as_string(), file=sys.stderr)
print("Map: \n" + m.get_visited_as_string(), file=sys.stderr)
n = int(input())
for _ in range(n):
    x, y = [int(j) for j in input().split()]
    # size of the lake containing (x, y); markers are reset between queries
    lake_size = m.check_element(x, y)
    m.clear_visited()
    print(lake_size)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/hard/hard_Surface.py",
"copies": "1",
"size": "3615",
"license": "mit",
"hash": -2990221293524052500,
"line_mean": 27.6904761905,
"line_max": 119,
"alpha_frac": 0.5172890733,
"autogenerated": false,
"ratio": 3.442857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9457481261286306,
"avg_score": 0.0005329909741674447,
"num_lines": 126
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
# (start_day, duration) of every requested calculation
calculations = []
n = int(input())
for _ in range(n):
    starting_day, duration = [int(j) for j in input().split()]
    calculations.append((starting_day, duration))
calculations.sort(key=lambda tup: tup[0])
print(calculations, file=sys.stderr)

# greedy scan over start-sorted intervals: keep the current pick unless the
# next interval either starts after it ends (schedule a new one) or ends
# earlier (replace the pick with the earlier-finishing interval)
number_of_executed_calculations = 0
calculations_to_be_executed = []
planned_starting_day = 0
planned_end_day = 0
for current_starting_day, duration in calculations:
    current_end_day = current_starting_day + duration
    if current_starting_day >= planned_end_day:
        number_of_executed_calculations += 1
        calculations_to_be_executed.append((planned_starting_day, planned_end_day))
        planned_starting_day, planned_end_day = current_starting_day, current_end_day
    elif current_end_day <= planned_end_day:
        planned_starting_day, planned_end_day = current_starting_day, current_end_day

# append the last planned interval and drop the (0, 0) seed; the counter
# already accounts for that swap, so no further adjustment is needed
calculations_to_be_executed.append((planned_starting_day, planned_end_day))
calculations_to_be_executed = calculations_to_be_executed[1:]
print(calculations_to_be_executed, file=sys.stderr)

print(number_of_executed_calculations)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/hard/hard_Super_Computer.py",
"copies": "1",
"size": "1502",
"license": "mit",
"hash": -1902912102325353500,
"line_mean": 30.9574468085,
"line_max": 118,
"alpha_frac": 0.7217043941,
"autogenerated": false,
"ratio": 3.330376940133038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9548639172954274,
"avg_score": 0.0006884322557528523,
"num_lines": 47
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
from codingame_solutions.very_hard.very_hard_The_Resistance_utils import load_from_file, load_from_input, load_from_prepared_data
class MorseDictionaryElement:
    """Node of a trie keyed by morse signs ('.' and '-').

    Each node stores its sign, its depth (``level``), its children and the
    dictionary words whose full morse encoding ends at this node.  The root
    uses the artificial sign "x".
    """

    def __init__(self, sign="x", flag_holds_words=False, number=0):
        self.sign = sign
        self.level = number
        self.next = []
        self.flag_holds_words = flag_holds_words
        self.words = []

    def contains(self, sign):
        """True if a direct child carries ``sign``."""
        return any(child.sign == sign for child in self.next)

    def get_next(self, sign):
        """Return the direct child carrying ``sign``; None (with a stderr
        message) when it does not exist."""
        for child in self.next:
            if child.sign == sign:
                return child
        print("Error, next not found!", file=sys.stderr)
        return None

    def __add_sign(self, sign):
        """Return the child for ``sign``, creating it when missing."""
        if self.contains(sign):
            return self.get_next(sign)
        child = MorseDictionaryElement(sign, number=self.level + 1)
        self.next.append(child)
        return child

    def __fill_with_word(self, word):
        """Record a dictionary word that ends at this node."""
        self.flag_holds_words = True
        self.words.append(word)

    def add(self, word_in_morse, word):
        """Insert ``word`` under the path spelled by ``word_in_morse``."""
        node = self
        for sign in word_in_morse:
            node = node.__add_sign(sign)
        node.__fill_with_word(word)

    def count_elements(self):
        """Count trie nodes, excluding the artificial root."""
        count = 1 if self.sign in (".", "-") else 0
        for child in self.next:
            count += child.count_elements()
        return count

    def count_words(self):
        """Count the words stored in the whole subtree."""
        count = len(self.words) if self.flag_holds_words else 0
        for child in self.next:
            count += child.count_words()
        return count

    def get_as_string(self, s=""):
        """Depth-first dump: one 'level: sign, ' entry per node, followed by
        its words (or a bare newline for non-sign nodes)."""
        s += str(self.level) + ": " + self.sign + ", "
        if self.flag_holds_words:
            s += ", ".join(self.words) + "\n"
        elif self.sign != "." and self.sign != "-":
            s += "\n"
        for child in self.next:
            s += child.get_as_string()
        return s

    def find_words(self, word_in_morse):
        """Words stored exactly at the path ``word_in_morse``.

        NOTE(review): returns the empty string (not an empty list) when the
        path leaves the trie -- kept for compatibility.
        """
        node = self
        for sign in word_in_morse:
            if not node.contains(sign):
                return ""
            node = node.get_next(sign)
        return node.words

    def test_where_the_word_follows(self, word_in_morse):
        """Walk the trie along ``word_in_morse`` from its first sign.

        Returns two parallel lists: the prefix lengths at which stored
        words end, and the number of words ending at each such length.
        The walk stops at the first sign that leaves the trie.
        """
        node = self
        positions = []
        numbers_of_words = []
        fell_off_tree = False
        for i, sign in enumerate(word_in_morse):
            if node.flag_holds_words:
                # some words end right before this prefix position
                positions.append(i)
                numbers_of_words.append(len(node.words))
            if not node.contains(sign):
                fell_off_tree = True
                break
            node = node.get_next(sign)
        # the full prefix was consumed; the final node still needs checking
        if not fell_off_tree and node.flag_holds_words:
            positions.append(len(word_in_morse))
            numbers_of_words.append(len(node.words))
        return positions, numbers_of_words
def print_morse_dict_info(morse_dictionary):
    """Dump the trie and its word/node counts to stderr (debug aid)."""
    print(morse_dictionary.get_as_string(), file=sys.stderr)
    print(f"morse_dictionary.count_words(): {morse_dictionary.count_words()}", file=sys.stderr)
    print(f"morse_dictionary.count_elements(): {morse_dictionary.count_elements()}", file=sys.stderr)
def generate_morse_dictionary(words, words_in_morse):
    """Build a trie mapping each morse encoding to its plaintext word."""
    root = MorseDictionaryElement(number=0)
    for plain_word, encoded_word in zip(words, words_in_morse):
        root.add(encoded_word, plain_word)
    return root
if __name__ == '__main__':
    # alternative data sources: load_from_prepared_data() / load_from_input()
    message, words, words_in_morse = load_from_file("very_hard_The_Resistance/very_hard_The_Resistance_test_4.txt")

    morse_dictionary = generate_morse_dictionary(words, words_in_morse)
    print_morse_dict_info(morse_dictionary)

    # solutions[i]: number of ways to decode the first i signs of the message
    solutions = [0] * (len(message) + 1)
    solutions[0] = 1
    for i, ways in enumerate(solutions):
        if ways != 0:
            # every dictionary word matching a prefix of message[i:] extends
            # each of the ``ways`` decodings that end at position i
            positions, numbers_of_words = morse_dictionary.test_where_the_word_follows(message[i:])
            for position, number in zip(positions, numbers_of_words):
                solutions[i + position] += ways * number

    print(solutions[-1])
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/very_hard/very_hard_The_Resistance.py",
"copies": "1",
"size": "5550",
"license": "mit",
"hash": -1996902431697612500,
"line_mean": 30.8965517241,
"line_max": 129,
"alpha_frac": 0.5922522523,
"autogenerated": false,
"ratio": 3.78839590443686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.488064815673686,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
class ContactManagerElement:
    """Trie node over phone-number digits; the root uses sentinel digit -1."""

    def __init__(self, number=-1):
        self.digit = number
        self.next = []

    def contains(self, digit: int):
        """True if a direct child stores ``digit``."""
        return any(child.digit == digit for child in self.next)

    def add_digit(self, digit: int):
        """Return the child for ``digit``, creating it when missing."""
        for child in self.next:
            if child.digit == digit:
                return child
        child = ContactManagerElement(digit)
        self.next.append(child)
        return child

    def add(self, telephone_number: list):
        """Insert one phone number (a sequence of digits) into the trie."""
        node = self
        for digit in telephone_number:
            node = node.add_digit(digit)

    def count_elements(self):
        """Count stored digit nodes; the sentinel root is excluded."""
        count = 0 if self.digit == -1 else 1
        for child in self.next:
            count += child.count_elements()
        return count
n = int(input())
contact_manager = ContactManagerElement()
for _ in range(n):
    # one phone number per line; store it digit by digit
    telephone_number = [int(digit) for digit in input()]
    print(telephone_number, file=sys.stderr)
    contact_manager.add(telephone_number)

# the number of elements (referencing a number) stored in the structure
print(contact_manager.count_elements())
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_Telephone_Numbers.py",
"copies": "1",
"size": "1551",
"license": "mit",
"hash": -7588476389660458000,
"line_mean": 24.0161290323,
"line_max": 72,
"alpha_frac": 0.6144422953,
"autogenerated": false,
"ratio": 3.906801007556675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5021243302856675,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
class MayanNumericalSystem:
    """Positional numeral system (base 20 by default) whose digits are
    ASCII-art glyphs.

    Reads the glyph table from stdin on construction: first a line ``l h``
    (glyph width and height), then ``h`` rows containing all ``base``
    glyphs side by side.  ``numbers[d]`` holds glyph ``d`` with its rows
    concatenated (no newlines).
    """

    def __init__(self, base=20):
        self.base = base
        self.numbers = ["" for _ in range(self.base)]
        self.l = 0
        self.h = 0
        self.__read_from_input()

    def __read_from_input(self):
        """Parse the glyph table from stdin."""
        self.l, self.h = [int(i) for i in input().split()]
        for _ in range(self.h):
            numeral = input()
            for j in range(self.base):
                self.numbers[j] += numeral[j * self.l:(j + 1) * self.l]

    def get_number_graphical_representation(self, number: int):
        """Render ``number`` in this base, most significant digit first;
        each digit is its full multi-line glyph (newline after each row)."""
        digits = []
        while number >= self.base:
            number, rest = divmod(number, self.base)
            digits.append(rest)
        digits.append(number)
        print("digits: " + str(digits), file=sys.stderr)
        return "".join(self.get_digit_graphical_representation(digit)
                       for digit in reversed(digits))

    def get_digit_graphical_representation(self, digit: int):
        """Glyph for a single digit as ``h`` newline-terminated rows."""
        return "".join(self.numbers[digit][i * self.l:(i + 1) * self.l] + "\n"
                       for i in range(self.h))

    def get_value(self, graphical_representation: str):
        """Digit value for a row-concatenated glyph string; -1 if unknown."""
        for value, glyph in enumerate(self.numbers):
            if graphical_representation == glyph:
                return value
        return -1

    def get_number_from_input(self, number_of_lines_to_read: int):
        """Read a multi-digit Mayan number from stdin (``h`` raw lines per
        digit, ``number_of_lines_to_read`` lines in total) and return its
        integer value."""
        number = 0
        digit_count = number_of_lines_to_read // self.h
        for power in range(digit_count - 1, -1, -1):
            glyph = ""
            for _ in range(self.h):
                glyph += input()
            value = self.get_value(glyph)
            print("value: " + str(value), file=sys.stderr)
            number += value * pow(self.base, power)
        return number
ns = MayanNumericalSystem()
# dump the parsed glyph table for debugging
for i in range(20):
    print(ns.get_number_graphical_representation(i), file=sys.stderr)

s1 = int(input())
n1 = ns.get_number_from_input(s1)
s2 = int(input())
n2 = ns.get_number_from_input(s2)
operation = input()
print("n1: " + str(n1), file=sys.stderr)
print("n2: " + str(n2), file=sys.stderr)
print("operator: " + str(operation), file=sys.stderr)

# apply the requested operation (integer division for '/')
operations = {
    "+": lambda a, b: a + b,
    "-": lambda a, b: a - b,
    "*": lambda a, b: a * b,
    "/": lambda a, b: a // b,
}
result = operations[operation](n1, n2) if operation in operations else -1
print("result: " + str(result), file=sys.stderr)

# drop the final newline before emitting the answer
print(ns.get_number_graphical_representation(result)[:-1])
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_Mayan_Calculation.py",
"copies": "1",
"size": "2646",
"license": "mit",
"hash": 1171109061022263000,
"line_mean": 23.0545454545,
"line_max": 70,
"alpha_frac": 0.5487528345,
"autogenerated": false,
"ratio": 3.3451327433628317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9385416741917216,
"avg_score": 0.0016937671891232263,
"num_lines": 110
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
class Player:
    """One 'war' card-game player: an id and an ordered deck of integer
    card values (index 0 is the top of the deck)."""

    def __init__(self, number):
        self.number = number
        self.deck = []

    def get_deck_as_text(self):
        """Return the deck as 'v1, v2, ...' (debug helper).

        Fix: the original built 'v1, v2, ' and stripped only the final
        space with ``[:-1]``, leaving a trailing comma; ``join`` yields a
        clean separator.
        """
        return ", ".join(str(card) for card in self.deck)

    def get_top_card(self):
        """Remove and return the top card of the deck."""
        return self.deck.pop(0)

    def get_top_three_cards(self):
        """Remove and return the top three cards (the face-down war cards),
        in draw order."""
        return [self.deck.pop(0) for _ in range(3)]

    def add_won_cards(self, my_cards, opponent_cards):
        """Append the round's winnings to the bottom of the deck: the
        winner's own cards first, then the opponent's, each in play order.
        Both batches must have the same size; otherwise only a stderr
        message is printed and the deck is left unchanged."""
        if len(my_cards) == len(opponent_cards):
            self.deck += my_cards + opponent_cards
        else:
            print("Wrong number of cards from players!", file=sys.stderr)
def convert_card_name_to_value(card_name):
    """Numeric rank of a card label such as '7H', 'QS' or '10D'.

    Labels are rank + suit (D/H/C/S); the suit never affects the value.
    Ranks 2-9 map to themselves, J/Q/K/A to 11-14, and '10X' (the only
    three-character label) to 10.  An unrecognized rank logs to stderr
    and yields -2.
    """
    if len(card_name) == 3:
        # special case - "10" is the only two-character rank
        return 10
    rank = card_name[0]
    if rank in "23456789":
        return int(rank)
    face_card_values = {"J": 11, "Q": 12, "K": 13, "A": 14}
    if rank in face_card_values:
        return face_card_values[rank]
    print("Unrecognized card value!", file=sys.stderr)
    return -2
def play_war(p1, p2, used_cards_p1, used_cards_p2):
    """Resolve a 'war' (tied battle): each player lays three cards face
    down plus one face up; a further tie recurses into another war.

    Returns 1 or 2 for the winning player, or -1 when either deck holds
    fewer than four cards (the game ends in PAT).  Every card drawn is
    appended, in draw order, to the corresponding ``used_cards_*`` list
    (mutated in place for the caller).
    """
    if len(p1.deck) < 4 or len(p2.deck) < 4:
        return -1
    used_cards_p1.extend(p1.get_top_three_cards())
    used_cards_p2.extend(p2.get_top_three_cards())
    face_up_p1 = p1.get_top_card()
    face_up_p2 = p2.get_top_card()
    used_cards_p1.append(face_up_p1)
    used_cards_p2.append(face_up_p2)
    if face_up_p1 > face_up_p2:
        return 1
    if face_up_p1 < face_up_p2:
        return 2
    return play_war(p1, p2, used_cards_p1, used_cards_p2)
player_1 = Player(1)
player_2 = Player(2)

n = int(input())  # the number of cards for player 1
for _ in range(n):
    player_1.deck.append(convert_card_name_to_value(input()))
m = int(input())  # the number of cards for player 2
for _ in range(m):
    player_2.deck.append(convert_card_name_to_value(input()))

flag_war_has_ended = False
number_of_rounds = 0
while not flag_war_has_ended:
    # each player turns over the top card of their deck
    card_from_player_1 = player_1.get_top_card()
    card_from_player_2 = player_2.get_top_card()
    used_cards_from_player_1 = [card_from_player_1]
    used_cards_from_player_2 = [card_from_player_2]
    if card_from_player_1 > card_from_player_2:
        winner = 1
    elif card_from_player_1 < card_from_player_2:
        winner = 2
    else:
        # equal cards: resolve via a (possibly nested) war
        winner = play_war(player_1, player_2, used_cards_from_player_1, used_cards_from_player_2)
    # distribute the winnings; -1 means the war ran out of cards (PAT)
    if winner == 1:
        player_1.add_won_cards(used_cards_from_player_1, used_cards_from_player_2)
    elif winner == 2:
        player_2.add_won_cards(used_cards_from_player_1, used_cards_from_player_2)
    else:
        flag_war_has_ended = True
    number_of_rounds += 1
    print("Winner of round " + str(number_of_rounds) + ": " + str(winner) + ", P1 number of cards " + str(len(player_1.deck)) + ", P2 number of cards " + str(len(player_2.deck)), file=sys.stderr)
    # the game also ends once either player is out of cards
    if len(player_1.deck) == 0 or len(player_2.deck) == 0:
        flag_war_has_ended = True

if len(player_1.deck) == 0:
    r = "2 " + str(number_of_rounds)
elif len(player_2.deck) == 0:
    r = "1 " + str(number_of_rounds)
else:
    r = "PAT"
print(r)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_Winamax_Sponsored_Challenge.py",
"copies": "1",
"size": "4776",
"license": "mit",
"hash": 559992022288150800,
"line_mean": 27.9454545455,
"line_max": 195,
"alpha_frac": 0.5649078727,
"autogenerated": false,
"ratio": 2.939076923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8997753440457422,
"avg_score": 0.0012462710639001618,
"num_lines": 165
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
class Word:
    """A word prepared for Scrabble-style scoring.

    Keeps the original text together with a 26-slot histogram of letter
    counts so two words can be compared letter by letter.
    """

    def __init__(self, text: str):
        self.__number_of_letters = ord("z") - ord("a") + 1
        # Tile values indexed in the same a..z order as the histogram.
        self.letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        self.values = [1, 3, 3, 2, 1, 4, 2, 4, 1, 8, 5, 1, 3, 1, 1, 3, 10, 1, 1, 1, 1, 4, 4, 8, 4, 10]
        self.alphabet = [0] * self.__number_of_letters
        self.original = text
        for character in text:
            self.alphabet[self.get_letter_index(character)] += 1

    def get_letter_index(self, letter):
        """Map a lowercase letter 'a'..'z' onto index 0..25."""
        return ord(letter) - ord("a")

    def compare(self, w):
        """Score *w* when built from this word's letters; 0 if impossible."""
        if not self.__check_if_subset(w):
            return 0
        return sum(
            min(mine, theirs) * value
            for mine, theirs, value in zip(self.alphabet, w.alphabet, self.values)
        )

    def __check_if_subset(self, w):
        """True when every letter of *w* is available in this word."""
        return all(
            mine >= theirs
            for mine, theirs in zip(self.alphabet, w.alphabet)
        )
# w1 = Word("arwtsre")
# w2 = Word("arrest")
# w3 = Word("waster")
#
# print(w1.compare(w2))
# print(w1.compare(w3))
# Main: read the dictionary (only words that fit on a 7-letter rack), then
# score every word against the drawn letters and print the best one.
dictionary = []
n = int(input())
for i in range(n):
    w = input()
    if len(w) <= 7:
        dictionary.append(Word(w))
letters = input()
my_word = Word(letters)
best_score = 0
best_word = ""
for word in dictionary:
    score = my_word.compare(word)
    if score > best_score:
        best_score = score
        best_word = word
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
# NOTE(review): if no dictionary word can be built, best_word stays the
# string "" and .original raises AttributeError — presumably the puzzle
# guarantees at least one match; confirm.
print(best_word.original)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_Scrabble.py",
"copies": "1",
"size": "2839",
"license": "mit",
"hash": 6754119895113370000,
"line_mean": 28.5729166667,
"line_max": 153,
"alpha_frac": 0.4938358577,
"autogenerated": false,
"ratio": 2.9207818930041154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8908541403465899,
"avg_score": 0.0012152694476432163,
"num_lines": 96
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
def get_count_of_first_number(elements):
    """Return ``(value, run_length)`` for the run at the head of *elements*.

    *elements* must be non-empty; the run length is how many consecutive
    leading items equal the first one.
    """
    value = elements[0]
    count = 1
    # Walk forward until the run breaks or the sequence ends.
    while count < len(elements) and elements[count] == value:
        count += 1
    return value, count
# Main: produce the l-th line of the Conway look-and-say sequence seeded
# with r, by re-describing the current line (count, value pairs) l-1 times.
r = int(input())
l = int(input())
current_line = [r]
new_line = []
for i in range(l-1):
    k = 0
    new_line = []
    print("Current line: " + str(current_line), file=sys.stderr)
    while k < len(current_line):
        # Describe the run starting at position k as "count value".
        number, count = get_count_of_first_number(current_line[k:])
        print("Processed fragment: " + str(current_line[k:]), file=sys.stderr)
        print("Number: " + str(number) + ", count: " + str(count), file=sys.stderr)
        new_line.append(count)
        new_line.append(number)
        k += count
    current_line = new_line
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
result = ""
for value in current_line:
    result += str(value) + " "
print(result[:-1])
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_Conway_Sequence.py",
"copies": "1",
"size": "1068",
"license": "mit",
"hash": -1334255756115997400,
"line_mean": 20.7959183673,
"line_max": 83,
"alpha_frac": 0.5889513109,
"autogenerated": false,
"ratio": 3.1597633136094676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9241497451926148,
"avg_score": 0.0014434345166638083,
"num_lines": 49
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
def o_1(n: int):
    """Constant cost model: O(1)."""
    return 1
def o_log_n(n: int):
    """Logarithmic cost model: O(log n)."""
    return math.log2(n)
def o_n(n: int):
    """Linear cost model: O(n)."""
    return n
def o_n_log_n(n: int):
    """Linearithmic cost model: O(n log n)."""
    return n * math.log2(n)
def o_n_2(n: int):
    """Quadratic cost model: O(n^2)."""
    return n ** 2
def o_n_2_log_n(n: int):
    """O(n^2 log n) cost model."""
    return n * n * math.log2(n)
def o_n_3(n: int):
    """Cubic cost model: O(n^3)."""
    return n ** 3
def o_2_n(n: int):
    """Exponential cost model: O(2^n); see the overflow note in the main part."""
    return math.pow(2, n)
def normalize_difference(difference: float, n: int):
    """Average *difference* over *n* samples and return its distance from 1.

    A perfect model fit yields a per-sample quotient ratio of 1, so a
    smaller return value means a better fit.
    """
    return abs(1 - difference / n)
# Main: read (n, time) measurement pairs, compare consecutive time ratios
# against each complexity model's predicted ratio, and report the model
# whose normalized deviation from 1 is smallest.
number_of_elements = int(input())
times = []
nums = []
for i in range(number_of_elements):
    num, t = [int(j) for j in input().split()]
    times.append(t)
    nums.append(num)
# calculate differences
differences = [0]*8
for t_previous, t, num_previous, num in zip(times[:-1], times[1:], nums[:-1], nums[1:]):
    t_quotient = t / t_previous
    t_quotient_o_1 = o_1(num) / o_1(num_previous)
    t_quotient_o_log_n = o_log_n(num) / o_log_n(num_previous)
    t_quotient_o_n = o_n(num) / o_n(num_previous)
    t_quotient_o_n_log_n = o_n_log_n(num) / o_n_log_n(num_previous)
    t_quotient_o_n_2 = o_n_2(num) / o_n_2(num_previous)
    t_quotient_o_n_2_log_n = o_n_2_log_n(num) / o_n_2_log_n(num_previous)
    t_quotient_o_n_3 = o_n_3(num) / o_n_3(num_previous)
    #t_quotient_o_2_n = o_2_n(num) / o_2_n(num_previous)
    differences[0] += t_quotient / t_quotient_o_1
    differences[1] += t_quotient / t_quotient_o_log_n
    differences[2] += t_quotient / t_quotient_o_n
    differences[3] += t_quotient / t_quotient_o_n_log_n
    differences[4] += t_quotient / t_quotient_o_n_2
    differences[5] += t_quotient / t_quotient_o_n_2_log_n
    differences[6] += t_quotient / t_quotient_o_n_3
    #differences[7] += t_quotient / t_quotient_o_2_n
# this can be somehow tricky (it was for me, when I was writing this)
# in short it changes each element of the list using the function provided
# it can be done without the function, it is used for readability
differences[:] = [normalize_difference(diff, number_of_elements-1) for diff in differences]
for diff in differences:
    print(str(diff), file=sys.stderr)
minimal_difference = min(differences)
index_of_minimal_difference = differences.index(minimal_difference)
print("Minimal difference: " + str(minimal_difference), file=sys.stderr)
print("Index: " + str(index_of_minimal_difference), file=sys.stderr)
# IMPORTANT: as calculating 2^n is impossible for most of the cases (overflow errors), we use a special trick
if minimal_difference > 0.1:
    index_of_minimal_difference = 7
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
r = ""
if index_of_minimal_difference == 0:
    r = "O(1)"
elif index_of_minimal_difference == 1:
    r = "O(log n)"
elif index_of_minimal_difference == 2:
    r = "O(n)"
elif index_of_minimal_difference == 3:
    r = "O(n log n)"
elif index_of_minimal_difference == 4:
    r = "O(n^2)"
elif index_of_minimal_difference == 5:
    r = "O(n^2 log n)"
elif index_of_minimal_difference == 6:
    r = "O(n^3)"
elif index_of_minimal_difference == 7:
    r = "O(2^n)"
else:
    print("Wrong index!", file=sys.stderr)
print(r)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/hard/hard_Bender_Algorithmic_Complexity.py",
"copies": "1",
"size": "3259",
"license": "mit",
"hash": -5894315202620934000,
"line_mean": 26.1583333333,
"line_max": 109,
"alpha_frac": 0.6452899662,
"autogenerated": false,
"ratio": 2.6647587898609975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8806445583641406,
"avg_score": 0.0007206344839183896,
"num_lines": 120
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
# Main: find the largest drop (maximal loss) between a local peak and a
# subsequent low in a stream of stock values; result is <= 0.
n = int(input())
vs = input()
stock_values = []
for value in vs.split(" "):
    stock_values.append(int(value))
start_value = stock_values[0]
difference = 0
global_difference = 0
trend = 0
for stock_value in stock_values:
    if trend == 0:
        if stock_value < start_value:
            # downward trend
            trend = -1
            difference = stock_value - start_value
        else:
            # nothing changes, update starting value
            start_value = stock_value
    elif trend == -1:
        if stock_value < start_value:
            current_difference = (stock_value - start_value)
            if current_difference < difference:
                difference = current_difference
        elif stock_value > start_value:
            # price recovered above the last peak: close this downward
            # stretch and remember its worst drop.
            trend = 0
            start_value = stock_value
            if difference < global_difference:
                global_difference = difference
# additional check in case all values were in downward trend
if difference < global_difference:
    global_difference = difference
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
print(stock_values, file=sys.stderr)
print(global_difference)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_Stock_Exchange_Losses.py",
"copies": "1",
"size": "1256",
"license": "mit",
"hash": -3720164446917385000,
"line_mean": 23.6274509804,
"line_max": 60,
"alpha_frac": 0.6178343949,
"autogenerated": false,
"ratio": 3.888544891640867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5006379286540867,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
# Main: split the gift price c among n participants so nobody pays more
# than their budget and payments are as even as possible; participants who
# cannot afford the even share pay their whole budget instead.
n = int(input())
c = int(input())
budgets = []
for i in range(n):
    b = int(input())
    budgets.append(b)
contributions = []
result = ""
if sum(budgets) < c:
    result = "IMPOSSIBLE"
else:
    budgets.sort()
    flag_still_searching = True
    gift_value_to_pay = c
    while flag_still_searching:
        print("Number of persons: " + str(len(budgets)), file=sys.stderr)
        print("Remaining contribution: " + str(gift_value_to_pay), file=sys.stderr)
        # calculate average sum to pay for each person
        avg_pay_float = gift_value_to_pay / len(budgets)
        # special check to find if it has fractional part
        avg_pay = int(avg_pay_float)
        if avg_pay_float - int(avg_pay_float) > 0:
            avg_pay += 1
        flag_everybody_has_enough = True
        for b in budgets:
            if b < avg_pay:
                flag_everybody_has_enough = False
        if flag_everybody_has_enough:
            # TODO: change this to take into account that some fractional parts exists
            rest = gift_value_to_pay - len(budgets)*int(avg_pay_float)
            # add average pay for each remaining person but last
            for i in range(len(budgets)-rest):
                contributions.append(int(avg_pay_float))
            for i in range(rest):
                contributions.append(int(avg_pay_float)+1)
            flag_still_searching = False
        else:
            # remove the poorest guy and substitute his contribution from overall cost
            smallest_contribution = budgets[0]
            budgets.pop(0)
            gift_value_to_pay -= smallest_contribution
            contributions.append(smallest_contribution)
    r = ""
    for contrib in contributions:
        r += str(contrib) + "\n"
    result = r[:-1]
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
print(result)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_The_Gift.py",
"copies": "1",
"size": "1958",
"license": "mit",
"hash": 3777576429674442000,
"line_mean": 25.8219178082,
"line_max": 86,
"alpha_frac": 0.595505618,
"autogenerated": false,
"ratio": 3.5215827338129495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46170883518129496,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
# Main: pretty-print a CGX document read from n input lines.  A single
# pass tracks whether we are inside a quoted string (left verbatim) and an
# indentation level driven by parentheses; ';' breaks lines between
# siblings.
n = int(input())
whole_file = ""
for i in range(n):
    cgxline = input()
    whole_file += cgxline
whole_file = whole_file.strip()
r = ""
flag_string = False
intend = 0
for c in whole_file:
    # register strings
    if c == "'" and not flag_string:
        flag_string = True
        r += c
    elif c == "'" and flag_string:
        flag_string = False
        r += c
    elif flag_string:
        r += c
    elif not flag_string:
        if c == " " or c == "\t":
            # ignore whitespace while not in string
            pass
        # special case of bracket after equal sign
        elif c == "(" and len(r) > 0 and r[-1] == "=":
            r += "\n"
            for i in range(intend):
                r += " "
            r += c + "\n"
            intend += 1
            for i in range(intend):
                r += " "
        elif c == "(":
            r += c + "\n"
            intend += 1
            for i in range(intend):
                r += " "
        elif c == ")":
            # special case for empty parts of left bracket, right bracket
            last_bracket_index = str.rfind(r, "(")
            text_from_last_bracket = r[last_bracket_index:].strip(" ")
            if text_from_last_bracket == "(\n":
                r = r[:last_bracket_index+1]
            r += "\n"
            intend -= 1
            for i in range(intend):
                r += " "
            r += c
        elif c == ";":
            r += c + "\n"
            for i in range(intend):
                r += " "
        else:
            r += c
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
print(r)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/hard/hard_CGX_Formatter.py",
"copies": "1",
"size": "1740",
"license": "mit",
"hash": -2264450889739075000,
"line_mean": 23.1666666667,
"line_max": 73,
"alpha_frac": 0.4298850575,
"autogenerated": false,
"ratio": 3.6554621848739495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45853472423739494,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
# Weber problem
# http://www.matstos.pjwstk.edu.pl/no10/no10_mlodak.pdf
# https://en.wikipedia.org/wiki/Weber_problem
# https://en.wikipedia.org/wiki/Geometric_median
# simple solution:
# find the median point and that is all!
# Main: minimal cable length = horizontal span (max_x - min_x) plus the sum
# of vertical distances from every home to the median y coordinate.
homes = []
n = int(input())
for i in range(n):
    x, y = [int(j) for j in input().split()]
    homes.append((x, y))
max_x = max(homes, key=lambda tup: tup[0])
min_x = min(homes, key=lambda tup: tup[0])
max_y = max(homes, key=lambda tup: tup[1])
min_y = min(homes, key=lambda tup: tup[1])
print("Max x: " + str(max_x), file=sys.stderr)
print("Min x: " + str(min_x), file=sys.stderr)
print("Max y: " + str(max_y), file=sys.stderr)
print("Min y: " + str(min_y), file=sys.stderr)
print("Sum y: " + str(sum(abs(y) for x, y in homes)), file=sys.stderr)
sorted_by_y = sorted(homes, key=lambda tup: tup[1])
print("Sorted y: " + str(sorted_by_y), file=sys.stderr)
median = 0
if n % 2 == 1:
    median = sorted_by_y[n//2]
else:
    # NOTE(review): for even n, (n+1)//2 == n//2, so this averages an
    # element with itself; the conventional middle pair is indices
    # n//2 - 1 and n//2 — confirm whether this was intentional.
    median = ((sorted_by_y[n//2][0] + sorted_by_y[(n+1)//2][0])/2, (sorted_by_y[n//2][1] + sorted_by_y[(n+1)//2][1])/2)
print("Median: " + str(median), file=sys.stderr)
median_y = median[1]
distance = 0
for x, y in sorted_by_y:
    distance += abs(y - median_y)
distance += abs(max_x[0] - min_x[0])
print("Distance: " + str(distance), file=sys.stderr)
# Write an action using print
# To debug: print >> sys.stderr, "Debug messages..."
print(int(distance))
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/medium/medium_Network_Cabling.py",
"copies": "1",
"size": "1486",
"license": "mit",
"hash": -1078594770539370100,
"line_mean": 24.6206896552,
"line_max": 119,
"alpha_frac": 0.6298788694,
"autogenerated": false,
"ratio": 2.4320785597381342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35619574291381345,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
from collections import deque, namedtuple
GraphEdge = namedtuple("GraphEdge", "starting_vertex, ending_vertex, distance")
EdgeProperties = namedtuple("EdgeProperties", "destitantion_vertex, distance")
class Graph(object):
    """
    A simple directed graph with optional DAG-only helpers.

    Internal representation: ``{vertex: [EdgeProperties(destination, distance), ...]}``
    when edges are added through :meth:`add_edge`.  A plain
    ``{vertex: [neighbour, ...]}`` mapping passed to the constructor is
    also tolerated by the path-finding helpers.

    Based on:
    http://www.python-course.eu/graphs_python.php
    and:
    https://www.python.org/doc/essays/graphs/
    """

    def __init__(self, graph_dict=None, flag_dag=False):
        """Initialize a graph object.

        :param graph_dict: optional initial adjacency mapping
        :param flag_dag: flag indicating if the graph is a Directed Acyclic Graph
        """
        self.flag_DAG = flag_dag
        # BUG FIX: the default used to be a shared mutable ``{}`` — every
        # Graph() created without an argument silently shared one dict.
        if graph_dict is None:
            graph_dict = {}
        self.__graph_dict = graph_dict
        self.additional_info_dict = {}

    @staticmethod
    def __neighbour_vertex(node):
        # Neighbours may be stored either as EdgeProperties namedtuples
        # (added via add_edge) or as bare vertices (constructor dict);
        # normalise either form to the destination vertex.
        return getattr(node, "destitantion_vertex", node)

    def vertices(self):
        """Return the vertices of the graph as a list."""
        return list(self.__graph_dict.keys())

    def edges(self):
        """Return the edges of the graph as GraphEdge namedtuples."""
        return self.__generate_edges()

    def add_vertex(self, vertex, additional_info=None):
        """If *vertex* is not yet known, register it with an empty
        adjacency list and remember its *additional_info*.
        Otherwise nothing has to be done."""
        if vertex not in self.__graph_dict:
            self.__graph_dict[vertex] = []
            self.additional_info_dict[vertex] = additional_info

    def add_edge(self, edge: "GraphEdge"):
        """Add one directed edge.  Between two vertices there can be
        multiple edges; the destination vertex is registered too so every
        vertex appears as a key."""
        properties = EdgeProperties(edge.ending_vertex, edge.distance)
        if edge.starting_vertex in self.__graph_dict:
            self.__graph_dict[edge.starting_vertex].append(properties)
        else:
            # BUG FIX: the fresh adjacency list used to be stored under
            # edge.ending_vertex, silently dropping the edge's origin.
            self.__graph_dict[edge.starting_vertex] = [properties]
        if edge.ending_vertex not in self.__graph_dict:
            self.__graph_dict[edge.ending_vertex] = []

    def __generate_edges(self):
        """Private helper generating the edges of the graph as
        GraphEdge namedtuples."""
        edges = []
        for vertex in self.__graph_dict:
            for neighbour in self.__graph_dict[vertex]:
                if {neighbour, vertex} not in edges:
                    edges.append(GraphEdge(vertex, neighbour.destitantion_vertex, neighbour.distance))
        return edges

    def find_path(self, start, end, path=None):
        """Return one path from *start* to *end* as a vertex list, or None."""
        path = (path if path is not None else []) + [start]
        if start == end:
            return path
        if start not in self.__graph_dict:
            return None
        for node in self.__graph_dict[start]:
            # BUG FIX: neighbours added via add_edge are EdgeProperties
            # namedtuples; the old code recursed on the namedtuple itself,
            # so no path was ever found on such graphs.
            vertex = self.__neighbour_vertex(node)
            if vertex not in path:
                new_path = self.find_path(vertex, end, path)
                if new_path:
                    return new_path
        return None

    def find_all_paths(self, start, end, path=None):
        """Return every acyclic path from *start* to *end* (list of lists)."""
        path = (path if path is not None else []) + [start]
        if start == end:
            return [path]
        if start not in self.__graph_dict:
            return []
        paths = []
        for node in self.__graph_dict[start]:
            vertex = self.__neighbour_vertex(node)
            if vertex not in path:
                new_paths = self.find_all_paths(vertex, end, path)
                for new_path in new_paths:
                    paths.append(new_path)
        return paths

    def find_shortest_path(self, start, end, path=None):
        """Return the path with the fewest vertices, or None."""
        path = (path if path is not None else []) + [start]
        if start == end:
            return path
        if start not in self.__graph_dict:
            return None
        shortest = None
        for node in self.__graph_dict[start]:
            vertex = self.__neighbour_vertex(node)
            if vertex not in path:
                new_path = self.find_shortest_path(vertex, end, path)
                if new_path:
                    if not shortest or len(new_path) < len(shortest):
                        shortest = new_path
        return shortest

    def find_longest_path_between(self, start, end, path=None):
        """Return the acyclic path with the most vertices, or None.

        NOTE: this method used to be called ``find_longest_path`` and was
        silently shadowed (hence unreachable) by the DAG distance variant
        defined later in the class; it has been renamed so both work.
        """
        path = (path if path is not None else []) + [start]
        if start == end:
            return path
        if start not in self.__graph_dict:
            return None
        longest = None
        for node in self.__graph_dict[start]:
            vertex = self.__neighbour_vertex(node)
            if vertex not in path:
                new_path = self.find_longest_path_between(vertex, end, path)
                if new_path:
                    if not longest or len(new_path) > len(longest):
                        longest = new_path
        return longest

    def find_all_paths_from_vertex(self, start, path=None):
        """Return all root-to-leaf paths starting at *start*.

        This only works for graphs that are more like a tree - edges are
        one direction only and there are no loops at all (i.e. a DAG).
        """
        if not self.flag_DAG:
            raise ValueError("This method only works for DAG!")
        path = (path if path is not None else []) + [start]
        if start not in self.__graph_dict:
            return None
        # additional stop condition to finish when in leaf
        if len(path) > 1 and len(self.__graph_dict[start]) == 1:
            return [path]
        paths = []
        for node in self.__graph_dict[start]:
            vertex = self.__neighbour_vertex(node)
            if vertex not in path:
                new_paths = self.find_all_paths_from_vertex(vertex, path)
                for new_path in new_paths:
                    paths.append(new_path)
        return paths

    def __topological_sort_util(self, key, visited, stack):
        # Depth-first visit; prepend *key* once all its descendants are done.
        visited[key] = True
        for neighbour in self.__graph_dict[key]:
            if not visited[neighbour.destitantion_vertex]:
                self.__topological_sort_util(neighbour.destitantion_vertex, visited, stack)
        stack.appendleft(key)

    def topological_sort(self) -> deque:
        """Return the vertices in topological order (DAG only).

        Based on:
        http://www.geeksforgeeks.org/topological-sorting/
        """
        if self.flag_DAG:
            stack = deque(maxlen=len(self.__graph_dict))
            visited = {}
            for key in self.__graph_dict:
                visited[key] = False
            for key in self.__graph_dict:
                if not visited[key]:
                    self.__topological_sort_util(key, visited, stack)
            return stack
        else:
            raise ValueError("This method only works for DAG!")

    def find_longest_path(self, starting_vertex):
        """Return the longest distance from *starting_vertex* to every
        vertex as a dict (unreachable vertices map to -1); DAG only.

        Based on:
        http://www.geeksforgeeks.org/find-longest-path-directed-acyclic-graph/
        Uses recursive topological_sort() to get topological sorting.
        """
        if self.flag_DAG:
            stack = self.topological_sort()
            min_distance = -1
            distances = {}
            for key in self.__graph_dict:
                distances[key] = min_distance
            distances[starting_vertex] = 0
            while len(stack) > 0:
                vertex = stack.popleft()
                if distances[vertex] != min_distance:
                    for neighbour in self.__graph_dict[vertex]:
                        n_name = neighbour.destitantion_vertex
                        n_dist = neighbour.distance
                        if distances[n_name] < distances[vertex] + n_dist:
                            distances[n_name] = distances[vertex] + n_dist
            return distances
        else:
            raise ValueError("This method only works for DAG!")

    def get_vertices_with_n_edges(self, n):
        """Return all vertices whose adjacency list has exactly *n* entries."""
        vertices_with_n_edges = []
        for key, values in self.__graph_dict.items():
            if len(values) == n:
                vertices_with_n_edges.append(key)
        return vertices_with_n_edges

    def __str__(self):
        res = "vertices: "
        for k in self.__graph_dict:
            res += str(k) + " "
        res += "\n"
        res += "edges: "
        for edge in self.__generate_edges():
            res += str(edge) + " "
        return res
if __name__ == "__main__":
    # Ad-hoc demo of the Graph class.
    # TODO: use unit test to test this module
    # g = { "a" : ["d", "e"],
    #       "b" : ["c"],
    #       "c" : ["b", "c", "d", "e"],
    #       "d" : ["a", "c"],
    #       "e" : ["c", "b"],
    #       "f" : []
    #     }
    #
    # graph = Graph(g)
    #
    # print("Vertices of graph:")
    # print(graph.vertices())
    #
    # print("Edges of graph:")
    # print(graph.edges())
    #
    # print("Add vertex:")
    # graph.add_vertex("z")
    #
    # print("Vertices of graph:")
    # print(graph.vertices())
    #
    # print("Add an edge:")
    # graph.add_edge({"a", "z"})
    #
    # print("Vertices of graph:")
    # print(graph.vertices())
    #
    # print("Edges of graph:")
    # print(graph.edges())
    #
    # print('Adding an edge {"x","y"} with new vertices:')
    # graph.add_edge({"x", "y"})
    # print("Vertices of graph:")
    # print(graph.vertices())
    # print("Edges of graph:")
    # print(graph.edges())
    #
    # print("Find path between a and b:")
    # print(graph.find_path("a", "b"))
    #
    # print("Find all paths between a and b:")
    # print(graph.find_all_paths("a", "b"))
    #
    # print("Find shortest path between a and b:")
    # print(graph.find_shortest_path("a", "b"))
    #
    # print("Find logest path between a and b:")
    # print(graph.find_longest_path("a", "b"))
    #
    # g = { "0" : ["1"],
    #       "1" : ["0", "2"],
    #       "2" : ["1", "3", "4"],
    #       "3" : ["2"],
    #       "4" : ["2"],
    #     }
    #
    # graph = Graph(g)
    graph = Graph()
    # NOTE(review): add_edge reads .starting_vertex/.ending_vertex/.distance,
    # but these are plain 2-tuples (no distance, no named fields) — this
    # demo presumably fails at runtime; should build GraphEdge instances.
    graph.add_edge(("0", "1"))
    graph.add_edge(("1", "0"))
    graph.add_edge(("1", "2"))
    graph.add_edge(("2", "1"))
    graph.add_edge(("2", "3"))
    graph.add_edge(("3", "2"))
    graph.add_edge(("2", "4"))
    graph.add_edge(("4", "2"))
    print(graph)
    print("Find all paths from 0:")
    # NOTE(review): find_all_paths_from_vertex() requires flag_dag=True and
    # this graph has cycles — confirm intended usage.
    print(graph.find_all_paths_from_vertex("0"))
    print("Vertices with 1 neighbour:")
    vertices_with_1_edge = graph.get_vertices_with_n_edges(1)
    print(vertices_with_1_edge)
    print(vertices_with_1_edge[1])
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/utilities/graph.py",
"copies": "1",
"size": "10083",
"license": "mit",
"hash": 633989918754274300,
"line_mean": 31.0095238095,
"line_max": 109,
"alpha_frac": 0.5234553208,
"autogenerated": false,
"ratio": 3.915728155339806,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49391834761398057,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
from os import listdir
from os.path import isfile, join, exists
import cv2
import pickle
from lxml import etree
class Utils:
    """Batch helpers for the annotation tool: render an annotated video,
    crop dataset images to the configured ROI, and export annotations to
    an XML file.

    NOTE: this module uses Python 2 syntax (bare print statement in
    create_video) and relies on cv2, pickle and lxml.
    """

    def __init__(self):
        # IMPORTANT - PARAMETERS
        # there is a problem with images that are bigger than screen resolution
        # they are resized by this parameters
        # change to 1.0 if not resizing is needed
        # self.scale_x = 1.0
        # self.scale_y = 1.0
        self.scale_x = 0.8
        self.scale_y = 0.8
        # choose the part of the image that should be used
        # put (0, 0) and (width, height) if you want whole image
        # self.roi_top_left_x = 698
        # self.roi_top_left_y = 650
        # self.roi_bottom_right_x = 1375 * self.scale_x
        # self.roi_bottom_right_y = 1150 * self.scale_y
        self.roi_top_left_x = 0
        self.roi_top_left_y = 0
        self.roi_bottom_right_x = 1600 * self.scale_x
        self.roi_bottom_right_y = 1200 * self.scale_y
        # set the directories (relative or not) where the dataset is and where the descriptions should be placed
        # all of this directories have to exist
        #self.dataset_name = "dataset_7"
        self.dataset_name = "processed_6"
        self.path_to_description = "description/" + self.dataset_name + "/"
        self.path_to_images = "datasets/" + self.dataset_name + "/"
        # range of image indices (into the sorted directory listing) to process
        self.image_counter_start = 900
        self.image_counter_stop = self.image_counter_start + 630#360#630
        # END OF PARAMETERS LIST
        # CONSTANTS
        self.font = cv2.FONT_HERSHEY_SIMPLEX

    def load_and_resize_image(self, filename):
        """Load *filename* from the dataset directory, scale it by
        (scale_x, scale_y) and crop it to the configured ROI."""
        img_original = cv2.imread(self.path_to_images + filename, cv2.IMREAD_COLOR)
        img_original_resized = cv2.resize(img_original, (0, 0), None,
                                          fx=self.scale_x, fy=self.scale_y,
                                          interpolation=cv2.INTER_NEAREST
                                          )
        img_original_resized_roi = img_original_resized[
                                   self.roi_top_left_y:self.roi_bottom_right_y,
                                   self.roi_top_left_x:self.roi_bottom_right_x
                                   ]
        return img_original_resized_roi

    def create_video(self):
        """Render the configured image range, overlaying any pickled
        annotations, into an AVI file (and preview each frame)."""
        # INITIALIZATION
        window_name = "image"
        cv2.namedWindow(window_name)
        files = [f for f in listdir(self.path_to_images) if isfile(join(self.path_to_images, f))]
        # first frame only determines the output video dimensions
        img = self.load_and_resize_image(files[self.image_counter_start])
        height, width, layers = img.shape
        video = cv2.VideoWriter("video\\" + self.dataset_name + ".avi", -1, 10, (width, height))
        for image_counter in range(self.image_counter_start, self.image_counter_stop, 1):
            img = self.load_and_resize_image(files[image_counter])
            file_to_open = self.path_to_description + str(image_counter) + "_" + files[image_counter] + ".pickle"
            print file_to_open
            if exists(file_to_open):
                with open(file_to_open, "rb") as f:
                    objects_to_draw = pickle.load(f)
                    for object_to_draw in objects_to_draw:
                        object_to_draw.draw(img, self.font)
            cv2.imshow(window_name, img)
            cv2.waitKey(20)
            video.write(img)
        cv2.destroyAllWindows()
        video.release()

    def prepare_roi(self):
        """Save the scaled/cropped version of every image in the configured
        range into a sibling '<dataset>_roi' directory as PNG."""
        path_to_roi_images = "datasets/" + self.dataset_name + "_roi/"
        files = [f for f in listdir(self.path_to_images) if isfile(join(self.path_to_images, f))]
        for image_counter in range(self.image_counter_start, self.image_counter_stop, 1):
            img = self.load_and_resize_image(files[image_counter])
            cv2.imwrite(path_to_roi_images + files[image_counter] + ".png", img)

    def prepare_xml(self):
        """Export all pickled annotations as one XML document, with bounding
        boxes scaled back to the original (un-resized) image coordinates."""
        files = [f for f in listdir(self.path_to_description) if isfile(join(self.path_to_description, f))]
        xml_name = "PUT_Surveillance_database_sequence_3"
        # Create the root element
        page = etree.Element(xml_name)
        # Make a new document tree
        doc = etree.ElementTree(page)
        # For multiple multiple attributes, use as shown above
        for filename in files:
            # Add the subelements
            pageElement = etree.SubElement(page, 'Image',
                                           name=filename)
            with open(self.path_to_description + filename, "rb") as f:
                objects_to_draw = pickle.load(f)
                for object_to_draw in objects_to_draw:
                    # undo the display scaling so coordinates match the source image
                    top_left_x = int(round(object_to_draw.point_top_left.x * 1/self.scale_x, 0))
                    top_left_y = int(round(object_to_draw.point_top_left.y * 1/self.scale_y, 0))
                    bottom_right_x = int(round(object_to_draw.point_bottom_right.x * 1/self.scale_x, 0))
                    bottom_right_y = int(round(object_to_draw.point_bottom_right.y * 1/self.scale_y, 0))
                    if object_to_draw.type != "hidden":
                        etree.SubElement(pageElement, 'Object',
                                         type=object_to_draw.type,
                                         #center_of_gravity_x=str(object_to_draw.centre.x),
                                         #center_of_gravity_y=str(object_to_draw.centre.y),
                                         minimal_bounding_box_top_left_x=str(top_left_x),
                                         minimal_bounding_box_top_left_y=str(top_left_y),
                                         minimal_bounding_box_bottom_right_x=str(bottom_right_x),
                                         minimal_bounding_box_bottom_right_y=str(bottom_right_y)
                                         )
        # Save to XML file
        # NOTE(review): outFile is never closed explicitly — presumably
        # relying on interpreter cleanup; consider a with-block.
        outFile = open("xml/" + xml_name + '.xml', 'w')
        doc.write(outFile, pretty_print=True, xml_declaration=True, encoding='utf-8')
if __name__ == "__main__":
    # Pick exactly one task per run by (un)commenting the calls below.
    u = Utils()
    #u.create_video()
    u.prepare_xml()
    #u.prepare_roi()
| {
"repo_name": "Michal-Fularz/database_marking_tool",
"path": "Utils.py",
"copies": "1",
"size": "6086",
"license": "mit",
"hash": -1996747124815045600,
"line_mean": 39.8456375839,
"line_max": 113,
"alpha_frac": 0.5504436411,
"autogenerated": false,
"ratio": 3.6684749849306812,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47189186260306815,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
import cv2
from os import listdir
from os.path import isfile, join, exists
import pickle
from ObjectInformation import ObjectInformation
from MouseButton import MouseButton
import copy
# GLOBALS - required for OpenCV mouse callback
right_button = MouseButton()   # right-button drag state: draws new rectangles
left_button = MouseButton()    # left-button drag state: selects / moves rectangles
new_object = None              # rectangle currently being drawn (right drag)
current_object = None          # rectangle currently selected (left click/drag)
objects_to_draw = []           # all annotation rectangles for the current image
# mouse callback function
# left key is used for moving objects
# right key is used for creating new objects
# mouse callback function
# left key is used for moving objects
# right key is used for creating new objects
def mouse_callback(event, x, y, flags, param):
    """OpenCV mouse handler (Python 2 module).

    Right-button drag creates and resizes a new ObjectInformation; a left
    click selects the topmost object under the cursor and a left drag
    moves the selected object.  Shares state through the module globals.
    """
    global current_object, new_object, objects_to_draw, right_button, left_button
    # right mouse button
    if event == cv2.EVENT_RBUTTONDOWN:
        right_button.callback_pressed(x, y)
        # start a fresh 1x1 rectangle; it grows while the button is held
        new_object = ObjectInformation()
        new_object.init(x, y, x+1, y+1, len(objects_to_draw))
        objects_to_draw.append(new_object)
    elif event == cv2.EVENT_RBUTTONUP:
        right_button.callback_released(x, y)
        new_object.finish(x, y)
    # mouse movement
    elif event == cv2.EVENT_MOUSEMOVE:
        if right_button.is_pressed:
            right_button.callback_moved(x, y)
            new_object.resize(x, y)
        if left_button.is_pressed:
            if current_object is not None and current_object.selected:
                # move by the delta since the last recorded cursor position
                dx = (x-left_button.get_previous_location_x())
                dy = (y-left_button.get_previous_location_y())
                current_object.move(dx, dy)
            left_button.callback_moved(x, y)
    # left mouse button
    elif event == cv2.EVENT_LBUTTONDOWN:
        print x, y
        left_button.callback_pressed(x, y)
        # clear any previous selection, then select the first hit object
        for object_to_draw in objects_to_draw:
            object_to_draw.unselect()
        for object_to_draw in objects_to_draw:
            if object_to_draw.check_if_inside(x, y):
                current_object = object_to_draw
                current_object.select(x, y)
                break
    elif event == cv2.EVENT_LBUTTONUP:
        left_button.callback_released(x, y)
def select_object_below(objects):
    """Transfer the selection to the next object, in list order, that also
    contains the point where the currently selected object was clicked.

    Returns the newly selected object, or None when nothing is selected or
    no later object covers that point.
    """
    found_selected = False
    reference = None
    result = None
    for candidate in objects:
        if found_selected and candidate.check_if_inside(
                reference.point_of_selection.x,
                reference.point_of_selection.y):
            # hand the selection over at the very same click position
            reference.unselect()
            result = candidate
            result.select(reference.point_of_selection.x,
                          reference.point_of_selection.y)
            break
        if candidate.selected:
            # start matching only from the element after the selected one
            found_selected = True
            reference = candidate
    return result
if __name__ == "__main__":
    # IMPORTANT - PARAMETERS
    # there is a problem with images that are bigger than screen resolution
    # they are resized by this parameters
    # change to 1.0 if not resizing is needed
    scale_x = 0.8#1.0#0.8
    scale_y = 0.8#1.0#0.8
    # choose the part of the image that should be used
    # put (0, 0) and (width, height) if you want whole image
    roi_top_left_x = 0#698
    roi_top_left_y = 0#650
    # NOTE(review): with fractional scales these become floats and are later
    # used as slice bounds - this relies on numpy accepting float indices
    # (removed in modern numpy); confirm against the numpy version in use.
    roi_bottom_right_x = 1600 * scale_x#1375
    roi_bottom_right_y = 1200 * scale_y#1150
    # set the directories (relative or not) where the dataset is and where the descriptions should be placed
    # all of this directories have to exist
    dataset_name = "processed_5"#"dataset_7"
    path_to_description = "description/" + dataset_name + "/"
    path_to_images = "datasets/" + dataset_name + "/"
    # END OF PARAMETERS LIST
    # CONSTANTS
    font = cv2.FONT_HERSHEY_SIMPLEX
    window_name = "image"
    # INITIALIZATION
    image_counter = 240#900#0
    flag_auto_load = False
    files = [f for f in listdir(path_to_images) if isfile(join(path_to_images, f))]
    cv2.namedWindow(window_name)
    cv2.setMouseCallback(window_name, mouse_callback)
    #img = cv2.imread("Desert.jpg", cv2.IMREAD_COLOR)
    #img_original = np.zeros((512, 512, 3), np.uint8)
    img_original = cv2.imread(path_to_images + files[image_counter], cv2.IMREAD_COLOR)
    img_original_resized = cv2.resize(img_original, (0, 0), None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_NEAREST)
    img_original_resized_roi = img_original_resized[roi_top_left_y:roi_bottom_right_y, roi_top_left_x:roi_bottom_right_x]
    object_6_to_copy = None
    object_9_to_copy = None
    key = 0
    # MAIN LOOP - redraw at ~50 Hz, exit on ESC (27)
    while key != 27:
        if current_object is not None:
            # move selected object
            if key == ord('w'):
                current_object.move(0, -1)
            elif key == ord('s'):
                current_object.move(0, 1)
            elif key == ord('a'):
                current_object.move(-1, 0)
            elif key == ord('d'):
                current_object.move(1, 0)
            # resize selected object (lowercase shrinks/grows one edge,
            # uppercase the opposite direction of the same edge)
            elif key == ord('q'):
                current_object.increase_size(top=-1)
            elif key == ord('z'):
                current_object.increase_size(top=1)
            elif key == ord('Q'):
                current_object.increase_size(down=1)
            elif key == ord('Z'):
                current_object.increase_size(down=-1)
            elif key == ord('e'):
                current_object.increase_size(left=-1)
            elif key == ord('c'):
                current_object.increase_size(left=1)
            elif key == ord('E'):
                current_object.increase_size(right=1)
            elif key == ord('C'):
                current_object.increase_size(right=-1)
            # choose object below
            elif key == ord('1'):
                current_object = select_object_below(objects_to_draw)
            # delete selected object
            elif key == ord('2'):
                objects_to_draw.remove(current_object)
            # change type to person
            elif key == ord('3'):
                current_object.change_type_to_person()
            # change type to car
            elif key == ord('4'):
                current_object.change_type_to_car()
            # change type to hidden
            elif key == ord('5'):
                current_object.change_type_to_hidden()
            elif key == ord('6'):
                current_object.change_type_to_cyclist()
            elif key == ord('7'):
                current_object.change_id(9)
        # next image
        if key == ord('m'):
            image_counter += 1
            print "Image number: " + str(image_counter)
            img_original = cv2.imread(path_to_images + files[image_counter], cv2.IMREAD_COLOR)
            img_original_resized = cv2.resize(img_original, (0, 0), None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_NEAREST)
            img_original_resized_roi = img_original_resized[roi_top_left_y:roi_bottom_right_y, roi_top_left_x:roi_bottom_right_x]
            if flag_auto_load:
                file_to_open = path_to_description + str(image_counter) + "_" + files[image_counter] + ".pickle"
                if exists(file_to_open):
                    with open(file_to_open, "rb") as f:
                        objects_to_draw = pickle.load(f)
        # previous image
        elif key == ord('n'):
            image_counter -= 1
            print "Image number: " + str(image_counter)
            img_original = cv2.imread(path_to_images + files[image_counter], cv2.IMREAD_COLOR)
            img_original_resized = cv2.resize(img_original, (0, 0), None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_NEAREST)
            img_original_resized_roi = img_original_resized[roi_top_left_y:roi_bottom_right_y, roi_top_left_x:roi_bottom_right_x]
            if flag_auto_load:
                file_to_open = path_to_description + str(image_counter) + "_" + files[image_counter] + ".pickle"
                if exists(file_to_open):
                    with open(file_to_open, "rb") as f:
                        objects_to_draw = pickle.load(f)
        # save the description
        elif key == ord('p'):
            file_to_save = path_to_description + str(image_counter) + "_" + files[image_counter] + ".pickle"
            print file_to_save
            with open(file_to_save, "wb") as f:
                pickle.dump(objects_to_draw, f)
        # load the description
        elif key == ord('o'):
            file_to_open = path_to_description + str(image_counter) + "_" + files[image_counter] + ".pickle"
            if exists(file_to_open):
                with open(file_to_open, "rb") as f:
                    objects_to_draw = pickle.load(f)
        # toggle auto load of description (invoked when choosing next image)
        elif key == ord('l'):
            flag_auto_load = not flag_auto_load
            print "Auto load state: " + str(flag_auto_load)
        # remove all created objects
        elif key == ord('k'):
            objects_to_draw = []
        elif key == ord('v'):
            # remember deep copies of the boxes with ids 6 and 9
            object_6_to_copy = copy.deepcopy(next(x for x in objects_to_draw if x.id == 6))
            object_9_to_copy = copy.deepcopy(next(x for x in objects_to_draw if x.id == 9))
        elif key == ord('f'):
            # replace box 6 with the stored copies of boxes 6 and 9
            objects_to_draw.remove(next(x for x in objects_to_draw if x.id == 6))
            objects_to_draw.append(object_6_to_copy)
            objects_to_draw.append(object_9_to_copy)
        elif key == ord('r'):
            # NOTE(review): img_working here is the frame rendered in the
            # previous loop iteration; pressing 'r' before the first frame
            # is drawn would raise NameError.
            cv2.imwrite("sample_image.png", img_working)
        img_working = img_original_resized_roi.copy()
        # draw all stored objects
        for object_to_draw in objects_to_draw:
            object_to_draw.draw(img_working, font)
        cv2.imshow(window_name, img_working)
        key = cv2.waitKey(20)
    cv2.destroyAllWindows()
| {
"repo_name": "Michal-Fularz/database_marking_tool",
"path": "database_marking_tool.py",
"copies": "1",
"size": "9668",
"license": "mit",
"hash": 4593501376108989000,
"line_mean": 35.4830188679,
"line_max": 130,
"alpha_frac": 0.5742656185,
"autogenerated": false,
"ratio": 3.579415031469826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4653680649969826,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
import math
import cv2
class Point:
    """A mutable 2D coordinate, initialised at the origin."""

    def __init__(self):
        # both coordinates start at zero
        self.x = 0
        self.y = 0

    def update(self, x, y):
        """Set both coordinates at once."""
        self.x, self.y = x, y

    def change(self, dx=0, dy=0):
        """Translate the point by the given offsets."""
        self.x, self.y = self.x + dx, self.y + dy

    def to_tuple(self):
        """Return the coordinates as an (x, y) tuple."""
        return (self.x, self.y)
class ObjectInformation:
    """A labelled, axis-aligned bounding box used to annotate one object.

    The geometry is stored redundantly - as two corner points and as a
    centre with width/height - and the two private _update_* helpers keep
    the representations in sync after every edit.
    """
    def __init__(self):
        # centre + size representation
        self.centre = Point()
        self.width = 0
        self.height = 0
        # corner representation (corners may arrive in any order)
        self.point_top_left = Point()
        self.point_bottom_right = Point()
        # selection state and the pixel where the selecting click happened
        self.selected = False
        self.point_of_selection = Point()
        self._update_based_on_points()
        # semantic label and numeric identifier
        self.type = "type"
        self.id = 0
    def init(self, x1, y1, x2, y2, _id):
        """(Re)initialise the corners and id; type starts as "temporary"."""
        self.point_top_left.update(x1, y1)
        self.point_bottom_right.update(x2, y2)
        self._update_based_on_points()
        self.id = _id
        self.type = "temporary"
    def resize(self, x2, y2):
        """Drag the bottom-right corner to (x2, y2)."""
        self.point_bottom_right.update(x2, y2)
        self._update_based_on_points()
    def increase_size(self, top=0, down=0, left=0, right=0):
        """Nudge one edge of the box; only the first non-zero argument
        (checked in top/down/left/right order) takes effect per call."""
        if top != 0:
            self.point_top_left.y += top
        elif down != 0:
            self.point_bottom_right.y += down
        elif left != 0:
            self.point_top_left.x += left
        elif right != 0:
            self.point_bottom_right.x += right
        self._update_based_on_points()
    def finish(self, x2, y2):
        """Finalise a rubber-banded box: fix the corner, default to person."""
        self.point_bottom_right.update(x2, y2)
        self.change_type_to_person()
        self._update_based_on_points()
    def change_type_to_person(self):
        self.type = "person"
    def change_type_to_car(self):
        self.type = "car"
    def change_type_to_cyclist(self):
        self.type = "cyclist"
    def change_type_to_hidden(self):
        self.type = "hidden"
    def change_id(self, new_id):
        self.id = new_id
    def move(self, dx, dy):
        """Translate the whole box by (dx, dy).

        NOTE(review): the corners are rebuilt from the float centre with
        math.floor, so the rebuilt span is 2*floor(width/2) - one pixel
        narrower for odd widths; confirm that rounding is acceptable.
        """
        self.centre.change(dx, dy)
        self._update_base_on_centre_and_size()
    def _update_based_on_points(self):
        # derive the (float) centre and absolute size from the corners
        x1 = self.point_top_left.x
        x2 = self.point_bottom_right.x
        y1 = self.point_top_left.y
        y2 = self.point_bottom_right.y
        self.centre.update((x1 + x2)/2.0, (y1 + y2)/2.0)
        self.width = abs(x1-x2)
        self.height = abs(y1-y2)
    def _update_base_on_centre_and_size(self):
        # derive integer corners back from centre and size
        x1 = int(self.centre.x - math.floor(self.width/2))
        x2 = int(self.centre.x + math.floor(self.width/2))
        y1 = int(self.centre.y - math.floor(self.height/2))
        y2 = int(self.centre.y + math.floor(self.height/2))
        self.point_top_left.update(x1, y1)
        self.point_bottom_right.update(x2, y2)
    def select(self, x, y):
        """Mark as selected, remembering the click position."""
        self.point_of_selection.update(x, y)
        self.selected = True
    # unselect is not in a dictionary, delesect is, another option in uncheck
    # http://english.stackexchange.com/questions/18465/unselect-or-deselect
    def unselect(self):
        self.selected = False
    def check_if_inside(self, x, y):
        """Return True when (x, y) lies inside the box, whichever order
        the two corners are stored in."""
        x1 = self.point_top_left.x
        x2 = self.point_bottom_right.x
        y1 = self.point_top_left.y
        y2 = self.point_bottom_right.y
        result_x = False
        if x1 > x2:
            if x2 <= x <= x1:
                result_x = True
        else:
            if x1 <= x <= x2:
                result_x = True
        result_y = False
        if y1 > y2:
            if y2 <= y <= y1:
                result_y = True
        else:
            if y1 <= y <= y2:
                result_y = True
        result = False
        if result_x and result_y:
            result = True
        return result
    def __choose_colour_and_width(self):
        # BGR colour and line width, chosen by selection state then type
        colour = (100, 100, 100)
        width = 2
        if self.selected:
            colour = (0, 255, 0)
            width = 3
        elif self.type == "temporary":
            colour = (0, 100, 255)
            width = 2
        elif self.type == "person":
            colour = (0, 0, 255)
            width = 2
        elif self.type == "car":
            colour = (255, 0, 0)
            width = 2
        elif self.type == "cyclist":
            colour = (40, 160, 40)
            width = 3
        elif self.type == "hidden":
            colour = (100, 100, 100)
            width = 1
        return colour, width
    def draw(self, img, font):
        """Draw the label text, a filled centre marker and the rectangle."""
        colour, width = self.__choose_colour_and_width()
        cv2.putText(img, self.type + " " + str(self.id), self.point_top_left.to_tuple(), font, 1, colour, 1)
        # small 6x6 filled square marking the centre
        centre_x1 = int(self.centre.x - 3)
        centre_y1 = int(self.centre.y + 3)
        centre_x2 = int(self.centre.x + 3)
        centre_y2 = int(self.centre.y - 3)
        cv2.rectangle(img, (centre_x1, centre_y1), (centre_x2, centre_y2), colour, -1)
        cv2.rectangle(img, self.point_top_left.to_tuple(), self.point_bottom_right.to_tuple(), colour, width)
| {
"repo_name": "Michal-Fularz/database_marking_tool",
"path": "ObjectInformation.py",
"copies": "1",
"size": "4896",
"license": "mit",
"hash": -99276013467842400,
"line_mean": 25.6086956522,
"line_max": 109,
"alpha_frac": 0.5261437908,
"autogenerated": false,
"ratio": 3.2041884816753927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4230332272475393,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
import numpy as np
class MF_lbp:
    """Non-rotation-invariant uniform LBP encoder for 3x3 neighbourhoods.

    Each of the 58 uniform 8-bit patterns (and its bitwise complement,
    255 - v, which describes the same edge with inverted contrast) maps to
    one of the codes 0..28; every non-uniform pattern maps to 29.

    Bug fix: the original table wrote the pair for pattern 112 as
    (112, 243) - 243 is the complement of 12 and was already mapped to
    code 8 - which both clobbered lut[243] with 24 and left lut[143]
    (the true complement of 112) at the catch-all value 29.
    """

    def __init__(self, use_test_version=False):
        """Build the 256-entry pattern -> code lookup table.

        use_test_version: when True, build a minimal LUT (all zeros except
        pattern 16 -> 11) used by unit tests.
        """
        if use_test_version:
            self.encoded_lbp_lut = [0] * 256
            self.encoded_lbp_lut[16] = 11
        else:
            # non-uniform patterns share the catch-all code 29
            self.encoded_lbp_lut = [29] * 256
            # (pattern, complement) pairs in code order 0..28; for every
            # pair the complement is exactly 255 - pattern
            uniform_pairs = [
                (0, 255), (1, 254), (2, 253), (3, 252), (4, 251), (6, 249),
                (7, 248), (8, 247), (12, 243), (14, 241), (15, 240),
                (16, 239), (24, 231), (28, 227), (30, 225), (31, 224),
                (32, 223), (48, 207), (56, 199), (60, 195), (62, 193),
                (63, 192), (64, 191), (96, 159), (112, 143), (120, 135),
                (124, 131), (126, 129), (127, 128),
            ]
            for code, (pattern, complement) in enumerate(uniform_pairs):
                self.encoded_lbp_lut[pattern] = code
                self.encoded_lbp_lut[complement] = code

    def calc_nrulbp_3x3(self, image):
        """Return the encoded LBP image for a 2D greyscale array.

        Border pixels (no full 3x3 neighbourhood) stay 0.  For every inner
        pixel, bit k of the raw pattern is set when the k-th neighbour is
        strictly brighter than the centre; the pattern is then encoded
        through the lookup table.
        """
        rows = image.shape[0]
        cols = image.shape[1]
        nrulbp_3x3_image = np.zeros((rows, cols), dtype=np.double)
        lut = self.encoded_lbp_lut
        # neighbour offsets in LUT bit order: bit 0 = top-left ... bit 7 =
        # bottom-right (row-major scan of the 3x3 window, centre skipped)
        offsets = ((-1, -1), (-1, 0), (-1, 1),
                   (0, -1), (0, 1),
                   (1, -1), (1, 0), (1, 1))
        for r in range(1, rows - 1):
            for c in range(1, cols - 1):
                central_pixel = int(image[r, c])
                raw_lbp_descriptor = 0
                for bit, (dr, dc) in enumerate(offsets):
                    # int() avoids uint8 wrap-around in the comparison
                    if int(image[r + dr, c + dc]) > central_pixel:
                        raw_lbp_descriptor |= 1 << bit
                nrulbp_3x3_image[r, c] = lut[raw_lbp_descriptor]
        return np.asarray(nrulbp_3x3_image)
| {
"repo_name": "PUTvision/decision_tree",
"path": "decision_trees/LBP/MF_lbp.py",
"copies": "2",
"size": "3844",
"license": "mit",
"hash": 9209180079129229000,
"line_mean": 48.9220779221,
"line_max": 81,
"alpha_frac": 0.5319979188,
"autogenerated": false,
"ratio": 2.8729446935724963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9326228643414625,
"avg_score": 0.01574279379157428,
"num_lines": 77
} |
__author__ = 'Amin'
import sys
import math
from collections import deque
import copy
# TODO: ordered dict require manual sorting after all the items were inserted
# TODO: I am not sure if this will speed the is in part
#from collections import OrderedDict
import cProfile
from codingame_solutions.very_hard.very_hard_The_Resistance_utils import Morse
class Solution:
    """One partial decoding state: how many words were matched so far, the
    matched words, and the half-open [start, end) slice of the message
    still being examined.

    Bug fix: the original signature used a mutable default ([]) for
    words_thus_far, so every Solution created without that argument shared
    a single list and accumulated each other's words; None is now used as
    the sentinel and a fresh list is created per instance.
    """
    def __init__(self, number_of_words=0, words_thus_far=None, part_to_use_start_index=0, part_in_use_end_index=1):
        self.number_of_words = number_of_words
        self.words_thus_far = [] if words_thus_far is None else words_thus_far
        self.part_to_use_start_index = part_to_use_start_index
        self.part_in_use_end_index = part_in_use_end_index
class very_hard_The_Resistance_2:
    """Count the ways a Morse sequence splits into dictionary words
    (CodinGame "The Resistance") via a breadth-first search over Solution
    states.

    NOTE(review): this is an experimental/"ineffective" variant - several
    alternative implementations are left in, partly commented out, and
    the methods load_from_file/use_prepared_set called by foo() are not
    defined here; confirm they exist in the full file.
    """
    def __init__(self):
        self.morse = Morse()
        # words[k]: dict mapping every k-character Morse string to the
        # word(s)/count found for it
        self.words = []
        #self.keys = set()#[]
        for i in range(0, 50):
            self.words.append({})
            #self.keys.append(set())
        # the full Morse-encoded message to decode
        self.l = ""
        print([len(x) for x in self.words], file=sys.stderr)
    def spawn_new_solution(self, solutions, part_to_use_start_index, part_in_use_end_index):
        """Queue a fresh Solution continuing from the given slice indices."""
        solutions.append(Solution(
            #copy.deepcopy(current_solution.words_thus_far),
            part_to_use_start_index=copy.deepcopy(part_to_use_start_index),
            part_in_use_end_index=copy.deepcopy(part_in_use_end_index)
        ))
    def get_dict_key(self, current_solution):
        """Slice of the message the given solution is currently examining."""
        return self.l[current_solution.part_to_use_start_index:current_solution.part_in_use_end_index]
    def is_currently_anaylzed_part_in_dictionary1(self, current_dict_key):
        """Lookup variant 1: dict.get with an empty-string sentinel."""
        if len(self.words[len(current_dict_key)]) > 0:
            if self.words[len(current_dict_key)].get(current_dict_key, "") != "":
                return True
            else:
                return False
        else:
            return False
    def is_currently_anaylzed_part_in_dictionary2(self, current_dict_key):
        """Lookup variant 2: membership in self.keys.

        NOTE(review): self.keys is never initialised (its assignment in
        __init__ is commented out), so calling this raises AttributeError.
        """
        #if len(self.words[len(current_dict_key)]) > 0:
        #if current_dict_key in self.words[len(current_dict_key)]:
        if current_dict_key in self.keys:
            return True
        else:
            return False
        #else:
        #return False
    def is_currently_anaylzed_part_in_dictionary(self, current_dict_key):
        """Lookup variant in use: EAFP indexing, KeyError -> False."""
        if len(self.words[len(current_dict_key)]) > 0:
            try:
                self.words[len(current_dict_key)][current_dict_key]
                return True
            except KeyError:
                return False
        else:
            return False
    def check_one_solution(self, current_solution, solutions):
        """Advance one solution through the message, consuming signs one at
        a time; on every dictionary hit, spawn a continuation that keeps
        looking for a longer word and commit the found word(s) here.

        NOTE(review): after a hit only current_dict_key_len is reset -
        current_dict_key itself keeps its characters, which looks
        inconsistent with the "clear" comment below; confirm intended.
        """
        current_dict_key = ""
        current_dict_key_len = 0
        l_len = len(self.l)
        # if solution still has signs to process
        # TODO - add min value
        # TODO - this does not work as it should - current_dict_len is not passed to new solutions
        # TODO - same goes with current_dict_key
        # TODO - change string as key to values - it should be faster
        # TODO - http://code.activestate.com/recipes/198157-improve-dictionary-lookup-performance/
        while current_solution.part_in_use_end_index < l_len and current_dict_key_len < 48:
            # get new sign to process
            # add it to the collections holding currently processed set of signs
            # TODO - check how fast this part is - maybe join is faster?
            current_dict_key += self.l[current_solution.part_in_use_end_index-1]
            current_solution.part_in_use_end_index += 1
            current_dict_key_len += 1
            #print("current_solution.part_in_use: " + "".join(current_solution.part_in_use), file=sys.stderr)
            # if current analysed set of signs is a word in dictionary
            if self.is_currently_anaylzed_part_in_dictionary(current_dict_key):
                # spawn new solution that continue looking for longer words
                self.spawn_new_solution(solutions, current_solution.part_to_use_start_index, current_solution.part_in_use_end_index)
                # get all available words
                words_found = self.words[len(current_dict_key)][current_dict_key]
                print("words_found: " + str(words_found), file=sys.stderr)
                # clear currently processed set of signs
                current_solution.part_to_use_start_index = current_solution.part_in_use_end_index
                current_dict_key_len = 0
                # for all words except last spawn new solutions
                # for word in words_found[:-1]:
                #     new_words_thus_far = copy.deepcopy(current_solution.words_thus_far)
                #     new_words_thus_far.append(word)
                #     solutions.append(Solution(
                #         current_solution.position_in_dictionary,
                #         new_words_thus_far,
                #         copy.deepcopy(current_solution.part_to_use),
                #         copy.deepcopy(current_solution.part_in_use)
                #     ))
                #
                # current_solution.words_thus_far.append(words_found[-1])
                current_solution.words_thus_far.append(words_found)
                current_solution.number_of_words += 1
    def find_results(self, solutions):
        """Drain the work queue, expanding each solution in turn.

        NOTE(review): nothing is ever appended to results (the append is
        commented out), so this always returns an empty list as written.
        """
        results = []
        # keep solutions on stack, and do one after another
        while len(solutions) > 0:
            # get one solution
            current_solution = solutions.popleft()
            self.check_one_solution(current_solution, solutions)
            # print("current_solution.part_in_use: " + str(current_solution.part_in_use), file=sys.stderr)
            #print("current_solution.words_thus_far: " + str(current_solution.words_thus_far), file=sys.stderr)
            # TODO - use with new = solution without lists
            #if len(current_solution.part_in_use) == 0:
            #results.append(current_solution.words_thus_far)
        return results
    def run(self):
        """Seed the queue with the initial state and print the count of
        complete decodings."""
        solutions = deque()
        solutions.append(Solution(
            part_to_use_start_index=0,
            part_in_use_end_index=1
        ))
        results = self.find_results(solutions)
        r = ""
        for result in results:
            r += str(result) + "\n"
        print("result: " + r, file=sys.stderr)
        # Write an action using print
        # To debug: print("Debug messages...", file=sys.stderr)
        print(len(results))
def foo():
    """Build the solver, load its dictionary and run it (profiled below)."""
    app = very_hard_The_Resistance_2()
    #app.load_from_file()
    # NOTE(review): use_prepared_set is not defined in the class as visible
    # here - confirm it exists elsewhere in the file before running.
    app.use_prepared_set()
    print(app.words, file=sys.stderr)
    app.run()
if __name__ == "__main__":
    # run under the profiler by default; plain call kept for convenience
    cProfile.run('foo()')
    #foo()
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/very_hard/very_hard_The_Resistance_ineffective_dict.py",
"copies": "1",
"size": "6675",
"license": "mit",
"hash": 8914619105042898000,
"line_mean": 36.0833333333,
"line_max": 132,
"alpha_frac": 0.5911610487,
"autogenerated": false,
"ratio": 3.7394957983193278,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9810509245604773,
"avg_score": 0.004029520282910942,
"num_lines": 180
} |
__author__ = 'Amin'
import sys
import math
import numpy as np
from collections import deque
from collections import namedtuple
# One record per queued group: the situation (group count on board,
# earnings, rides) the last time this group boarded as the FIRST group of a
# ride; number_of_groups_taken == -1 means "never been first yet".
PickupInfo = namedtuple("PickupInfo", ["number_of_groups_taken", "earnings", "rides_taken"])
# problem input: seats per ride, rides per day, number of queued groups
number_of_places, number_of_rides_per_day, number_of_groups = [int(i) for i in input().split()]
#groups = []
#groups = np.zeros(number_of_groups)
groups = deque([], maxlen=number_of_groups)
groups_pickup_info = deque([], maxlen=number_of_groups)
for i in range(number_of_groups):
    #groups.append(int(input()))
    #groups[i] = int(input())
    groups.append(int(input()))
    groups_pickup_info.append(PickupInfo(-1, 0, 0))
print("Places: " + str(number_of_places), file=sys.stderr)
print("Number of groups: " + str(len(groups)), file=sys.stderr)
print("Sum of groups: " + str(sum(groups)), file=sys.stderr)
##print("Groups: \n" + str(groups), file=sys.stderr)
##print("groups_pickup_info: \n" + str(groups_pickup_info), file=sys.stderr)
print("groups_pickup_info len: " + str(len(groups_pickup_info)), file=sys.stderr)
print("groups len: " + str(len(groups)), file=sys.stderr)
earnings = 0
rides_taken = 0
places_taken = 0
current_group_size = 0
flag_take_next_group = True
flag_group_used = True
flag_loop_found = False
number_of_groups_served = 0
#groups_on_the_rollercoaster = []
groups_on_the_rollercoaster = deque([], maxlen=number_of_groups)
groups_pickup_info_on_the_rollercoaster = deque([], maxlen=number_of_groups)
#number_of_groups_on_the_rollercoaster = 0
# PHASE 1: simulate ride by ride until either the day ends or the queue
# state repeats (detected through the PickupInfo sentinels), in which case
# the repeating cycle is extrapolated arithmetically below.
while rides_taken < number_of_rides_per_day and not flag_loop_found:
    #while flag_take_next_group and len(groups) > 0:
    #while flag_take_next_group and number_of_groups_on_the_rollercoaster < number_of_groups:
    while flag_take_next_group and len(groups) > 0:
        if flag_group_used:
            #current_group_size = groups.pop(0)
            #current_group_size = groups[0]
            #groups = np.roll(groups, -1)
            current_group_size = groups.popleft()
            current_group_pickup_info = groups_pickup_info.popleft()
            #print("groups_pickup_info_on_the_rollercoaster len: " + str(len(groups_pickup_info_on_the_rollercoaster)), file=sys.stderr)
            # this group boards an empty train AND already has a recorded
            # pickup state -> the whole queue configuration repeats
            if len(groups_pickup_info_on_the_rollercoaster) == 0 and current_group_pickup_info.number_of_groups_taken != -1:
                flag_loop_found = True
                print("New way of loop searching works!!!", file=sys.stderr)
                # put taken groups back to que
                groups.appendleft(current_group_size)
                groups_pickup_info.appendleft(current_group_pickup_info)
                break
        #print("Groups: \n" + str(groups), file=sys.stderr)
        if (places_taken + current_group_size) <= number_of_places:
            places_taken += current_group_size
            #groups_on_the_rollercoaster.append(current_group_size)
            groups_on_the_rollercoaster.append(current_group_size)
            groups_pickup_info_on_the_rollercoaster.append(current_group_pickup_info)
            #number_of_groups_on_the_rollercoaster += 1
            number_of_groups_served += 1
            flag_group_used = True
        else:
            # write current situation (groups taken, earnings, rides taken) into the first group pickup info
            groups_pickup_info_on_the_rollercoaster[0] = PickupInfo(len(groups_pickup_info_on_the_rollercoaster), earnings, rides_taken)
            flag_take_next_group = False
            flag_group_used = False
    if len(groups) == 0:
        # everyone fitted on this ride: still record the situation
        groups_pickup_info_on_the_rollercoaster[0] = PickupInfo(len(groups_pickup_info_on_the_rollercoaster), earnings, rides_taken)
    if not flag_loop_found:
        rides_taken += 1
        earnings += places_taken
        #print("Ride taken (nr: " + str(rides_taken) + ")! Number of people taken: " + str(places_taken), file=sys.stderr)
        #print("Earnings: " + str(earnings) + ", groups served: " + str(number_of_groups_served), file=sys.stderr)
        places_taken = 0
        #groups += groups_on_the_rollercoaster
        #groups_on_the_rollercoaster.clear()
        # riders rejoin the back of the queue in boarding order
        for i in range(len(groups_on_the_rollercoaster)):
            groups.append(groups_on_the_rollercoaster.popleft())
            groups_pickup_info.append(groups_pickup_info_on_the_rollercoaster.popleft())
        #groups.extend(groups_on_the_rollercoaster)
        #groups_on_the_rollercoaster.clear()
        #number_of_groups_on_the_rollercoaster = 0
        flag_take_next_group = True
        #print("current_group_pickup_info: " + str(current_group_pickup_info), file=sys.stderr)
        #print("groups_pickup_info: " + str(groups_pickup_info), file=sys.stderr)
        ##print("groups len: " + str(len(groups)), file=sys.stderr)
if flag_loop_found:
    # extrapolate: apply as many whole repeating cycles as fit in the
    # remaining rides, then fall through to simulate the leftover rides
    loop_length = rides_taken - groups_pickup_info[0].rides_taken
    loop_earnings = earnings - groups_pickup_info[0].earnings
    rides_left = number_of_rides_per_day - rides_taken
    print("loop_length: " + str(loop_length), file=sys.stderr)
    print("loop_earnings: " + str(loop_earnings), file=sys.stderr)
    print("rides_left: " + str(rides_left), file=sys.stderr)
    number_of_full_loops = rides_left // loop_length
    rides_after_all_loops = rides_left - number_of_full_loops * loop_length
    print("number_of_full_loops: " + str(number_of_full_loops), file=sys.stderr)
    print("rides_after_all_loops: " + str(rides_after_all_loops), file=sys.stderr)
    earnings += number_of_full_loops * loop_earnings
    rides_taken += number_of_full_loops * loop_length
    print("earnings: " + str(earnings), file=sys.stderr)
    print("rides_taken: " + str(rides_taken), file=sys.stderr)
# PHASE 2: plainly simulate whatever rides remain (a duplicate of the
# phase-1 loop without the loop detection)
current_group_size = 0
flag_take_next_group = True
flag_group_used = True
number_of_groups_served = 0
#groups_on_the_rollercoaster = []
groups_on_the_rollercoaster = deque([], maxlen=number_of_groups)
groups_pickup_info_on_the_rollercoaster = deque([], maxlen=number_of_groups)
#number_of_groups_on_the_rollercoaster = 0
while rides_taken < number_of_rides_per_day:
    #while flag_take_next_group and len(groups) > 0:
    #while flag_take_next_group and number_of_groups_on_the_rollercoaster < number_of_groups:
    while flag_take_next_group and len(groups) > 0:
        if flag_group_used:
            #current_group_size = groups.pop(0)
            #current_group_size = groups[0]
            #groups = np.roll(groups, -1)
            current_group_size = groups.popleft()
            current_group_pickup_info = groups_pickup_info.popleft()
            #print("groups_pickup_info_on_the_rollercoaster len: " + str(len(groups_pickup_info_on_the_rollercoaster)), file=sys.stderr)
        #print("Groups: \n" + str(groups), file=sys.stderr)
        if (places_taken + current_group_size) <= number_of_places:
            places_taken += current_group_size
            #groups_on_the_rollercoaster.append(current_group_size)
            groups_on_the_rollercoaster.append(current_group_size)
            groups_pickup_info_on_the_rollercoaster.append(current_group_pickup_info)
            #number_of_groups_on_the_rollercoaster += 1
            number_of_groups_served += 1
            flag_group_used = True
        else:
            # write current situation (groups taken, earnings, rides taken) into the first group pickup info
            groups_pickup_info_on_the_rollercoaster[0] = PickupInfo(len(groups_pickup_info_on_the_rollercoaster), earnings, rides_taken)
            flag_take_next_group = False
            flag_group_used = False
    rides_taken += 1
    earnings += places_taken
    #print("Ride taken (nr: " + str(rides_taken) + ")! Number of people taken: " + str(places_taken), file=sys.stderr)
    #print("Earnings: " + str(earnings) + ", groups served: " + str(number_of_groups_served), file=sys.stderr)
    places_taken = 0
    #groups += groups_on_the_rollercoaster
    #groups_on_the_rollercoaster.clear()
    for i in range(len(groups_on_the_rollercoaster)):
        groups.append(groups_on_the_rollercoaster.popleft())
        groups_pickup_info.append(groups_pickup_info_on_the_rollercoaster.popleft())
    #groups.extend(groups_on_the_rollercoaster)
    #groups_on_the_rollercoaster.clear()
    #number_of_groups_on_the_rollercoaster = 0
    flag_take_next_group = True
    #print("groups_pickup_info: " + str(groups_pickup_info), file=sys.stderr)
    ##print("groups len: " + str(len(groups)), file=sys.stderr)
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
print(int(earnings))
} |
__author__ = 'Amin'
import sys
import math
import numpy as np
from enum import Enum
from codingame_solutions.very_hard.very_hard_Triangulation import Batman
from codingame_solutions.very_hard.very_hard_Triangulation import Building
def calculate_distances(x, y, building):
    """Fill building.map in place with the Euclidean distance of every
    cell [row][col] from the point (x, y), where row corresponds to y and
    col to x."""
    for row in range(building.height):
        row_term = (row - y) ** 2
        for col in range(building.width):
            building.map[row][col] = math.sqrt(row_term + (col - x) ** 2)
def compare_distances(building_1, buidling_2):
    """Compare two distance maps cell by cell.

    Returns a new Building whose map holds 1 where building_1 is farther,
    -1 where it is closer, and 3 where both distances are equal.
    """
    comparison = Building(building_1.width, building_1.height)
    for row in range(building_1.height):
        for col in range(building_1.width):
            first = building_1.map[row][col]
            second = buidling_2.map[row][col]
            if first > second:
                verdict = 1
            elif first < second:
                verdict = -1
            else:
                verdict = 3
            comparison.map[row][col] = verdict
    return comparison
def calculate_number_of_points_further(building):
    """Count cells marked -1 (farther) and 3 (equidistant) in a comparison
    map and print both counts whenever at least one is non-zero."""
    number_of_points_further = sum(
        1
        for i in range(building.height)
        for j in range(building.width)
        if building.map[i][j] == -1
    )
    number_of_points_same = sum(
        1
        for i in range(building.height)
        for j in range(building.width)
        if building.map[i][j] == 3
    )
    if number_of_points_further != 0 or number_of_points_same != 0:
        print("number_of_points_same: " + str(number_of_points_same))
        print("number_of_points_further: " + str(number_of_points_further))
def find_best_spot(building_staring):
    """Brute-force scan of candidate positions in the upper half of the
    building, printing for each one how many cells would be farther from
    it than from the reference map.

    NOTE(review): the zeroing line slices rows by width//2 (elsewhere
    height//2 is used) and relies on building.map being a numpy array -
    for a plain list, map[k:][:] = 0 would modify a throwaway copy (or
    fail); confirm the Building implementation.
    """
    building_test = Building(building_staring.width, building_staring.height)
    for i in range(building_staring.height//2 + 1):
        for j in range(building_staring.width):
            calculate_distances(j, i, building_test)
            building_result = compare_distances(building_staring, building_test)
            building_result.map[building_result.width//2:][:] = 0
            print("x: " + str(j) + ", y: " + str(i))
            calculate_number_of_points_further(building_result)
if __name__ == '__main__':
    # Interactive helper: read two Batman positions from stdin, compare the
    # two resulting distance maps and print which half-building cells got
    # closer/farther after the move (top half only, new position marked 9).
    np.set_printoptions(precision=2)
    width = 24
    height = 6
    building_previous = Building(width, height)
    building_current = Building(width, height)
    x, y = [int(v) for v in input().split()]
    batman = Batman(x, y)
    calculate_distances(batman.x_current, batman.y_current, building_previous)
    #find_best_spot(building_previous)
    x, y = [int(v) for v in input().split()]
    new_x, new_y = x, y
    batman.set_position(new_x, new_y)
    calculate_distances(batman.x_current, batman.y_current, building_current)
    building_result = compare_distances(building_previous, building_current)
    # blank out the bottom half, then mark the new position
    building_result.map[height//2:][:] = 0
    building_result.map[new_y][new_x] = 9
    print(building_result.map)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/very_hard/very_hard_Triangulation_helper.py",
"copies": "1",
"size": "2806",
"license": "mit",
"hash": 5079001830048179000,
"line_mean": 31.6279069767,
"line_max": 80,
"alpha_frac": 0.6297220242,
"autogenerated": false,
"ratio": 3.247685185185185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4377407209385185,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
import sys
import math
# convert value provided as HH:MM to a number of minutes
def hours_and_minutes_to_minutes():
    """Read a HH:MM value from stdin and print a derived number.

    NOTE(review): the formula evaluates to HH*6 + MM (d0*60 + d1*6 is six
    times the hour), not HH*60 + MM as the header comment suggests -
    confirm against the original puzzle statement before reusing.
    """
    d=input()
    print((int(d[0])*60+int(d[1])*6+int(d[3])*10+int(d[4])))
# check if provided number is lucky - sum of first three digits is equal to sum of next three digits
# eg.
# 111003 true
# 202121 true
def lucky_number():
l=[int(i) for i in input()]
if sum(l[0:3])==sum(l[3:6]):print("true")
else:print("false")
# Two adventurers are going to duel, each of them has health points HP1 and HP2 and apply D1 and D2 damage at their opponents each round.
# Your program must output which adventurer win and how many round are needed to end the duel.
# There are no draw.
def duel():
    """Read HP and damage for two duellists ("hp dmg" on a line each) and
    print the winner's number followed by the number of rounds needed.

    Each fighter needs ceil(opponent_hp / own_damage) rounds to win; the one
    needing fewer rounds wins, with ties going to fighter 2 exactly as in the
    original comparison (n1 < n2 -> fighter 1, otherwise fighter 2).

    Replaces the old d2=0.001 hack (and the missing d1 check flagged by the
    original TODOs): a fighter dealing no damage can never win, so his round
    count is treated as infinite.
    """
    h1, d1 = [int(i) for i in input().split()]
    h2, d2 = [int(i) for i in input().split()]
    # Ceiling division without floats; math.inf when no damage is dealt.
    n1 = (h2 + d1 - 1) // d1 if d1 > 0 else math.inf
    n2 = (h1 + d2 - 1) // d2 if d2 > 0 else math.inf
    if n1 < n2:
        print("1 " + str(n1))
    else:
        print("2 " + str(n2))
# Your program must prepare a sentence for encryption and reshape it in a grid.
# You are given a sentence and a number of columns col.
# First, you need to remove all whitespaces.
# Then divide the processed text into parts of col characters each.
# The last part can contain less than col letters.
# Each part is placed on the new line.
# INPUT:
# Line 1: a text sentence.
# Line 2: an integer number col.
# OUTPUT:
# The text grid with col columns.
# CONSTRAINTS:
# 0 ? sentence length ? 100
# 0 < col ? 10
# A text contains at least one non-whitespace character.
# EXAMPLE:
# Input
# Hello Perfect World
# 5
# Output:
# Hello
# Perfe
# ctWor
# ld
def split_text_into_columns():
    """Strip spaces from the input sentence and print it re-wrapped into a
    grid of `col` characters per line (last line may be shorter)."""
    text = input().replace(" ", "")
    col = int(input())
    out = text[0]
    # Insert a newline before every character whose index is a multiple of col.
    for idx in range(1, len(text)):
        if idx % col == 0:
            out += "\n"
        out += text[idx]
    print(out)
# Your program must perform a binary OR on two binary numbers given through the standard input and print the result to the standard output.
# OR Truth Table
# Input Output
# A B
# 0 0 0
# 0 1 1
# 1 0 1
# 1 1 1
# Warning, the number in output must have the same number of digits as the given numbers.
# INPUT:
# 2 binary numbers n1 and n2, separated by spaces.
# OUTPUT:
# The result of an OR between n1 and n2.
# CONSTRAINTS:
# n1 and n2 have the same number of digits.
# EXAMPLE:
# Input: 001 011
# Output: 011
def operation_or():
    """Read two equal-length binary numbers separated by a space and print
    their digit-wise OR (same number of digits as the inputs)."""
    first, second = input().split()
    result = "".join(
        "1" if a == "1" or b == "1" else "0"
        for a, b in zip(first, second)
    )
    print(result)
# Your program must find the point that is exactly between two other points.
# You are given the coordinates (x, y) of two points which bind a line segment.
# The midpoint of this line segment is the target point.
# Be careful with float numbers and use . as a decimal mark.
def midpoint():
    """Read two integer points ("x y" on a line each) and print the midpoint
    of the segment, dropping ".0" when a coordinate is a whole number."""
    x1, y1 = [int(v) for v in input().split()]
    x2, y2 = [int(v) for v in input().split()]
    mid_x = (x1 + x2) / 2
    mid_y = (y1 + y2) / 2
    # Whole coordinates are printed as integers (e.g. "2" instead of "2.0").
    if mid_x == int(mid_x):
        mid_x = int(mid_x)
    if mid_y == int(mid_y):
        mid_y = int(mid_y)
    print(str(mid_x) + " " + str(mid_y))
# How many times is the most common letter used in a given string?
# The string only contains lowercase letters and spaces.
def most_common_letter():
    """Print how many times the most frequent letter occurs in a sentence of
    lowercase letters and spaces."""
    # One counter slot per letter, indexed by ord(ch) - ord('a').
    counts = [0] * 40
    for ch in input():
        if ch != " ":
            counts[ord(ch) - 97] += 1
    print(max(counts))
# list of values is given, sort them and print
def sort_values():
    """Read a count N, then N integers (one per line), and print them sorted
    in ascending order as a Python list literal."""
    values = [int(input()) for _ in range(int(input()))]
    print(sorted(values))
# TODO:
# The Hofstadter Conway sequence is defined like so:
# a(1) = 1.
# a(2) = 1.
# a(n) = a(a(n - 1)) + a(n - a(n - 1)), for n > 2.
# Your program must ouput the first N terms of this sequence.
def a(n):
    """Return the n-th term (1-based) of the Hofstadter-Conway sequence.

    Definition (as stated in the comment above): a(1) = a(2) = 1 and
    a(n) = a(a(n-1)) + a(n - a(n-1)) for n > 2.

    The original body computed a(a(n-1) + a(n - a(n-1))) -- a single recursive
    call on the *sum* of the arguments -- which is not that recurrence (and
    does not match the C version quoted below either).
    """
    if n < 3:
        return 1
    return a(a(n - 1)) + a(n - a(n - 1))
# try converting to this:
#return(a(a(n-1)+a(n-a(n-1))),1)[n<3]
# C version from Kuba:
# N,i;
# int a(int b){return b<3?1:(a(a(b-1))+a(b-a(b-1)));};
# int main()
# {
# scanf("%d",&N);
# for(i=1;i<N;i++)
# printf("%d ",a(i));
# printf("%d\n"",a(N));
# }
# Given a certain number of blocks N, your program must return the height of the tallest possible 2D pyramid that can be created, followed by the number of unused blocks remaining.
# For example, a pyramid of height 3 contains 6 blocks: 3 for the first level, 2 for the second level and 1 for the last level.
# INPUT:
# Line 1: An integer N, the number of blocks to be used for the pyramid.
# OUTPUT:
# Line 1: Two integers H and R, where H is the greatest possible pyramid height, and R is the remaining unused blocks.
# CONSTRAINTS:
# 0 ? N < 50000
# EXAMPLE:
# Input
# 10
# Output
# 4 0
# general version
# n=int(input())
# flag_continue = True
# h=0
# r=n
# while flag_continue:
# print("h: " + str(h), file=sys.stderr)
# print("r: " + str(r), file=sys.stderr)
# if r>=(h+1):
# h+=1
# r-=h
# else:
# flag_continue = False
# print(str(h) + " " + str(r))
def tallest_pyramid():
    """Read a block count N and print the height of the tallest 2D pyramid
    (level k uses k blocks) followed by the number of unused blocks."""
    remaining = int(input())
    height = 0
    # Keep adding levels while enough blocks remain for the next one.
    while remaining >= height + 1:
        height += 1
        remaining -= height
    print(str(height) + " " + str(remaining))
# The program:
# Your given a scrambled sentence. You must output an unscrambled version of the same sentence using these rules:
# - First, print one in every two characters.
# - Then print every other character starting from the end, going backwards. Make sure you handle strings of both even and odd lengths.
def scrambled():
    """Print every second character of the input (starting at index 0), then
    the remaining characters in reverse order."""
    text = input()
    evens = text[0::2]
    odds_reversed = text[1::2][::-1]
    print(evens + odds_reversed)
if __name__ == "__main__":
hours_and_minutes_to_minutes()
lucky_number()
duel()
split_text_into_columns()
operation_or()
midpoint()
most_common_letter()
sort_values()
# Hofstadter Conway done but without reading values etc
tallest_pyramid()
scrambled()
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/Clash_of_Code/shortest.py",
"copies": "1",
"size": "5706",
"license": "mit",
"hash": 1043224973622212200,
"line_mean": 23.0759493671,
"line_max": 180,
"alpha_frac": 0.6342446547,
"autogenerated": false,
"ratio": 2.8402190144350423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39744636691350427,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
import sys
import math
from codingame_solutions.utilities.graph import Graph, GraphEdge
if __name__ == "__main__":
f = open("hard_Bender_The_Money_Machine/test06_in.txt")
n = int(f.readline())
n = int(input())
v_names = []
v_values = []
v_destination_1 = []
v_destination_2 = []
for i in range(n):
room = f.readline()
room = input()
vertex_name, vertex_value, vertex_neighbour_1, vertex_neighbour_2 = [v for v in room.split()]
v_names.append(int(vertex_name))
v_values.append(int(vertex_value))
if vertex_neighbour_1 != "E":
v_destination_1.append(int(vertex_neighbour_1))
else:
v_destination_1.append(-1)
if vertex_neighbour_2 != "E":
v_destination_2.append(int(vertex_neighbour_2))
else:
v_destination_2.append(-1)
# create graph
g = Graph(flag_dag=True)
for name in v_names:
g.add_vertex(name)
if v_destination_1[name] != -1:
g.add_edge(GraphEdge(name, v_destination_1[name], v_values[v_destination_1[name]]))
if v_destination_2[name] != -1:
g.add_edge(GraphEdge(name, v_destination_2[name], v_values[v_destination_2[name]]))
#print(g.edges(), file=sys.stderr)
#print(g.vertices(), file=sys.stderr)
#print(g.find_longest_path(0), file=sys.stderr)
max_dist = max([value for key, value in g.find_longest_path(0).items()]) + v_values[0]
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
print(max_dist)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/hard/hard_Bender_The_Money_Machine.py",
"copies": "1",
"size": "1613",
"license": "mit",
"hash": -2691377800585047000,
"line_mean": 28.3272727273,
"line_max": 101,
"alpha_frac": 0.5902045877,
"autogenerated": false,
"ratio": 3.1320388349514565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9212215460417612,
"avg_score": 0.0020055924467689175,
"num_lines": 55
} |
__author__ = 'Amin'
import sys
import math
from collections import deque
import copy
from codingame_solutions.very_hard.very_hard_The_Resistance_utils import load_from_file, load_from_input, load_from_prepared_data
from codingame_solutions.very_hard.very_hard_The_Resistance import generate_morse_dictionary, print_morse_dict_info
import cProfile
class Solution:
    """One partial decoding state for the Resistance puzzle: a position in the
    Morse-prefix dictionary plus the words matched so far and the span of the
    message currently being consumed."""

    def __init__(self, position_in_dictionary, words_thus_far=None, number_of_words_thus_far=0, part_to_use_start_index=0, part_in_use_end_index=1):
        self.position_in_dictionary = position_in_dictionary
        # The original default was a mutable list literal ([]), which Python
        # evaluates once: every instance created without an explicit list
        # shared (and mutated) the same object. Use a fresh list instead.
        self.words_thus_far = [] if words_thus_far is None else words_thus_far
        self.number_of_words_thus_far = number_of_words_thus_far
        self.part_to_use_start_index = part_to_use_start_index
        self.part_in_use_end_index = part_in_use_end_index
def process_solutions(solutions, message, morse_dictionary):
    """Exhaustively search all ways to split `message` into dictionary words.

    `solutions` is a deque of partial Solution states; each state walks the
    Morse-prefix tree (`morse_dictionary`) sign by sign. Whenever a tree node
    holds complete words, the current state restarts from the tree root while
    extra states are spawned both to keep matching a longer word and for every
    alternative word ending at this node. Returns a list of complete
    decodings, each a list of words.

    NOTE(review): the completeness test at the bottom relies on
    part_in_use_end_index sitting exactly one sign past the start of the last
    consumed word -- confirm before restructuring this loop.
    """
    results = []
    # keep solutions on stack, and do one after another
    while len(solutions) > 0:
        # get one solution
        current_solution = solutions.popleft()
        flag_no_more_elements = False
        # if solution still has signs to process and its pointer to dictionary is valid
        while current_solution.part_in_use_end_index < len(message) and not flag_no_more_elements:
            # get new sign to process
            current_sign = message[current_solution.part_in_use_end_index]
            # add it to the collections holding currently processed set of signs
            current_solution.part_in_use_end_index += 1
            # get next element from dictionary based on current sign
            current_solution.position_in_dictionary = current_solution.position_in_dictionary.get_next(current_sign)
            # if new position is valid
            if current_solution.position_in_dictionary is not None:
                # if there are some words for this position
                if current_solution.position_in_dictionary.flag_holds_words:
                    # spawn new solution that continue looking for longer words
                    solutions.append(Solution(
                        current_solution.position_in_dictionary,
                        copy.deepcopy(current_solution.words_thus_far),
                        current_solution.number_of_words_thus_far,
                        current_solution.part_to_use_start_index,
                        current_solution.part_in_use_end_index
                    ))
                    # get all available words
                    words_found = current_solution.position_in_dictionary.words
                    # clear currently processed set of signs
                    current_solution.part_to_use_start_index = current_solution.part_in_use_end_index - 1
                    # and set dictionary pointer to first element
                    current_solution.position_in_dictionary = morse_dictionary
                    current_solution.number_of_words_thus_far += 1
                    # for all words except last spawn new solutions
                    for word in words_found[:-1]:
                        new_words_thus_far = copy.deepcopy(current_solution.words_thus_far)
                        new_words_thus_far.append(word)
                        solutions.append(Solution(
                            current_solution.position_in_dictionary,
                            new_words_thus_far,
                            current_solution.number_of_words_thus_far,
                            current_solution.part_to_use_start_index,
                            current_solution.part_in_use_end_index
                        ))
                    # the current state itself continues with the last word
                    current_solution.words_thus_far.append(words_found[-1])
            else:
                flag_no_more_elements = True
        # keep this state only if it consumed the whole message cleanly
        if current_solution is not None and current_solution.part_in_use_end_index - current_solution.part_to_use_start_index == 1:
            results.append(current_solution.words_thus_far)
    return results
def process_and_print_results(results):
    """Dump every decoding to stderr, then print the total count to stdout
    (the count is the puzzle's expected answer)."""
    formatted = "".join(str(entry) + "\n" for entry in results)
    print("result: " + formatted, file=sys.stderr)
    # Write an action using print
    # To debug: print("Debug messages...", file=sys.stderr)
    print(len(results))
def main():
    """Load a Resistance test case, build the Morse-prefix dictionary, run the
    search and report the number of possible decodings."""
    #message, words, words_in_morse = load_from_prepared_data()
    message, words, words_in_morse = load_from_file("very_hard_The_Resistance_test_4.txt")

    dictionary = generate_morse_dictionary(words, words_in_morse)
    print_morse_dict_info(dictionary)

    # Seed the worklist with a single empty state at the dictionary root.
    pending = deque()
    pending.append(Solution(
        position_in_dictionary=dictionary,
        words_thus_far=[],
        part_to_use_start_index=0,
        part_in_use_end_index=0
    ))

    process_and_print_results(process_solutions(pending, message, dictionary))
if __name__ == '__main__':
    # Profile the whole run so the decoder's hot spots show up in the report.
    cProfile.run('main()')
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/very_hard/very_hard_The_Resistance_ineffective_tree.py",
"copies": "1",
"size": "4906",
"license": "mit",
"hash": 6062217816281297000,
"line_mean": 39.5454545455,
"line_max": 146,
"alpha_frac": 0.6161842642,
"autogenerated": false,
"ratio": 4.00163132137031,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012975486408534217,
"num_lines": 121
} |
__author__ = 'Amin'
import sys
import math
# A* search pseudocode (reference material). This previously sat in the file
# as bare text, which made the whole module a SyntaxError; it is kept verbatim
# below, commented out so the script can actually be parsed and run.
#
# function A*(start,goal)
#     ClosedSet := {}    // The set of nodes already evaluated.
#     OpenSet := {start}    // The set of tentative nodes to be evaluated, initially containing the start node
#     Came_From := the empty map    // The map of navigated nodes.
#
#     g_score := map with default value of Infinity
#     g_score[start] := 0    // Cost from start along best known path.
#     // Estimated total cost from start to goal through y.
#     f_score := map with default value of Infinity
#     f_score[start] := g_score[start] + heuristic_cost_estimate(start, goal)
#
#     while OpenSet is not empty
#         current := the node in OpenSet having the lowest f_score[] value
#         if current = goal
#             return reconstruct_path(Came_From, goal)
#
#         OpenSet.Remove(current)
#         ClosedSet.Add(current)
#         for each neighbor of current
#             if neighbor in ClosedSet
#                 continue    // Ignore the neighbor which is already evaluated.
#             tentative_g_score := g_score[current] + dist_between(current,neighbor)    // length of this path.
#             if neighbor not in OpenSet    // Discover a new node
#                 OpenSet.Add(neighbor)
#             else if tentative_g_score >= g_score[neighbor]
#                 continue    // This is not a better path.
#
#             // This path is the best until now. Record it!
#             Came_From[neighbor] := current
#             g_score[neighbor] := tentative_g_score
#             f_score[neighbor] := g_score[neighbor] + heuristic_cost_estimate(neighbor, goal)
#
#     return failure
#
# function reconstruct_path(Came_From,current)
#     total_path := [current]
#     while current in Came_From.Keys:
#         current := Came_From[current]
#         total_path.append(current)
#     return total_path
# r: number of rows.
# c: number of columns.
# a: number of rounds between the time the alarm countdown is activated and the time the alarm goes off.
r, c, a = [int(i) for i in input().split()]

# game loop: one iteration per round, reading Kirk's position and the maze.
while 1:
    # kr: row where Kirk is located.
    # kc: column where Kirk is located.
    kr, kc = [int(i) for i in input().split()]
    for i in range(r):
        row = input()  # one line of the ASCII maze, characters from '#.TC?'

    # Write an action using print
    # To debug: print("Debug messages...", file=sys.stderr)

    # Kirk's next move (UP DOWN LEFT or RIGHT).
    # NOTE(review): placeholder strategy -- always moves RIGHT; no pathfinding
    # is implemented yet.
    print("RIGHT")
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/hard/hard_The_Labyrinth.py",
"copies": "1",
"size": "2425",
"license": "mit",
"hash": -8521331160952456000,
"line_mean": 36.3076923077,
"line_max": 108,
"alpha_frac": 0.6313402062,
"autogenerated": false,
"ratio": 3.736517719568567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4867857925768567,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
import sys
import math
class Floor:
    """One floor of the drive: its width plus the positions of any elevators
    and of the exit located on it."""

    def __init__(self, width):
        self.width = width
        self.contains_elevator = False
        # A floor may hold several elevators, hence a list of positions.
        self.elevators_positions = []
        self.contains_exit = False
        self.exit_position = -1

    def add_exit(self, exit_position):
        """Register the exit on this floor."""
        self.contains_exit = True
        self.exit_position = exit_position

    def add_elevator(self, elevator_position):
        """Register one more elevator on this floor."""
        self.contains_elevator = True
        self.elevators_positions.append(elevator_position)

    def should_be_blocked(self, position, direction):
        """Decide what to do with the leading clone at `position` walking in
        `direction` ("LEFT" or "RIGHT").

        Returns a (block, build_elevator) pair: block when the clone is
        walking away from every target on this floor (all elevators, or the
        exit); build an elevator when the floor has neither elevator nor exit.

        Bug fix: the original referenced the non-existent attribute
        ``self.__elevator_position`` (the class stores the list
        ``elevators_positions``), raising AttributeError whenever a floor
        contained an elevator. With several elevators the clone is blocked
        only when no elevator lies ahead in its walking direction.
        """
        flag_should_be_blocked = False
        flag_should_build_elevator = False
        if self.contains_elevator:
            # Block only when every elevator is behind the clone.
            if direction == "RIGHT" and position > max(self.elevators_positions) or \
                    direction == "LEFT" and position < min(self.elevators_positions):
                flag_should_be_blocked = True
        elif self.contains_exit:
            if position > self.exit_position and direction == "RIGHT" or \
                    position < self.exit_position and direction == "LEFT":
                flag_should_be_blocked = True
        else:
            flag_should_build_elevator = True
        return flag_should_be_blocked, flag_should_build_elevator
class Drive:
    """The whole building: one Floor object per storey, populated from the
    first lines of stdin (the puzzle's initialisation input)."""

    def __init__(self):
        self.floors = []
        # Reads stdin immediately on construction.
        self.load_from_input()

    def load_from_input(self):
        """Parse the initialisation input and build the floors."""
        # nb_floors: number of floors
        # width: width of the area
        # nb_rounds: maximum number of rounds
        # exit_floor: floor on which the exit is found
        # exit_pos: position of the exit on its floor
        # nb_total_clones: number of generated clones
        # nb_additional_elevators: number of additional elevators that you can build
        # nb_elevators: number of elevators
        nb_floors, width, nb_rounds, exit_floor, exit_pos, nb_total_clones, nb_additional_elevators, nb_elevators = \
            [int(i) for i in input().split()]
        for i in range(nb_floors):
            self.floors.append(Floor(width))
        self.floors[exit_floor].add_exit(exit_pos)
        for i in range(nb_elevators):
            # elevator_floor: floor on which this elevator is found
            # elevator_pos: position of the elevator on its floor
            elevator_floor, elevator_pos = [int(j) for j in input().split()]
            self.floors[elevator_floor].add_elevator(elevator_pos)
# TODO: find all the available paths to the elevator, calculate the distance, and number of clones required
class Path:
    """Placeholder for the path-planning described in the TODO above; no
    behaviour implemented yet."""
    def __init__(self):
        pass
# MAIN
# Build the floors from the initialisation input, then answer one action
# (WAIT / BLOCK / ELEVATOR) per round for the leading clone.
drive = Drive()

flag_do_the_blocking = False

# game loop
while 1:
    # clone_floor: floor of the leading clone
    # clone_pos: position of the leading clone on its floor
    # direction: direction of the leading clone: LEFT or RIGHT
    clone_floor, clone_pos, direction = input().split()
    clone_floor = int(clone_floor)
    clone_pos = int(clone_pos)

    # Ask the clone's floor what to do with it this round.
    flag_do_the_blocking, flag_build_elevator = drive.floors[clone_floor].should_be_blocked(clone_pos, direction)

    # action: WAIT or BLOCK or ELEVATOR
    if flag_do_the_blocking:
        print("BLOCK")
    elif flag_build_elevator:
        print("ELEVATOR")
        # Remember the freshly built elevator so later rounds account for it.
        drive.floors[clone_floor].add_elevator(clone_pos)
    else:
        print("WAIT")
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/hard/hard_The_Paranoid_Android-One_step_further.py",
"copies": "1",
"size": "3295",
"license": "mit",
"hash": 552259232171963600,
"line_mean": 31.6237623762,
"line_max": 117,
"alpha_frac": 0.6273141123,
"autogenerated": false,
"ratio": 3.702247191011236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4829561303311236,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
import sys
import math
def calc_distance(latitudeA, longitudeA, latitudeB, longitudeB):
    """Approximate the distance in km between two points using an
    equirectangular projection scaled by the Earth radius (6371 km).

    NOTE(review): math.cos expects radians, while the calling code passes the
    coordinates straight from the input -- confirm against the puzzle's
    reference formula.
    """
    delta_x = (longitudeB - longitudeA) * math.cos((latitudeA + latitudeB) / 2)
    delta_y = latitudeB - latitudeA
    return math.sqrt(delta_x * delta_x + delta_y * delta_y) * 6371
# Python 2 script: read the user's position and N defibrillator records, then
# print the name of the closest defibrillator.
LON = raw_input()
LAT = raw_input()
N = int(raw_input())
defibrillators = []
for i in xrange(N):
    DEFIB = raw_input()
    defibrillators.append(DEFIB)

# The input uses a comma as the decimal separator; convert before parsing.
user_longitude = float(LON.replace(",", "."))
user_latitude = float(LAT.replace(",", "."))

# Linear scan for the record with the smallest distance to the user.
min_distance = 999999
closest_defib_name = "xxx"
for defibrillator in defibrillators:
    # Record format: id;name;address;phone;longitude;latitude
    defib_id, defib_name, defib_address, defib_phone, \
        defib_longitude, defib_latitude = defibrillator.split(";")
    current_distance = calc_distance(
        user_latitude,
        user_longitude,
        float(defib_latitude.replace(",", ".")),
        float(defib_longitude.replace(",", "."))
    )
    if current_distance < min_distance:
        min_distance = current_distance
        closest_defib_name = defib_name
        # Trace each new best candidate to stderr.
        print >> sys.stderr, defib_name

# Write an action using print
# To debug: print >> sys.stderr, "Debug messages..."
print >> sys.stderr, user_longitude
print >> sys.stderr, user_latitude
print closest_defib_name
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/easy/easy_Defibrillators.py",
"copies": "1",
"size": "1249",
"license": "mit",
"hash": 8021293627992946000,
"line_mean": 23.4901960784,
"line_max": 73,
"alpha_frac": 0.6509207366,
"autogenerated": false,
"ratio": 3.1225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9266417935479552,
"avg_score": 0.0014005602240896359,
"num_lines": 51
} |
__author__ = 'Amin'
import sys
import math
def dna(s):
nuclobases = ["A", "T", "C", "G"]
nuclobases_complementary = ["T", "A", "G", "C"]
r = ""
for c in s:
if c in nuclobases:
index = nuclobases.index(c)
r += nuclobases_complementary[index]
return r
def dna_if(s):
r = ""
for c in s:
if c == "A":
r += "T"
elif c == "T":
r += "A"
elif c == "C":
r += "G"
elif c == "G":
r += "C"
else:
r += c
return r
def l33t(s):
normal_speach = "EeAaOo"
l33t_speach = "334400a"
for ns, ls in zip(normal_speach, l33t_speach):
s = s.replace(ns, ls)
return s
def count_letters(s):
count = 0
for c in s:
if c.islower() or c.isupper():
count += 1
return count
def only_capital(s):
r = ""
for c in s:
if c.isalpha():
if c.isupper():
r += c
def sum_of_letters_values(s):
sum_of_letters = 0
for c in s:
sum_of_letters += ord(c)
return sum_of_letters
def sort_tuples():
n = int(input())
elements = []
for i in range(n):
item, distance = input().split()
distance = float(distance)
elements.append((item, distance))
elements_sorted = sorted(elements, key=lambda tup: tup[1])
r = ""
for x, y in reversed(elements_sorted):
r += x + " "
print(r[:-1])
# you are provided with a and b coefficients and then for each calc you have to calculate y=a*x+b
# and print each y in separate line
def linear_function():
a, b = [int(i) for i in input().split()]
n = int(input())
for i in range(n):
x = int(input())
y = a * x + b
print(y)
# you are provided with n numbers and should print them from lowest to highest
def sort_numbers():
n = int(input())
l = []
for i in range(n):
x = int(input())
l.append(x)
l.sort()
r = ""
for v in reversed(l):
r += str(v) + " "
print(r[:-1])
# The program:
# Your program must output the N first numbers of the Fibonacci sequence.
# Each number of the Fibonacci sequence is calculated by adding up the two numbers that precede it in the sequence, except from the two first numbers, which are 0 and 1.
# Therefore, the third number is 1 (0 + 1 = 1), the fourth one 2 (1 + 1 = 2), the fifth one 3 (1 + 2 = 3), the sixth one 5 (2 + 3 = 5), and so on.
# The begin of the Fibonacci sequence is: 0, 1, 1, 2, 3, 5, 8, 13, 21, ...
def _f(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return _f(n-1) + _f(n-2)
def _fi(n):
a_n_2, a_n_1 = 0, 1
for i in range(0, n):
a_n_2, a_n_1 = a_n_1, a_n_2 + a_n_1
return a_n_2
def fibbonaci():
n = int(input())
r = ""
for i in range(0, n):
r += str(_fi(i)) + " "
print(str(n), file=sys.stderr)
print(r[:-1])
if __name__ == "__main__":
#a = [int(x) for x in input().split()]
#print(a[1])
s = input()
print(dna(s))
print(dna_if(s))
print(l33t(s))
print(count_letters(s))
sort_tuples()
linear_function()
sort_numbers()
fibbonaci()
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/Clash_of_Code/fastest.py",
"copies": "1",
"size": "3265",
"license": "mit",
"hash": 7433760439874466000,
"line_mean": 18.4345238095,
"line_max": 169,
"alpha_frac": 0.5071975498,
"autogenerated": false,
"ratio": 2.9844606946983547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3991658244498355,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
import sys
import math
def prepare_answer(bit_type, count, flag_without_trailing_space=False):
    """Encode one run of `count` identical bits in Chuck Norris unary code.

    The first token is "0" for a run of ones and "00" for a run of zeroes;
    the second token is `count` zeroes. A trailing space separates this run
    from the next unless `flag_without_trailing_space` is set.

    Uses `range`-free string repetition instead of the original
    character-by-character `xrange` loop, so the helper also works on
    Python 3 (`xrange` is Python-2 only).
    """
    answer = "0" if bit_type == 1 else "00"
    answer += " " + "0" * count
    if not flag_without_trailing_space:
        answer += " "
    return answer
MESSAGE = raw_input()

# Expand the message into a flat list of bits, 7 bits per character (ASCII),
# most significant bit first.
characters = list(MESSAGE)
bits = []
for char in characters:
    # iterate over each bit
    for i in reversed(xrange(0, 7)):
        bit = (ord(char) >> i) & 0x01
        bits.append(bit)

# Run-length encode the bit stream: each maximal run of identical bits
# becomes one Chuck Norris token pair.
answer = ""
bit_type = 0
count = 0
for bit in bits:
    if count == 0:
        # first bit of a new run
        if bit == 1:
            bit_type = 1
        else:
            bit_type = 0
        count += 1
    else:
        if bit != bit_type:
            # the sign has changed: flush the finished run
            answer += prepare_answer(bit_type, count)
            bit_type = bit
            count = 1
        else:
            count += 1

# add the last part (accumulated but not added to answer)
answer += prepare_answer(bit_type, count, flag_without_trailing_space=True)
# instead of using flag in function it is possible to just remove last character
# (space) like this
#answer = answer[:-1]

# Write an action using print
# To debug: print >> sys.stderr, "Debug messages..."
print answer
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/easy/easy_Chuck_Norris.py",
"copies": "1",
"size": "1322",
"license": "mit",
"hash": 8394152877300634000,
"line_mean": 21.0333333333,
"line_max": 80,
"alpha_frac": 0.567322239,
"autogenerated": false,
"ratio": 3.5633423180592994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4630664557059299,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
import sys
import math
N = int(raw_input())  # Number of elements which make up the association table.
Q = int(raw_input())  # Number Q of file names to be analyzed.

# Parallel lists: known_extensions[i] corresponds to mime_types[i].
# Extensions are stored lowercase so lookups are case-insensitive.
known_extensions = []
mime_types = []
for i in xrange(N):
    # EXT: file extension
    # MT: MIME type.
    EXT, MT = raw_input().split()
    known_extensions.append(EXT.lower())
    mime_types.append(MT)

for i in xrange(Q):
    FNAME = raw_input()  # One file name per line.
    # The extension is everything after the last dot; no dot means no extension.
    index_of_last_dot = FNAME.rfind(".")
    if index_of_last_dot != -1:
        # filename is not important
        extension = FNAME[index_of_last_dot+1:].lower()
    else:
        extension = ""
    print >> sys.stderr, "Extension: " + extension
    answer = "UNKNOWN"
    if extension != "":
        try:
            # list.index raises ValueError for an unknown extension,
            # in which case the answer stays "UNKNOWN".
            index = known_extensions.index(extension)
            print >> sys.stderr, "Index: " + str(index)
            answer = mime_types[index]
        except ValueError:
            pass
    # Write an action using print
    # To debug: print >> sys.stderr, "Debug messages..."

    # For each of the Q filenames, display on a line the corresponding MIME type.
    # If there is no corresponding type, then display UNKNOWN.
    print answer
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/easy/easy_MIME_Type.py",
"copies": "1",
"size": "1230",
"license": "mit",
"hash": 5308587271876881000,
"line_mean": 25.7391304348,
"line_max": 81,
"alpha_frac": 0.6048780488,
"autogenerated": false,
"ratio": 3.6936936936936937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47985717424936936,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Amin'
import sys
import math
road = int(raw_input())  # the length of the road before the gap.
gap = int(raw_input())  # the length of the gap.
platform = int(raw_input())  # the length of the landing platform.

# To clear the gap in one jump the bike must move at least gap + 1 units.
required_speed = gap + 1

# game loop
while 1:
    speed = int(raw_input())  # the motorbike's speed.
    coordX = int(raw_input())  # the position on the road of the motorbike.

    action = ""

    # decide if we are before or after the gap
    if coordX < road:
        if (coordX + speed) > road:
            # the next step would enter the gap: jump now
            action = "JUMP"
        else:
            # adjust the speed toward the required take-off speed
            if speed < required_speed:
                action = "SPEED"
            elif speed > required_speed:
                action = "SLOW"
            else:
                action = "WAIT"
    else:
        # already past the gap: brake to land safely on the platform
        action = "SLOW"

    print >> sys.stderr, "Choosen action: " + action

    # Write an action using print
    # To debug: print >> sys.stderr, "Debug messages..."
    print action  # A single line containing one of 4 keywords: SPEED, SLOW, JUMP, WAIT.
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/easy/easy_Skynet_the_Chasm.py",
"copies": "1",
"size": "1036",
"license": "mit",
"hash": -438199063306466750,
"line_mean": 25.5641025641,
"line_max": 87,
"alpha_frac": 0.5733590734,
"autogenerated": false,
"ratio": 3.547945205479452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9618390525965699,
"avg_score": 0.0005827505827505828,
"num_lines": 39
} |
__author__ = 'Amin'
import sys
import math
surfaceN = int(raw_input())  # the number of points used to draw the surface of Mars.
for i in xrange(surfaceN):
    # landX: X coordinate of a surface point. (0 to 6999)
    # landY: Y coordinate of a surface point. By linking all the points together
    # in a sequential fashion, you form the surface of Mars.
    # NOTE: the surface points are read but not used by this Level 1 solution.
    landX, landY = [int(j) for j in raw_input().split()]

# game loop
while 1:
    # hSpeed: the horizontal speed (in m/s), can be negative.
    # vSpeed: the vertical speed (in m/s), can be negative.
    # fuel: the quantity of remaining fuel in liters.
    # rotate: the rotation angle in degrees (-90 to 90).
    # power: the thrust power (0 to 4).
    X, Y, hSpeed, vSpeed, fuel, rotate, power = [int(i) for i in raw_input().split()]

    # Write an action using print
    # To debug: print >> sys.stderr, "Debug messages..."

    # Bang-bang controller: full thrust when falling faster than 39 m/s
    # (presumably just under the puzzle's safe landing limit -- confirm),
    # otherwise free fall. Level 1 needs no rotation.
    rotate = 0
    power = 0
    if vSpeed < -39:
        power = 4
    else:
        power = 0

    # rotate power. rotate is the desired rotation angle. power is the desired thrust power.
    print str(rotate) + " " + str(power)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/easy/easy_Mars_Lander_Level_1.py",
"copies": "1",
"size": "1121",
"license": "mit",
"hash": -701498166344011600,
"line_mean": 31.0285714286,
"line_max": 92,
"alpha_frac": 0.6396074933,
"autogenerated": false,
"ratio": 3.4598765432098766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9591161795663328,
"avg_score": 0.0016644481693096787,
"num_lines": 35
} |
__author__ = 'Amin'
import sys
class Morse:
    """Maps uppercase letters to their Morse-code sign sequences."""

    # (signs, letter) pairs in the original ordering (grouped by sign length).
    _ALPHABET = [
        (".", "E"), ("..", "I"), (".-", "A"), ("...", "S"), ("..-", "U"),
        (".-.", "R"), (".--", "W"), ("....", "H"), ("...-", "V"), ("..-.", "F"),
        (".-..", "L"), (".--.", "P"), (".---", "J"), ("-", "T"), ("-.", "N"),
        ("--", "M"), ("-..", "D"), ("-.-", "K"), ("--.", "G"), ("---", "O"),
        ("-...", "B"), ("-..-", "X"), ("-.-.", "C"), ("-.--", "Y"),
        ("--..", "Z"), ("--.-", "Q"),
    ]

    def __init__(self):
        # Public list of (signs, letter) tuples, kept with the same content
        # and order as before for backward compatibility.
        self.morse_alphabet = list(self._ALPHABET)
        # Reverse index for O(1) letter -> signs lookup; the original scanned
        # the whole 26-entry list once per letter.
        self._letter_to_signs = {letter: signs for signs, letter in self._ALPHABET}

    def convert_word_to_morse(self, word):
        """Return `word` encoded as one concatenated string of Morse signs.

        Characters without a mapping (anything outside uppercase A-Z)
        contribute nothing, matching the original list-scan behaviour.
        """
        return "".join(self._letter_to_signs.get(letter, "") for letter in word)
def find_longest_word(words):
    """Log to stderr the length (in signs) of the longest entry in `words`.

    Uses max(..., default=0) so an empty list logs 0 instead of raising
    ValueError (the original `max` crashed on empty input).
    """
    longest_word_length = max((len(word) for word in words), default=0)
    print("Longest word consist of: " + str(longest_word_length) + " signs", file=sys.stderr)
def load_from_file(filename):
    """Read a Resistance test case from `filename`.

    File format: line 1 is the Morse message; line 2 the word count N; the
    next N lines are the dictionary words.

    Returns (message, words, words_in_morse) where words_in_morse holds each
    word converted to its Morse sign string.

    The file is now closed deterministically via a `with` block (the original
    left the handle open until garbage collection).
    """
    morse = Morse()
    words = []
    words_in_morse = []
    with open(filename) as f:
        l = f.readline().replace("\n", "")
        n = int(f.readline())
        for i in range(n):
            w = f.readline().replace("\n", "")
            words.append(w)
            words_in_morse.append(morse.convert_word_to_morse(w))
    find_longest_word(words_in_morse)
    return l, words, words_in_morse
def load_from_input():
    """Read a Resistance test case from stdin: the Morse message, a word
    count N, then N dictionary words. Returns (message, words, words_in_morse)."""
    morse = Morse()
    l = input()
    n = int(input())
    words = [input() for _ in range(n)]
    # Conversion is pure, so encoding after reading keeps behaviour identical.
    words_in_morse = [morse.convert_word_to_morse(word) for word in words]
    find_longest_word(words_in_morse)
    return l, words, words_in_morse
def load_from_prepared_data():
    """Return a hard-coded test case (message plus dictionary) used for quick
    local experiments; same shape as load_from_file/load_from_input."""
    morse = Morse()
    l = "......-...-..---.-----.-..-..-.."
    words = [
        "EEEEE",
        "HE",
        "HELL",
        "HELLO",
        "LL",
        "LLO",
        "OWORLD",
        "WORLD",
        "TEST",
    ]
    words_in_morse = [morse.convert_word_to_morse(word) for word in words]
    find_longest_word(words_in_morse)
    return l, words, words_in_morse
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/very_hard/very_hard_The_Resistance_utils.py",
"copies": "1",
"size": "3657",
"license": "mit",
"hash": -827668587071724500,
"line_mean": 27.1307692308,
"line_max": 97,
"alpha_frac": 0.5537325677,
"autogenerated": false,
"ratio": 2.934991974317817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8983638418206297,
"avg_score": 0.0010172247623039038,
"num_lines": 130
} |
__author__ = 'Amin'

# Concatenate C++ header and source files into one file that can be pasted
# into the CodinGame online editor. Lines containing the start catchword are
# skipped; copying of a file stops entirely at the first line containing the
# stop catchword.

input_header_filenames = ["header.h"]
input_source_filenames = ["source.cpp"]

output_filename = "code_in_game_file.cpp"

catchword_start = "abracadabra start"
catchword_stop = "abracadabra stop"


def _copy_file_contents(input_filename, output_file):
    """Append one input file to output_file, honouring the catchword markers
    and terminating the copied block with a blank separator."""
    with open(input_filename, "r") as input_file:
        for line in input_file:
            if catchword_start in line:
                # Skip the marker line itself but keep copying.
                continue
            if catchword_stop in line:
                # Ignore everything from the stop marker onwards.
                break
            output_file.write(line)
    output_file.write("\n\n")


# add all the includes etc.
output_init_fragment = ""
output_init_fragment += "#include <iostream>\n"
output_init_fragment += "\n\n"

# The original duplicated the copy loop for headers and sources and never
# closed its file handles; one helper plus `with` blocks keeps the output
# byte-identical (headers first, then sources) while closing everything.
with open(output_filename, "w") as output_file:
    output_file.write(output_init_fragment)
    # add all the header files, then all the source files
    for filename in input_header_filenames + input_source_filenames:
        _copy_file_contents(filename, output_file)
| {
"repo_name": "Michal-Fularz/codingame_solutions",
"path": "codingame_solutions/utilities/concatenate_sources.py",
"copies": "1",
"size": "1241",
"license": "mit",
"hash": 6300911852426638000,
"line_mean": 23.82,
"line_max": 56,
"alpha_frac": 0.6494762288,
"autogenerated": false,
"ratio": 3.3722826086956523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45217588374956524,
"avg_score": null,
"num_lines": null
} |
__author__ = "Amish Anand"
__copyright__ = "Copyright (c) 2015 Juniper Networks, Inc."
from setuptools import setup, find_packages
# parse requirements
req_lines = [line.strip() for line in open(
'requirements.txt').readlines()]
install_reqs = list(filter(None, req_lines))
setup(
name="snabb-junos",
namespace_packages=['jnpr'],
version="0.0.1",
author="Amish Anand",
author_email="jet-hackers@juniper.net",
description=("Junos Snabb Integration JET app"),
license="Apache 2.0",
package_dir={'': 'src'},
packages=find_packages('src'),
install_requires=install_reqs,
classifiers=[
'Development Status :: 1 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Other Scripting Engines',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Firewalls',
'Topic :: Text Processing :: Markup :: XML'
],
)
| {
"repo_name": "amanand/vmx-docker-lwaftr",
"path": "jetapp/setup.py",
"copies": "1",
"size": "1540",
"license": "apache-2.0",
"hash": 9076671115743948000,
"line_mean": 36.5609756098,
"line_max": 79,
"alpha_frac": 0.6318181818,
"autogenerated": false,
"ratio": 4.242424242424242,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 41
} |
__author__ = "Amish Anand"
__copyright__ = "Copyright (c) 2015 Juniper Networks, Inc."
import subprocess
import signal
from common.mylogging import LOG
import os
from string import Template
from conf_globals import *
# Substring matched against 'ps -axw' output to find running snabbvmx
# lwaftr processes (optionally suffixed with an instance id by callers).
SNABB_PROCESS_SEARCH_STRING = 'snabbvmx-lwaftr-xe'
# Command-line template for launching one snabbvmx lwaftr instance;
# $cfg/$id/$pci/$mac are substituted per instance.
SNABB_INSTANCE_LAUNCH_TEMPLATE = Template(
    '/usr/local/bin/snabb snabbvmx lwaftr --conf $cfg --id xe$id --pci $pci --mac $mac')
class ConfAction(object):
    """Actions that manage snabbvmx lwaftr processes in response to
    configuration events: launch, reload (SIGHUP), terminate (SIGTERM)
    and cleanup of per-instance files."""

    def start_snabb_instance(self, instance_id):
        """Launch the snabb instance *instance_id*.

        Reads the instance's PCI id and MAC address from the per-instance
        files, fills them into SNABB_INSTANCE_LAUNCH_TEMPLATE and spawns
        the process under sudo.  Returns False when the PCI/MAC files
        cannot be read; otherwise returns 0 (spawn failures are only
        logged — behavior kept from the original).
        """
        s = SNABB_INSTANCE_LAUNCH_TEMPLATE
        config_file_name = SNABB_FILENAME + str(instance_id) + '.cfg'
        pci_path = SNABB_PCI_PATH + str(instance_id)
        mac_path = SNABB_MAC_PATH + str(instance_id)
        # Read the files: the PCI id is the first '/'-separated token.
        mac_id = ''
        pci_id = ''
        try:
            with open(pci_path) as f:
                pci_id = f.read().strip().split('/')[0]
        except Exception as e:
            LOG.info('Failed to read the file %s due to exception: %s' %
                     (pci_path, e.message))
            return False
        try:
            with open(mac_path) as f:
                mac_id = f.read().strip()
        except Exception as e:
            LOG.info('Failed to read the file %s due to exception: %s' %
                     (mac_path, e.message))
            return False
        cmd = s.substitute(cfg=config_file_name, id=instance_id,
                           pci=pci_id, mac=mac_id)
        output = 0
        try:
            # BUG FIX: the command must be split into argv tokens.  The
            # original passed the whole command line as a single element
            # after "sudo", making sudo look for a program literally named
            # "/usr/local/bin/snabb snabbvmx lwaftr ...".
            pid = subprocess.Popen(["sudo"] + cmd.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT).pid
            LOG.info('Tried to restart the snabb instance id %s, returned %s' %
                     (str(instance_id), str(pid)))
        except Exception as e:
            LOG.info("Failed to start the snabb instance, exception %s" % e.message)
        return output

    def bindAction(self, binding_file):
        """Tell a running snabb instance to reload its binding table.

        Scans 'ps -axw' for a snabbvmx lwaftr process and runs
        'snabb lwaftr control <pid> reload' for it.  Returns True if a
        matching process was found.
        NOTE(review): *binding_file* is unused, and only the FIRST matching
        instance is reloaded (the loop breaks) — confirm this is intended.
        """
        signal_sent = False
        # Find the snabb instances and send sighup to all the instances
        p = subprocess.Popen(['ps', '-axw'], stdout=subprocess.PIPE)
        out, err = p.communicate()
        snabb_search_string = SNABB_PROCESS_SEARCH_STRING
        for lines in out.splitlines():
            if snabb_search_string in lines:
                pid = int(lines.split(None, 1)[0])
                cmd = r"/usr/local/bin/snabb lwaftr control " + str(pid)+" reload"
                try:
                    output = subprocess.check_output(cmd, shell=True)
                    LOG.info('Sent SIGHUP to instance %d' %pid)
                except Exception as e:
                    LOG.info("Failed to send SIGHUP to instance %d" %pid)
                signal_sent = True
                LOG.info("Successfully sent SIGHUP to the snabb instance")
                break
        return signal_sent

    def cfgAction(self, instance_id=None, restart_action=True):
        """Terminate snabb instance(s) via SIGTERM.

        With *instance_id* set, only that instance's process is matched;
        otherwise all snabbvmx lwaftr processes are.  When the instance is
        being deleted (restart_action=False) its .cfg/.conf files are
        removed first.  Returns True if at least one SIGTERM was sent.
        """
        signal_sent = False
        if instance_id is not None and restart_action is False:
            cfg_file_name = SNABB_FILENAME + str(instance_id) + '.cfg'
            conf_file_name = SNABB_FILENAME + str(instance_id) + '.conf'
            try:
                os.remove(cfg_file_name)
                os.remove(conf_file_name)
            except OSError:
                # Files may already be gone; deletion is best-effort.
                pass
            LOG.info("Removed the file %s and %s as the instance %d was deleted" % (
                cfg_file_name, conf_file_name, int(instance_id)))
        p = subprocess.Popen(['ps', '-axw'], stdout=subprocess.PIPE)
        out, err = p.communicate()
        if instance_id is not None:
            snabb_search_string = SNABB_PROCESS_SEARCH_STRING + \
                str(instance_id)
        else:
            snabb_search_string = SNABB_PROCESS_SEARCH_STRING
        for lines in out.splitlines():
            if snabb_search_string in lines:
                pid = int(lines.split(None, 1)[0])
                os.kill(pid, signal.SIGTERM)
                LOG.info("Successfully sent SIGTERM to the snabb instance %s" % str(
                    lines.split(None, 1)[1]))
                signal_sent = True
        return signal_sent

    def deleteAction(self):
        """Remove all snabb per-instance files from /tmp and SIGTERM every
        snabbvmx lwaftr process.  Returns True if any SIGTERM was sent."""
        snabb_search_string = SNABB_PROCESS_SEARCH_STRING
        for f in os.listdir('/tmp'):
            if snabb_search_string in f:
                LOG.info('Deleting the file %s' % str(f))
                try:
                    os.remove(os.path.join('/tmp/', f))
                except OSError:
                    pass
        signal_sent = False
        p = subprocess.Popen(['ps', '-axw'], stdout=subprocess.PIPE)
        out, err = p.communicate()
        for lines in out.splitlines():
            if snabb_search_string in lines:
                pid = int(lines.split(None, 1)[0])
                os.kill(pid, signal.SIGTERM)
                LOG.info("Successfully sent SIGTERM to the snabb instance %s" % str(
                    lines.split(None, 1)[1]))
                signal_sent = True
        return signal_sent
| {
"repo_name": "amanand/vmx-docker-lwaftr",
"path": "jetapp/src/conf/conf_action.py",
"copies": "1",
"size": "5051",
"license": "apache-2.0",
"hash": -2055800942879194600,
"line_mean": 38.4609375,
"line_max": 94,
"alpha_frac": 0.5668184518,
"autogenerated": false,
"ratio": 3.792042042042042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9786954267905161,
"avg_score": 0.014381245187376237,
"num_lines": 128
} |
__author__ = "Amish Anand"
__copyright__ = "Copyright (c) 2017 Juniper Networks, Inc."
from mylogging import LOG
import conf.protos.mgd_service_pb2 as mgd_service_pb2
import conf.protos.openconfig_service_pb2 as openconfig_service_pb2
import common.app_globals
import json
from conf.conf_globals import *
class Sanity(object):
    """
    Contains the sanity functions for the JET app: checks that the
    required YANG modules and the commit-notification configuration are
    present on the device, and can apply the latter.
    """

    def __init__(self, dev):
        # dev provides getChannel() for the gRPC stubs below.
        self._dev = dev
        LOG.info("Sanity object initialized")

    def YangModulePresent(self):
        """Return True when both required YANG modules are installed.

        Uses the MGD op-command API to list system YANG packages.
        BUG FIX: the flag was initialized to True, so this method always
        reported success even on RPC failure or missing modules; it now
        starts False and is set only when both modules are found.
        """
        yang_config_present = False
        stub = mgd_service_pb2.beta_create_ManagementRpcApi_stub(
            self._dev.getChannel())
        try:
            request = mgd_service_pb2.ExecuteOpCommandRequest(
                xml_command="<get-system-yang-packages></get-system-yang-packages>",
                out_format=0, request_id=1000)
            for response in stub.ExecuteOpCommand(request, common.app_globals.RPC_TIMEOUT_SECONDS):
                LOG.info(
                    "Invoked the OpCommand to fetch yang packages, received response: %s" % response)
                if "ietf-inet-types.yang" in response.data and "ietf-softwire.yang" in response.data:
                    yang_config_present = True
        except Exception as e:
            LOG.error(
                "Failed to execute the MGD api due to exception: %s" % e.message)
        return yang_config_present

    def NotificationConfigPresent(self):
        """Return True when the commit-notification config is present
        under system/services/extension-service, False otherwise (and on
        any RPC failure)."""
        try:
            stub = openconfig_service_pb2.beta_create_OpenconfigRpcApi_stub(
                self._dev.getChannel())
            getRequest = openconfig_service_pb2.GetRequestList(operation_id="1001", operation=1,
                                                               path="/configuration/system/services/extension-service/notification")
            request = openconfig_service_pb2.GetRequest(
                request_id=1002, encoding=0, get_request=[getRequest])
            response = stub.Get(
                request, common.app_globals.RPC_TIMEOUT_SECONDS)
            # Parenthesized single-argument print works under Python 2 and 3
            # (the original used the Python-2-only statement form).
            print(response)
            for rsp in response.response:
                if rsp.response_code == openconfig_service_pb2.OK and rsp.value != "":
                    LOG.info(
                        "Invoked the getRequest for notification configuration, response= %s" % rsp.message)
                    return True
            LOG.info("Notification configuration is not present")
        except Exception as e:
            LOG.error(
                "Failed to fetch notification configuration due to exception: %s" % e.message)
        return False

    def CommitNotificationConfig(self):
        """Apply the commit-notification configuration (JSON payload) and
        return True on success, False on any RPC failure."""
        jsonCfgValue = """ <configuration-json>
            {
            "configuration" : {
            "system" : {
            "services" : {
            "extension-service" : {
            "notification" : {
            "max-connections" : "5",
            "allow-clients" : {
            "address" : ["0.0.0.0/0"]
            }
            }
            }
            }
            }
            }
            }</configuration-json> """
        try:
            stub = openconfig_service_pb2.beta_create_OpenconfigRpcApi_stub(
                self._dev.getChannel())
            jsonCfgRequest = openconfig_service_pb2.SetRequest.ConfigOperationList(operation_id="jcfg", operation=0,
                                                                                   path="/", value=jsonCfgValue)
            request = openconfig_service_pb2.SetRequest(
                request_id=1000, encoding=1, config_operation=[jsonCfgRequest])
            response = stub.Set(
                request, common.app_globals.RPC_TIMEOUT_SECONDS)
            LOG.info("Applied the notification config, response:%s" % response)
            return True
        except Exception as e:
            LOG.error(
                "Failed to set the notification config, execption: %s" % e.message)
            return False

    def StartSnabbifConfigPresent(self):
        """If softwire (snabb) configuration exists in JUNOS, parse it and
        hand it to the dispatcher queue.

        NOTE(review): dispQ is not defined in this module; it presumably
        arrives via the wildcard `from conf.conf_globals import *` —
        confirm.
        """
        LOG.info("Checking if Snabb configuration is present in JUNOS")
        stub = openconfig_service_pb2.beta_create_OpenconfigRpcApi_stub(
            self._dev.getChannel())
        get_request = openconfig_service_pb2.GetRequestList(operation_id="1001", operation=1,
                                                            path="/configuration/ietf-softwire:softwire-config")
        request = openconfig_service_pb2.GetRequest(
            request_id=1002, encoding=1, get_request=[get_request])
        response = stub.Get(request, common.app_globals.RPC_TIMEOUT_SECONDS)
        for rsp in response.response:
            if rsp.response_code == openconfig_service_pb2.OK and rsp.value != "":
                LOG.info("Invoked the getRequest for snabb configuration")
                config_dict = json.loads(rsp.value)[
                    "ietf-softwire:softwire-config"]
                LOG.debug("Snabb config is present in the VMX %s" %
                          (str(config_dict)))
                dispQ.put(config_dict)
| {
"repo_name": "amanand/vmx-docker-lwaftr",
"path": "jetapp/src/common/sanity.py",
"copies": "1",
"size": "5423",
"license": "apache-2.0",
"hash": 6523316843270282000,
"line_mean": 44.1916666667,
"line_max": 132,
"alpha_frac": 0.5568873317,
"autogenerated": false,
"ratio": 4.500414937759336,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013619000253575785,
"num_lines": 120
} |
__author__ = 'Amish'
from process_sentence_dataset import *
import time
import datetime
def pipeline_runner():
    """Run the sentence-sentiment pipeline: build the dataset wrapper and
    record the AFINN baseline results."""
    sentences = MovieSentences()
    # Produce baseline results for the configured set of sentences.
    sentences.register_baseline_results()
    # Semantic tuning and result persistence are currently disabled:
    # sentences.apply_semantic_tuning_with_modals_on_baseline()
    # sentences.persist_results()
def write_analysis_file():
    """Dump every sentence (AFINN score, modal counts, valence details and
    negation flag) to a timestamped tab-separated file under ./output, then
    open it in Excel (Windows 'start' command)."""
    dataset = MovieSentences()
    dataset.load_afinn_for_all_sents()
    # NOTE(review): len(...) > -1 is always true, so every sentence passes;
    # presumably a leftover knob for filtering by modal count — confirm.
    selected = [s for s in dataset.all_sents_with_afinn if len(s['modals']) > -1]
    print(len(selected))

    out_dir = "output"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_path = os.path.join(out_dir, get_filename_timestamp())

    header = ['SentID', 'Original_file', 'AFINN', 'Modals', '# of modals',
              'Pos', 'Neg', 'Has Negation', 'Sentence']
    with open(out_path, 'a') as report:
        report.write("\t".join(header) + "\n")
        for entry in selected:
            row = [
                str(entry['sent_id']),
                entry['original_file'],
                str(float("{0:.5f}".format(entry['afinn']))),
                str(entry['modals']),
                str(len(entry['modals'])),
                str(entry['valence_details']['pos']),
                str(entry['valence_details']['neg']),
                str(entry['has_negation']),
                entry['sent'],
            ]
            report.write("\t".join(row) + "\n")
    os.system('start excel.exe "%s"' % (out_path))
def get_filename_timestamp(suffix=None):
    """Return a '%Y-%m-%d-%H-%M-%S.txt' filename for the current time.

    If *suffix* is given it is prepended to the timestamp (despite the
    parameter's name — kept for backward compatibility).
    """
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H-%M-%S')
    # 'is not None' instead of '!= None' — identity check is the idiom.
    if suffix is not None:
        return suffix + st + ".txt"
    return st + ".txt"
"repo_name": "amishwins/PyUnit",
"path": "Charm/runner.py",
"copies": "1",
"size": "2084",
"license": "mit",
"hash": -4188403246305902000,
"line_mean": 33.1803278689,
"line_max": 121,
"alpha_frac": 0.6161228407,
"autogenerated": false,
"ratio": 3.490787269681742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9577503086132204,
"avg_score": 0.005881404849907727,
"num_lines": 61
} |
__author__ = 'Amish'
from read_afin import *
from collections import Counter
# Sentence Sentiment (Announcement November 5, 2014)
# Sentence Sentiment (Announcement November 5, 2014)
class MovieSentences:
    """Wraps the rt-polarity movie-review sentence dataset and scores each
    sentence with AFINN; __init__ reads both data files and precomputes the
    baseline (false positives/negatives and AFINN-neutral sentences)."""
    def __init__(self):
        self.afinn = AFINNData()
        # Paths to the positive/negative sentence files of the rt-polarity dataset.
        self.neg_path = os.path.join('..', 'rt-polarity', 'rt-polaritydata', 'rt-polarity.neg')
        self.pos_path = os.path.join('..', 'rt-polarity', 'rt-polaritydata', 'rt-polarity.pos')
        # NOTE(review): these thresholds are stored but not read anywhere in
        # this class (get_data_in_range takes its own threshold) — confirm.
        self.upper_threshold = 1.5
        self.lower_threshold = -1
        # this isn't good - temporal coupling :(
        self.utterance_index = 1
        self.neg_sents = self.load_sents(self.neg_path)
        self.pos_sents = self.load_sents(self.pos_path)
        # Filled lazily by load_afinn_for_all_sents().
        self.all_sents_with_afinn = []
        # Baseline using AFINN
        # (sentence, score) pairs where AFINN disagrees with the file label.
        self.pos_sent_with_neg_afinn = [(w, self.afinn.get_sentiment_for_sentence(w)) for w in self.pos_sents
                                        if self.afinn.get_sentiment_for_sentence(w) < 0]
        self.neg_sent_with_pos_afinn = [(w, self.afinn.get_sentiment_for_sentence(w)) for w in self.neg_sents
                                        if self.afinn.get_sentiment_for_sentence(w) > 0]
        # (sentence, origin-label) pairs that AFINN scores exactly 0.
        self.neutral_neg_sent_afinn = [(w, 'n') for w in self.neg_sents
                                       if self.afinn.get_sentiment_for_sentence(w) == 0]
        self.neutral_pos_sent_afinn = [(w, 'p') for w in self.pos_sents
                                       if self.afinn.get_sentiment_for_sentence(w) == 0]
        self.all_neutrals = self.neutral_neg_sent_afinn + self.neutral_pos_sent_afinn
        self.neutrals_with_absolutes = self.get_result_with_absolute_score_and_modals()
        self.original_baseline = []
    # Made this a load function, because if called during initialization it's very slow for all unit tests
    # Is this good design?
    def load_afinn_for_all_sents(self):
        """Populate all_sents_with_afinn (negatives then positives); no-op
        if already loaded."""
        if len(self.all_sents_with_afinn) == 0:
            neg = self.get_afinn_for_sents('n')
            pos = self.get_afinn_for_sents('p')
            self.all_sents_with_afinn = neg + pos
        else:
            print("AFINN for all sents registered")
    def save_data(self, object_to_save, filename):
        """Serialize object_to_save as pretty-printed JSON to output/<filename>.json."""
        output_directory = "output"
        if not os.path.exists(output_directory):
            os.makedirs(output_directory)
        output_file = os.path.join(output_directory, filename + ".json")
        with open(output_file, 'w') as js:
            json.dump(object_to_save, js, indent=4, sort_keys=True)
    def get_next_index(self):
        """Return the current utterance index and advance the counter."""
        result = self.utterance_index
        self.utterance_index = self.utterance_index + 1
        return result
    def get_afinn_for_sents(self, original_file):
        """Build one result dict per sentence of the chosen file ('n' or 'p')
        with AFINN score, valence details, modal counts and negation flag.
        Raises NameError for any other file tag."""
        results = list()
        if original_file == 'n':
            list_to_use = self.neg_sents
        elif original_file == 'p':
            list_to_use = self.pos_sents
        else:
            raise NameError("Wrong file type:", original_file)
        for sent in list_to_use:
            result = dict()
            result['sent_id'] = self.get_next_index()
            result['sent'] = sent
            result['original_file'] = original_file
            result['afinn'] = self.afinn.get_sentiment_for_sentence(sent)
            result['valence_details'] = self.afinn.get_valence_details(sent, store_not_found=False)
            result['modals'] = self.get_modals(sent)
            result['has_negation'] = self.has_negation(sent)
            results.append(result)
        return results
    # Some helper methods
    def get_data_in_range(self, threshold, positive=True):
        """Return loaded sentence dicts with afinn >= threshold (positive=True)
        or afinn <= threshold (positive=False)."""
        data = [w['afinn'] for w in self.all_sents_with_afinn]
        min_afinn = min(data)
        max_afinn = max(data)
        #print(min_afinn)
        #print(max_afinn)
        if positive:
            data = [w for w in self.all_sents_with_afinn if w['afinn'] >= threshold]
        else:
            data = [w for w in self.all_sents_with_afinn if w['afinn'] <= threshold]
        return data
    def get_result_with_absolute_score_and_modals(self):
        """For each AFINN-neutral sentence, build a dict with its valence
        details and modal counts (sentiment_evaluation left empty)."""
        results = []
        for neutral in self.all_neutrals:
            result = dict()
            result['sent'] = neutral[0]
            result['original_file'] = neutral[1]
            result['valence_details'] = self.afinn.get_valence_details(neutral[0])
            result['modals'] = self.get_modals(neutral[0])
            result['sentiment_evaluation'] = ""
            results.append(result)
        return results
    def get_modals(self, sentence):
        """Return a dict mapping each modal verb found in *sentence* (case-
        insensitive, whole tokens) to its occurrence count."""
        result = {}
        modals = ["may", "can", "must", "ought", "will", "shall", "need",
                  "dare", "might", "could", "would", "should"]
        # NOTE(review): 'negatives' is never used in this method — confirm
        # whether negative-modal handling was planned here.
        negatives = ["'t", "not"]
        for modal in modals:
            count = len([w for w in list(filter(None, re.split(r"\W+", sentence.lower()))) if w == modal])
            if count > 0:
                result[modal] = count
        return result
    def has_negation(self, sent):
        """Return True if *sent* contains any whole-word negator from the list."""
        negators = ['not', "can't", "didn't", "wouldn't", "shouldn't", "couldn't", 'but', 'cannot']
        for negator in negators:
            if self.find_whole_word(negator)(sent):
                return True
        return False
    def find_whole_word(self, word):
        """Return a case-insensitive whole-word search function for *word*."""
        return re.compile(r'\b({0})\b'.format(word), flags=re.IGNORECASE).search
    def get_linguistic_fun(self, baseline):
        """Return wordlist, nltk.Text and frequency distribution for the
        sentences of a (sentence, label) baseline list."""
        result = {}
        result['wordlist'] = []
        for sent in [s[0] for s in baseline]:
            for word in sent.split():
                result['wordlist'].append(word)
        result['text'] = nltk.Text(result['wordlist'])
        result['fd'] = nltk.FreqDist(result['text'])
        return result
    def get_sents_from_all_neutrals_with_words(self, words):
        """Return neutral (sentence, label) pairs whose sentence contains *words*."""
        return [w for w in self.all_neutrals if words in w[0]]
    def summarize_baseline(self):
        """Print counts and percentages of AFINN false positives/negatives."""
        print("False positives %4s %6.2f" %
              (len(self.pos_sent_with_neg_afinn), (len(self.pos_sent_with_neg_afinn) / len(self.pos_sents)) * 100))
        print("False negatives %4s %6.2f" %
              (len(self.neg_sent_with_pos_afinn), (len(self.neg_sent_with_pos_afinn) / len(self.neg_sents)) * 100))
    def load_sents(self, the_path):
        """Read *the_path* and return its lines (split on newline)."""
        with open(the_path, 'r') as f:
            lines = re.split(r'\n', f.read())
            return lines
    # problem with utf-8 :( boo
    #        with codecs.open(the_path, 'r', encoding='utf8') as f:
    #            lines = re.split(r'\n', f.read())
    #            return lines
    def get_positive_sents_with_negative_afinn(self, rating):
        """Return positive-labelled (sentence, score) pairs scored below *rating*."""
        low = [w for w in self.pos_sent_with_neg_afinn if w[1] < rating]
        return low
    def display_list_nicely(self, sents_and_afinn):
        """Print each (sentence, score) pair as 'score  sentence'."""
        for entry in sents_and_afinn:
            print("%5.2f %s" % (entry[1], entry[0]))
class MovieSentencesTests(unittest.TestCase):
    """Unit tests for MovieSentences.  The exact counts asserted below are
    tied to the rt-polarity data files read in MovieSentences.__init__."""
    def setUp(self):
        # Fresh dataset wrapper per test (reads the data files each time).
        self.cut = MovieSentences()
    def test_files_have_good_endings(self):
        self.assertTrue(self.cut.neg_path.endswith('.neg'))
        self.assertTrue(self.cut.pos_path.endswith('.pos'))
    def test_files_have_good_length(self):
        # Both rt-polarity files contain 5332 sentences.
        self.assertEqual(len(self.cut.neg_sents), 5332)
        self.assertEqual(len(self.cut.pos_sents), 5332)
    def test_baseline_values(self):
        self.assertEqual(len(self.cut.pos_sent_with_neg_afinn), 812)
        self.assertEqual(len(self.cut.neg_sent_with_pos_afinn), 1968)
        self.assertEqual(len(self.cut.neutral_neg_sent_afinn), 1530)
        self.assertEqual(len(self.cut.neutral_pos_sent_afinn), 1307)
        self.assertEqual(len(self.cut.all_neutrals), 2837)
    def test_positive_sent_low_neg(self):
        # Counts grow as the AFINN cut-off rises towards 0.
        self.assertEqual(len(self.cut.get_positive_sents_with_negative_afinn(-2)), 5)
        self.assertEqual(len(self.cut.get_positive_sents_with_negative_afinn(-1)), 88)
        self.assertEqual(len(self.cut.get_positive_sents_with_negative_afinn(-0.5)), 364)
        self.assertEqual(len(self.cut.get_positive_sents_with_negative_afinn(0)), 812)
    def test_freq_dist_is_not_null(self):
        self.assertIsNotNone(self.cut.get_linguistic_fun(self.cut.neutral_neg_sent_afinn))
        self.assertIsNotNone(self.cut.get_linguistic_fun(self.cut.neutral_neg_sent_afinn)['fd'])
    def test_get_absolutes_and_modals(self):
        self.assertIsNotNone(self.cut.neutrals_with_absolutes)
        self.assertEqual(len(self.cut.neutrals_with_absolutes), 2837)
        self.assertEqual(len([r for r in self.cut.neutrals_with_absolutes if r['valence_details']['absolute_total'] == 0]), 2354)
        # Sentences which have an absolute_valence of not 0 (meaning even though the sum was 0, some words had AFINN values
        non_absolute_zero_sents = [r for r in self.cut.neutrals_with_absolutes if r['valence_details']['absolute_total'] != 0]
        self.assertEqual(len(non_absolute_zero_sents), 483)
        for sent in non_absolute_zero_sents:
            if len(sent['modals'].keys()) > 0:
                toprint = []
                toprint.append(sent['modals'])
                toprint.append(sent['valence_details']['pos_total'])
                toprint.append(sent['valence_details']['neg_total'])
                toprint.append(sent['valence_details']['absolute_total'])
                toprint.append(sent['sent'])
                #print(toprint)
    def test_get_modals(self):
        modals = self.cut.get_modals("would would could could can must will")
        self.assertEqual(modals['would'], 2)
        #print(modals.keys())
    def test_metrics_for_all_sents(self):
        # Prints corpus size metrics; asserts nothing (exploratory test).
        self.cut.load_afinn_for_all_sents()
        all_sents = [w['sent'] for w in self.cut.all_sents_with_afinn]
        all_words = re.split(r"\W+", " ".join(all_sents))
        print("All words:", len(all_words))
        print("Unique words: ", len(set(all_words)))
    def test_extremes(self):
        # Exploratory: sentences with extreme AFINN scores from the "wrong" file.
        self.cut.load_afinn_for_all_sents()
        data = self.cut.get_data_in_range(1.5, True)
        from_neg = [w for w in data if w['original_file'] == 'n']
        with_modals = [w for w in from_neg if len(w['modals']) > 0]
        #print(len(data), len(from_neg), len(with_modals))
        #for entry in with_modals:
        #    print(entry)
        data = self.cut.get_data_in_range(-1, False)
        from_pos = [w for w in data if w['original_file'] == 'p']
        with_modals = [w for w in from_pos if len(w['modals']) > 0]
        #print(len(data), len(from_pos), len(with_modals))
        #for entry in with_modals:
        #    print(entry)
    def test_has_negation(self):
        sent = "I didn't love him"
        self.assertEqual(self.cut.has_negation(sent), True)
        sent = "It was great, but I hated it."
        self.assertEqual(self.cut.has_negation(sent), True)
        sent = "I did not enjoy it"
        self.assertEqual(self.cut.has_negation(sent), True)
        sent = "Wouldn't"
        self.assertEqual(self.cut.has_negation(sent), True)
        sent = "I loved Benjamin Button"
        self.assertEqual(self.cut.has_negation(sent), False)
        sent = "I would've enjoyed it"
        self.assertEqual(self.cut.has_negation(sent), False)
    def test_modals(self):
        # res = self.cut.get_linguistic_fun(self.cut.all_neutrals)
        #may, can, must, ought, will, shall, need, dare, might, could, would, and should
        self.display_modal_count("may")
        self.display_modal_count("may not")
        self.display_modal_count("can")
        self.display_modal_count("cannot")
        self.display_modal_count("can't")
        self.display_modal_count("must")
        #self.display_modal_count("must not")
        self.display_modal_count("ought")
        #self.display_modal_count("ought not")
        self.display_modal_count("will")
        self.display_modal_count("will not")
        #self.display_modal_count("won't")
        self.display_modal_count("shall")
        #self.display_modal_count("shall not")
        self.display_modal_count("need")
        #self.display_modal_count("need not")
        self.display_modal_count("dare")
        #self.display_modal_count("dare not")
        self.display_modal_count("might")
        self.display_modal_count("might not")
        self.display_modal_count("could")
        #self.display_modal_count("could not")
        self.display_modal_count("couldn't")
        self.display_modal_count("would")
        #self.display_modal_count("would not")
        self.display_modal_count("wouldn't")
        self.display_modal_count("should")
        #self.display_modal_count("should not")
        self.display_modal_count("shouldn't")
    def display_modal_count(self, modal):
        """Helper: look up neutral sentences containing *modal* (print disabled)."""
        result = self.cut.get_sents_from_all_neutrals_with_words(modal)
        #print("%10s %3s %3s %3s" % (modal, len(result), len([w for w in result if w[1] == 'n']), len([w for w in result if w[1] == 'p'])))
if __name__ == "__main__":
unittest.main()
| {
"repo_name": "amishwins/PyUnit",
"path": "Charm/process_sentence_dataset.py",
"copies": "1",
"size": "12830",
"license": "mit",
"hash": -7997528842089598000,
"line_mean": 38.8447204969,
"line_max": 139,
"alpha_frac": 0.5943881528,
"autogenerated": false,
"ratio": 3.2075,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9287547946860706,
"avg_score": 0.0028680411878588725,
"num_lines": 322
} |
__author__ = 'amka'
__created__ = '16.12.12'
class Condition(object):
    """
    Store forecast weather data for one day.

    Every field is optional, defaults to None and is stored verbatim
    under its keyword name.
    """

    # Attribute names, in signature order.
    _FIELDS = ('observation_time', 'temp_C', 'temp_F', 'weatherCode',
               'weatherIconUrl', 'weatherDesc', 'windspeedMiles',
               'windspeedKmph', 'winddirDegree', 'winddir16Point',
               'precipMM', 'humidity', 'visibility', 'pressure',
               'cloudcover')

    def __init__(self, observation_time=None, temp_C=None, temp_F=None,
                 weatherCode=None, weatherIconUrl=None, weatherDesc=None,
                 windspeedMiles=None, windspeedKmph=None, winddirDegree=None, winddir16Point=None,
                 precipMM=None, humidity=None, visibility=None, pressure=None, cloudcover=None):
        # Bind each argument to the attribute of the same name.
        supplied = locals()
        for attr in self._FIELDS:
            setattr(self, attr, supplied[attr])

    def __repr__(self):
        return "<Condition({0}, {1})>".format(self.observation_time, self.temp_C)
"repo_name": "amka/wwolib",
"path": "condition.py",
"copies": "1",
"size": "1161",
"license": "mit",
"hash": 6975775635603916000,
"line_mean": 36.4838709677,
"line_max": 98,
"alpha_frac": 0.6425495263,
"autogenerated": false,
"ratio": 3.70926517571885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9844540242909783,
"avg_score": 0.0014548918218134491,
"num_lines": 31
} |
from neurokernel.core import Manager
from neurokernel.LPU.LPU import LPU
from neurokernel.tools.comm import get_random_port
import neurokernel.base as base
from neurokernel.pattern import Pattern
def tracefunc(frame, event, arg, indent=[0]):
    """sys.settrace hook printing an indented call/return trace.

    The mutable default `indent` is deliberate: it keeps the nesting
    depth across calls.  Returns itself so tracing continues into
    nested frames.
    """
    if event == "call":
        indent[0] += 2
        # Single-argument print is valid Python 2 AND 3 (the original
        # used the Python-2-only statement form).
        print("-" * indent[0] + "> call function " + frame.f_code.co_name)
    elif event == "return":
        print("<" + "-" * indent[0] + " exit function " + frame.f_code.co_name)
        indent[0] -= 2
    return tracefunc
import sys
#sys.settrace(tracefunc)
logger = base.setup_logger(file_name='neurokernel.log', screen=False)
# Simulation timing: 1.0 s at 0.1 ms steps -> 10000 steps.
dt = 1e-4
dur = 1.0
Nt = int(dur/dt)
#logger = base.setup_logger(file_name='neurokernel.log', screen=False)
# Pick free ports for the data/control/time channels.
port_data = get_random_port()
port_ctrl = get_random_port()
port_time = get_random_port()
# LPU 0 is driven by the input file; LPU 1 receives only via the pattern below.
(n_dict, s_dict) = LPU.lpu_parser('./data/simple_lpu_0.gexf.gz')
lpu_0 = LPU(dt, n_dict, s_dict, input_file='./data/simple_input.h5', output_file='simple_output_0.h5', port_ctrl=port_ctrl, port_data=port_data, port_time=port_time, device=0, id='lpu_0', debug=False)
(n_dict, s_dict) = LPU.lpu_parser('./data/simple_lpu_1.gexf.gz')
lpu_1 = LPU(dt, n_dict, s_dict, input_file=None, output_file='simple_output_1.h5', port_ctrl=port_ctrl, port_data=port_data, port_time=port_time, device=1, id='lpu_1', debug=False)
#____________________________________________________________
# Connect lpu_0's graded-potential output port 0 to lpu_1's input port 0.
out_ports_gpot_0 = '/lpu_0/out/gpot/0'
in_ports_gpot_0 = '/lpu_1/in/gpot/0'
pat = Pattern(out_ports_gpot_0, in_ports_gpot_0)
pat.interface[out_ports_gpot_0] = [0, 'out', 'gpot']
pat.interface[in_ports_gpot_0] = [1, 'in', 'gpot']
pat[out_ports_gpot_0, in_ports_gpot_0] = 1
#_________________________________________________
# Register both LPUs with the manager, wire them with the pattern,
# run Nt steps and shut down.
man = Manager(port_data, port_ctrl, port_time)
man.add_brok()
man.add_mod(lpu_0)
man.add_mod(lpu_1)
man.connect(lpu_0, lpu_1, pat, 0, 1)
man.start(steps=Nt)
man.stop()
| {
"repo_name": "cerrno/neurokernel",
"path": "examples/testLPU_ports/testLPU_exec.py",
"copies": "1",
"size": "1977",
"license": "bsd-3-clause",
"hash": -7533036324109362000,
"line_mean": 28.5074626866,
"line_max": 200,
"alpha_frac": 0.6358118361,
"autogenerated": false,
"ratio": 2.5411311053984575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8569716581319231,
"avg_score": 0.021445272035845453,
"num_lines": 67
} |
from neurokernel.core import Manager
from neurokernel.LPU.LPU import LPU
from neurokernel.tools.comm import get_random_port
import neurokernel.base as base
from neurokernel.realtime_interface import io_interface
from neurokernel.pattern import Pattern
def tracefunc(frame, event, arg, indent=[0]):
    """sys.settrace hook printing an indented call/return trace.

    The mutable default `indent` is deliberate: it keeps the nesting
    depth across calls.  Returns itself so tracing continues into
    nested frames.
    """
    if event == "call":
        indent[0] += 2
        # Single-argument print is valid Python 2 AND 3 (the original
        # used the Python-2-only statement form).
        print("-" * indent[0] + "> call function " + frame.f_code.co_name)
    elif event == "return":
        print("<" + "-" * indent[0] + " exit function " + frame.f_code.co_name)
        indent[0] -= 2
    return tracefunc
import sys
#sys.settrace(tracefunc)
logger = base.setup_logger(file_name='neurokernel.log', screen=False)
# Simulation timing: 1.0 s at 0.1 ms steps -> 10000 steps.
dt = 1e-4
dur = 1.0
Nt = int(dur/dt)
#logger = base.setup_logger(file_name='neurokernel.log', screen=False)
# Pick free ports for the data/control/time channels.
port_data = get_random_port()
port_ctrl = get_random_port()
port_time = get_random_port()
#init the realtime interface
num_ports = 1
id = 'interface_0'
interface = io_interface(num_ports, id, 0, port_data, port_ctrl, port_time)
print interface.cached_data
# LPU 1 receives its input only from the realtime interface via the pattern.
(n_dict, s_dict) = LPU.lpu_parser('./data/simple_lpu_1.gexf.gz')
lpu_1 = LPU(dt, n_dict, s_dict, input_file=None, output_file='simple_output_1.h5', port_ctrl=port_ctrl, port_data=port_data, port_time=port_time, device=1, id='lpu_1', debug=False)
#____________________________________________________________
# Connect the interface's graded-potential output port 0 to lpu_1's input port 0.
out_ports_gpot_0 = '/' + id + '/out/gpot/0'
in_ports_gpot_0 = '/lpu_1/in/gpot/0'
pat = Pattern(out_ports_gpot_0, in_ports_gpot_0)
pat.interface[out_ports_gpot_0] = [0, 'out', 'gpot']
pat.interface[in_ports_gpot_0] = [1, 'in', 'gpot']
pat[out_ports_gpot_0, in_ports_gpot_0] = 1
#_________________________________________________
# Register the interface and the LPU, wire them, run Nt steps, shut down.
man = Manager(port_data, port_ctrl, port_time)
man.add_brok()
man.add_mod(interface)
man.add_mod(lpu_1)
man.connect(interface, lpu_1, pat, 0, 1)
man.start(steps=Nt)
man.stop()
| {
"repo_name": "cerrno/neurokernel",
"path": "examples/testLPU_io/testLPU_exec.py",
"copies": "1",
"size": "1950",
"license": "bsd-3-clause",
"hash": 5469243456748894000,
"line_mean": 25.3513513514,
"line_max": 180,
"alpha_frac": 0.6435897436,
"autogenerated": false,
"ratio": 2.6859504132231407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8728961570142975,
"avg_score": 0.020115717336033078,
"num_lines": 74
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.