text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'anthonyfullam'
import sys
import argparse
import pandas as pd
# Command-line interface: a single option naming the ASCAT segment file to
# annotate with cytoband event descriptors.
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=('''\
Takes ASCAT output file and modifies it to add an event descriptor
'''))
parser.add_argument('-f', '--ASCATfile', help='ASCAT file', metavar='')
args = parser.parse_args()
# No arguments at all: print usage and abort.  (parse_args succeeded above
# because -f is optional from argparse's point of view.)
if len(sys.argv) == 1:
    parser.print_help()
    sys.exit('\n \n Need to provide an ASCAT file! \n \n')
file_name = args.ASCATfile
# Output is written next to the input as <input-without-.csv>_events.csv.
output_file = args.ASCATfile.split('.csv')[0] + '_events.csv'
# Cytoband coordinate table, expected in the working directory.
# NOTE(review): assumes a headerless, tab-separated UCSC-style cytoBand.txt —
# confirm the file layout.
cytobands = pd.read_csv('cytoBand.txt', delimiter="\t",
                        names=['Chromosome',
                               'Start_band',
                               'End_band',
                               'Band_name',
                               'Stain'])
# ASCAT segment file: assumed headerless CSV with these eight columns.
ascatfile = pd.read_csv(file_name,
                        names=['segment_number',
                               'Chromosome',
                               'Start_CNregion',
                               'End_CNregion',
                               'Norm_total_CN',
                               'Norm_minor_allele_CN',
                               'Tumour_total_CN',
                               'Tumour_minor_allele_CN'])
# New column filled in by the annotation loop below.
ascatfile['event'] = ""
def definewindow(workingframe, start_change=None, end_change=None):
    """Locate the cytoband rows bracketing a copy-number change region.

    Parameters
    ----------
    workingframe : pandas.DataFrame
        Cytoband rows for a single chromosome, carrying contiguous integer
        index labels and 'Start_band' / 'End_band' coordinate columns.
    start_change, end_change : number, optional
        Start / end coordinate of the copy-number region.  When omitted they
        fall back to the module-level ``start_cn_change`` / ``end_cn_change``
        globals, so the original call style ``definewindow(frame)`` keeps
        working; passing them explicitly makes the function self-contained
        and testable.

    Returns
    -------
    (lindex, uindex) : tuple
        Index labels of the band containing the region start and of the band
        containing the region end.
    """
    if start_change is None:
        start_change = start_cn_change
    if end_change is None:
        end_change = end_cn_change
    # First band starting strictly after the change start: the band before it
    # contains the start.  If none starts after, the start lies in the last band.
    lregion = workingframe[(workingframe.Start_band > start_change)]
    if len(lregion) == 0:
        lindex = workingframe.index[-1]
    else:
        # label arithmetic — assumes contiguous integer index labels
        lindex = lregion.index[0] - 1
    # Last band ending strictly before the change end: the band after it
    # contains the end.  If none ends before, the end lies in the first band.
    uregion = workingframe[(workingframe.End_band < end_change)]
    if len(uregion) == 0:
        uindex = workingframe.index[0]
    else:
        uindex = uregion.index[-1] + 1
    return lindex, uindex
# Annotate every ASCAT segment with a cytoband-based event descriptor,
# e.g. 'gain(q21.1)' or 'del(p11-p13)'.
for index, row in ascatfile.iterrows():
    # Classify the event by comparing tumour vs normal copy numbers.
    if row.Norm_total_CN == row.Tumour_total_CN and row.Norm_minor_allele_CN == row.Tumour_minor_allele_CN:
        cnevent = ""  # no change at all -> no event label
    elif row.Norm_total_CN < row.Tumour_total_CN:
        cnevent = 'gain'
    elif row.Norm_total_CN > row.Tumour_total_CN:
        cnevent = 'del'
    else:
        # total CN unchanged but minor-allele CN differs (e.g. copy-neutral LOH)
        cnevent = 'change'
    # Globals read by definewindow() when it is called without explicit bounds.
    start_cn_change, end_cn_change = row.Start_CNregion, row.End_CNregion
    one_chromosome_frame = cytobands[cytobands.Chromosome == row['Chromosome']]
    start_index, end_index = definewindow(one_chromosome_frame)
    # .loc[label, column] replaces the long-deprecated .ix indexer (removed
    # in pandas >= 1.0); start_index/end_index are labels of cytobands'
    # default integer index, so label-based lookup matches the old behavior.
    start_band = one_chromosome_frame.loc[start_index, 'Band_name']
    end_band = one_chromosome_frame.loc[end_index, 'Band_name']
    if cnevent == "":
        ascatfile.loc[index, 'event'] = ""
    else:
        if start_band == end_band:
            ascatfile.loc[index, 'event'] = '{0}({1})'.format(cnevent, start_band)
        else:
            ascatfile.loc[index, 'event'] = '{0}({1}-{2})'.format(cnevent, start_band, end_band)
ascatfile.to_csv(output_file)
| {
"repo_name": "fullama/Tools",
"path": "add_cytoband_to_ascat.py",
"copies": "1",
"size": "2776",
"license": "mit",
"hash": -159375459585933120,
"line_mean": 32.8536585366,
"line_max": 107,
"alpha_frac": 0.5576368876,
"autogenerated": false,
"ratio": 3.4271604938271607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.448479738142716,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Anthony Mansour'
import urllib.request
import smtplib
import getpass
from time import asctime, sleep
def find_between(s, first, last):
    """Return the substring of ``s`` found between ``first`` and ``last``.

    Searches for the first occurrence of ``first``, then for the first
    occurrence of ``last`` after it.  Returns '' when either bound is absent.
    """
    lo = s.find(first)
    if lo == -1:
        return ""
    lo += len(first)
    hi = s.find(last, lo)
    if hi == -1:
        return ""
    return s[lo:hi]
print('script started @ ' + asctime())
url = input('Enter Kijiji url (default: http://www.kijiji.ca/b-art-collectibles/banff-canmore/c12l1700234): ' + '\n')
# Fall back to the default listing page unless the entered url plausibly
# points at kijiji.ca.  NOTE(review): the length threshold (> 23) and the
# 21-character prefix compare are inconsistent with each other — confirm
# the intended validation.
if len(url) > 23:  # Checks if url is from kijiji.ca
    if url[:21] != "http://www.kijiji.ca/":
        url = 'http://www.kijiji.ca/b-art-collectibles/banff-canmore/c12l1700234'
else:
    url = 'http://www.kijiji.ca/b-art-collectibles/banff-canmore/c12l1700234'
print("Current url: " + url + '\n')
req = urllib.request.Request(url)
# Credentials for the notification e-mail; password read without echo.
sender = input('Enter sender email (Currently @gmail.com only): ' + '\n')
password = getpass.getpass()
# Alerts are delivered as SMS through the Telus/Koodo email-to-text gateway.
receiver = input('\n' + 'Enter phone number (Currently Telus/Koodo Only): ' + '\n') + '@msg.telus.com'
# String markers used to scrape ad fields out of the raw listing HTML.
# NOTE(review): screen-scraping breaks whenever Kijiji changes its markup.
ad_start = '<table class=" regular-ad js-hover "'
image_start = '<img src="'
image_end = '"'
link_start = '<a href="'
link_end = '"'
title_start = '-flag" >'
title_end = '</a>'
description_start = '<p>'
description_end = '</p>'
price_start = 'price">'
price_end = '</td>'
sub_bottom = '<div id="AdsenseBottom"'
server = smtplib.SMTP("smtp.gmail.com:587")
loop = True
# Ensure ads.txt exists so the later 'r+' open cannot fail.
open('ads.txt', 'a+').close()
# Poll loop: scrape the front page, and when the ad list differs from the
# cached copy in ads.txt, rewrite the cache and text the newest ad.
while loop:
    file_string = ''
    with urllib.request.urlopen(req) as response:
        page = response.read().decode(response.headers.get_content_charset())
        front_page_ads = page.count(ad_start)
        page_sub = page.replace('\n', '').replace('\r', '')
        response.close()
    # Restrict parsing to the region between the first ad and the ad footer.
    page_sub = find_between(page_sub, ad_start, sub_bottom)
    ads = []
    # Each ad becomes [title, link, description, image, price].
    for x in range(0, front_page_ads):
        ads.append([])
        ads[x].append(find_between(page_sub, title_start, title_end).strip(' \t\n\r'))
        ads[x].append('http://www.kijiji.ca' + find_between(page_sub, link_start, link_end).strip(' \t\n\r'))
        ads[x].append(find_between(page_sub, description_start, description_end).strip(' \t\n\r'))
        ads[x].append(find_between(page_sub, image_start, image_end).strip(' \t\n\r'))
        ads[x].append(find_between(page_sub, price_start, price_end).strip(' \t\n\r'))
        # Advance past this ad's price marker so the next iteration parses
        # the following ad.  NOTE(review): .index() raises ValueError if
        # fewer ads appear inside page_sub than were counted on the full
        # page; the magic '+ 4' skips len('</td>') - 1 characters — confirm.
        page_sub = page_sub[page_sub.index(price_end, page_sub.index(price_start) + len(price_start)) + 4:]
    # Only the first four fields (price excluded) feed the change-detection
    # cache, so a price-only change does not trigger an alert.
    for i in range(0, front_page_ads):
        for j in range(0, 4):
            file_string += ads[i][j] + ' , '
        file_string += '\n'
    f = open('ads.txt', 'r+')
    if f.read() != file_string.strip():
        f.seek(0)
        f.truncate()
        f.write(file_string.strip())
        # SMS body: all five fields of the newest (first) ad.
        msg = "\r\n".join([
            " ",
            ads[0][0],
            ads[0][1],
            ads[0][2],
            ads[0][3],
            ads[0][4]
        ])
        try:
            server.connect('smtp.gmail.com', '587')
            server.ehlo()
            server.starttls()
            server.login(sender, password)
            server.sendmail(sender, receiver, msg)
            server.quit()
            print('Update sent @ ' + asctime())
        except:
            # Any send failure stops the polling loop.  NOTE(review): a bare
            # except also swallows KeyboardInterrupt/SystemExit — consider
            # 'except Exception'.
            print('ERROR, FAILED TO SEND SMTP! @ ' + asctime())
            loop = False
    f.close()
    sleep(5)
print('Exited')
| {
"repo_name": "Tontiwisk/kijiji_alert",
"path": "kijiji_alert.py",
"copies": "1",
"size": "3379",
"license": "mit",
"hash": 5635426562814232000,
"line_mean": 32.1274509804,
"line_max": 117,
"alpha_frac": 0.5794613791,
"autogenerated": false,
"ratio": 3.0062277580071175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4085689137107117,
"avg_score": null,
"num_lines": null
} |
__author__ = 'anthony <>'
from collections import OrderedDict
from django import forms
class FormOrderMixin(object):
    def order_fields(self, field_order):
        """
        Rearranges the fields according to field_order.
        field_order is a list of field names specifying the order. Fields not
        included in the list are appended in the default order for backward
        compatibility with subclasses not overriding field_order. If field_order
        is None, all fields are kept in the order defined in the class.
        Unknown fields in field_order are ignored to allow disabling fields in
        form subclasses without redefining ordering.
        """
        if field_order is None:
            return
        reordered = OrderedDict()
        for name in field_order:
            # skip names that are not (or are no longer) present
            if name in self.fields:
                reordered[name] = self.fields.pop(name)
        # append whatever was not named, keeping its original order
        reordered.update(self.fields)
        self.fields = reordered
def get_form_field_no_validation(fieldname):
    """Build a subclass of *fieldname* whose clean() returns the raw value,
    effectively disabling the parent field's validation."""
    return type('FieldNoValidation', (fieldname,),
                {'clean': lambda self, value: value})
class Icons(object):
    """Namespace holding a class-level icon registry."""

    # Mutable class attribute shared by every user of Icons.
    # NOTE(review): no population or lookup is visible in this module —
    # confirm callers before changing.
    icons = {}
| {
"repo_name": "unicefuganda/uSurvey",
"path": "survey/forms/form_helper.py",
"copies": "1",
"size": "1258",
"license": "bsd-3-clause",
"hash": -2947720120880769000,
"line_mean": 28.9523809524,
"line_max": 80,
"alpha_frac": 0.6470588235,
"autogenerated": false,
"ratio": 4.7293233082706765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5876382131770677,
"avg_score": null,
"num_lines": null
} |
__author__ = 'anthony'
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.safestring import mark_safe
from django import forms
from form_helper import FormOrderMixin, get_form_field_no_validation
from survey.models import (
ListingSample,
Answer,
Interview,
VideoAnswer,
AudioAnswer,
ImageAnswer,
TextAnswer,
NumericalAnswer,
MultiChoiceAnswer,
MultiSelectAnswer,
DateAnswer,
SurveyAllocation,
EnumerationArea,
Survey,
QuestionSet,
Interviewer,
InterviewerAccess,
USSDAccess,
QuestionOption,
GeopointAnswer)
class USSDSerializable(object):
def render_prepend_ussd(self):
if 'value' in self.fields:
return '%s ' % self.fields['value'].label
return ''
def render_extra_ussd(self):
"""Basically used by implementing classes\
to render ussd versions of their forms
:return:
"""
pass
def render_extra_ussd_html(self):
"""Basically used by implementing classes to render
\ussd Preview versions of their forms on HTML
:return:
"""
pass
def text_error(self):
if self.errors:
return self.errors['value'][0]
def get_answer_form(interview, access=None):
    """Build and return a ModelForm class tailored to the interview's current
    question.

    The returned ``AnswerForm`` is bound to the Answer subclass matching the
    question's answer type, and adapts its single ``value`` field's widget and
    validation to both the answer type and the access channel (USSD/ODK/Web).

    :param interview: Interview whose ``last_question`` drives the form.
    :param access: optional InterviewerAccess; looked up from the interview's
        channel when omitted.
    :return: an ``AnswerForm`` class (not an instance).
    """
    question = interview.last_question
    if not question:
        # No current question yet: start at the question set's first question.
        # NOTE(review): 'to_exact' / 'g_first_question' are presumably
        # QuestionSet helpers resolving the concrete subtype and its entry
        # question — confirm against the QuestionSet model.
        interview.last_question = interview.question_set.to_exact.g_first_question
        question = interview.last_question
    answer_class = Answer.get_class(question.answer_type)
    if access is None:
        access = InterviewerAccess.get(id=interview.interview_channel.id)
    else:
        # no-op branch: access was supplied by the caller
        access = access

    class AnswerForm(forms.ModelForm, USSDSerializable):
        """Single-field form capturing one answer for *question*."""

        class Meta:
            model = answer_class
            fields = ['value']

        def __init__(self, *args, **kwargs):
            """Swap in a 'value' field/widget appropriate for the answer type."""
            super(AnswerForm, self).__init__(*args, **kwargs)
            self.question = question
            # self.fields['uid'] = forms.CharField(initial=access.user_identifier, widget=forms.HiddenInput)
            if question.answer_type == DateAnswer.choice_name():
                self.fields['value'] = forms.DateField(
                    label='Answer',
                    input_formats=[
                        settings.DATE_FORMAT,
                    ],
                    widget=forms.DateInput(
                        attrs={
                            'placeholder': 'Answer',
                            'class': 'datepicker'},
                        format=settings.DATE_FORMAT))
            if question.answer_type == GeopointAnswer.choice_name():
                # Free-text field; real parsing happens in clean_value below.
                model_field = get_form_field_no_validation(forms.CharField)
                self.fields['value'] = model_field(label='Answer', widget=forms.TextInput(
                    attrs={'placeholder': 'Lat[space]Long[space]Altitude[space]Precision'}))
            if question.answer_type == MultiChoiceAnswer.choice_name():
                self.fields['value'] = forms.ChoiceField(choices=[(opt.order, opt.text) for opt
                                                                  in question.options.all()], widget=forms.RadioSelect)
                self.fields['value'].empty_label = None
                # USSD users key in the option number instead of selecting.
                if access.choice_name() == USSDAccess.choice_name():
                    self.fields['value'].widget = forms.NumberInput()
            if question.answer_type == MultiSelectAnswer.choice_name():
                self.fields['value'] = forms.ModelMultipleChoiceField(
                    queryset=question.options.all(), widget=forms.CheckboxSelectMultiple)
            # Restrict file uploads to the media type the question expects.
            accept_types = {AudioAnswer.choice_name(): 'audio/*',
                            VideoAnswer.choice_name(): 'video/*',
                            ImageAnswer.choice_name(): 'image/*'
                            }
            if question.answer_type in [
                    AudioAnswer.choice_name(),
                    VideoAnswer.choice_name(),
                    ImageAnswer.choice_name()]:
                self.fields['value'].widget.attrs = {
                    'accept': accept_types.get(
                        question.answer_type, '|'.join(
                            accept_types.values()))}
            if access.choice_name() == USSDAccess.choice_name():
                # USSD screens carry the question text separately; no label.
                self.fields['value'].label = ''
            else:
                self.fields['value'].label = 'Answer'

        def full_clean(self):
            try:
                return super(AnswerForm, self).full_clean()
            except ValueError:
                # Geopoint free text can break ModelForm cleaning; fall back
                # to the raw submitted value and let clean_value validate it.
                if question.answer_type == GeopointAnswer.choice_name():
                    self.cleaned_data['value'] = self.data['value']
                else:
                    raise

        def render_extra_ussd(self):
            """Plain-text option list for the USSD screen."""
            text = []
            # NOTE(review): relies on Python 2's eager map(); on Python 3
            # map() is lazy and this would append nothing.
            if question.options.count() > 0:
                map(lambda opt: text.append('%s: %s' %
                                            (opt.order, opt.text)), question.options.all())
            # elif hasattr(interview.last_question, 'loop_started'):
            #     text.append('%s: %s' %
            #                 (question.text, self.initial.get('value', 1)))
            #     # text.append('Enter any key to continue')
            return mark_safe('\n'.join(text))

        def render_extra_ussd_html(self):
            """HTML preview of the USSD option list."""
            text = []
            # NOTE(review): same Python-3 laziness caveat as render_extra_ussd.
            if question.options.count() > 0:
                map(lambda opt: text.append('%s: %s' %
                                            (opt.order, opt.text)), question.options.all())
            # elif hasattr(interview.last_question, 'loop_started'):
            #     text.append('%s: %s' %
            #                 (question.text, self.initial.get('value', 1)))
            return mark_safe('<br />'.join(text))

        def clean_value(self):
            """Per-answer-type cleaning plus the question's response validation."""
            if question.answer_type == MultiChoiceAnswer.choice_name():
                # try:
                self.cleaned_data['value'] = question.options.get(order=self.data['value'])
                # except QuestionOption.DoesNotExist:
                #     raise ValidationError('Please select a valid option')
            if question.answer_type == GeopointAnswer.choice_name():
                # Expect exactly four space-separated floats.
                float_entries = self.data['value'].split(' ')
                valid = False
                try:
                    # NOTE(review): map() is lazy on Python 3, so float()
                    # conversion errors would never surface there.
                    map(lambda entry: float(entry), float_entries)
                    if len(float_entries) == 4:
                        valid = True
                except BaseException:
                    pass
                if not valid:
                    raise ValidationError(
                        'Please enter in format: lat[space]long[space]altitude[space]precision')
            # validate the response if the last question has validation
            if interview.last_question and interview.last_question.response_validation:
                response_validation = interview.last_question.response_validation
                if response_validation.validate(self.cleaned_data['value'], interview.last_question) is False:
                    # 'dconstraint_message' matches usage in GenericQuestion
                    # elsewhere in this app.
                    raise ValidationError(response_validation.dconstraint_message)
            return self.cleaned_data['value']

        def save(self, *args, **kwargs):
            """Persist via the Answer subclass factory, not ModelForm.save."""
            return answer_class.create(interview, question, self.cleaned_data['value'])
    return AnswerForm
class BaseSelectInterview(forms.ModelForm):
    """Base ModelForm for channel-driven (USSD/ODK/Web) interview steps;
    injects the interviewer-access uid into the submitted data."""

    def __init__(self, request, access, *args, **kwargs):
        super(BaseSelectInterview, self).__init__(*args, **kwargs)
        self.access = access
        if 'data' in kwargs:
            # Force the access channel's uid into the bound data.  The
            # QueryDict was already handed to the form above; mutating it in
            # place still reaches self.data because it is the same object.
            kwargs['data']._mutable = True
            kwargs['data']['uid'] = access.user_identifier
            kwargs['data']._mutable = False
        # NOTE(review): is_authenticated() as a *method* is pre-Django-1.10
        # API — confirm the project's Django version before upgrading.
        if request.user.is_authenticated():
            self.user = request.user
        else:
            self.user = None
        self.interviewer = access.interviewer
        self.fields['uid'] = forms.CharField(
            initial=access.user_identifier,
            widget=forms.HiddenInput)

    class Meta:
        model = Interview
        fields = []

    def save(self, commit=True):
        """Stamp uploaded_by with the authenticated user when present."""
        if self.user:
            instance = super(BaseSelectInterview, self).save(commit=False)
            instance.uploaded_by = self.user
            if commit:
                instance.save()
            return instance
        else:
            return super(BaseSelectInterview, self).save(commit=commit)
class AddMoreLoopForm(BaseSelectInterview, USSDSerializable):
    """Just looks like answer form. But used to confirm whether to continue loop for user selected loop scenarios
    """
    ADD_MORE = 1
    DO_NOT_ADD = 2
    CHOICES = [(ADD_MORE, 'Yes'), (DO_NOT_ADD, 'No')]
    DEFAULT_LOOP_PROMPT = 'Do you want to add another Loop?'

    def __init__(self, request, access, *args, **kwargs):
        super(AddMoreLoopForm, self).__init__(request, access, *args, **kwargs)
        self.fields['value'] = forms.ChoiceField(choices=self.CHOICES, widget=forms.RadioSelect)
        if self.access.choice_name() == USSDAccess.choice_name():
            # USSD clients key in the numeric option instead of clicking.
            self.fields['value'].widget = forms.NumberInput()
            self.fields['value'].label = ''
        else:
            self.fields['value'].label = 'Answer'

    def render_extra_ussd(self):
        """Render the yes/no options as plain-text lines for USSD."""
        # List comprehension instead of map(lambda ...): map() is lazy on
        # Python 3, so the side-effect form silently rendered nothing there.
        text = ['%s: %s' % choice for choice in self.CHOICES]
        return mark_safe('\n'.join(text))

    def render_extra_ussd_html(self):
        """Render the yes/no options as an HTML preview of the USSD screen."""
        text = ['%s: %s' % choice for choice in self.CHOICES]
        return mark_safe('<br />'.join(text))

    class Meta:
        model = Interview
        fields = []
class UserAccessForm(forms.Form):
    """Entry form resolving a mobile/ODK identifier to an InterviewerAccess."""

    uid = forms.CharField(label='Mobile/ODK ID', max_length=25)

    def text_error(self):
        """First uid error as plain text (for USSD screens)."""
        if self.errors:
            return self.errors['uid'][0]

    def clean_uid(self):
        """Resolve the identifier; reject unknown or blocked interviewers.

        Returns the InterviewerAccess instance (not the raw uid string).
        """
        try:
            access = InterviewerAccess.get(
                user_identifier=self.cleaned_data['uid'])
        except InterviewerAccess.DoesNotExist:
            raise ValidationError('No such interviewer')
        if access:
            if access.interviewer.is_blocked:
                # Fixed grammar of the user-facing message
                # ('have been' -> 'has been').
                raise ValidationError('This interviewer has been blocked')
        return access
class UssdTimeoutForm(forms.Form):
    """Toggle for whether USSD sessions should use a timeout (default: no)."""

    use_timeout = forms.ChoiceField(
        widget=forms.RadioSelect, choices=[
            (1, 'Use Timeout'), (2, 'No Timeout')], initial=2, label='')
class SurveyAllocationForm(
        BaseSelectInterview,
        FormOrderMixin,
        USSDSerializable):
    """Lets the interviewer pick which unfinished EA assignment to work on."""

    def __init__(self, request, access, *args, **kwargs):
        super(SurveyAllocationForm, self).__init__(request, access, *args, **kwargs)
        # 1-based position -> EA name, ordered by EA name for stable numbering.
        self.CHOICES = [(idx + 1, sa.allocation_ea.name) for idx, sa
                        in enumerate(self.interviewer.unfinished_assignments.order_by('allocation_ea__name'))]
        self.fields['value'] = forms.ChoiceField(choices=self.CHOICES, widget=forms.RadioSelect)
        if self.access.choice_name() == USSDAccess.choice_name():
            self.fields['value'].widget = forms.NumberInput()
        self.fields['value'].label = 'Select EA'
        self.order_fields(['value', 'test_data'])

    def render_extra_ussd(self):
        """Plain-text assignment options for the USSD screen."""
        # List comprehension instead of map(lambda ...): map() is lazy on
        # Python 3, so the side-effect form silently rendered nothing there.
        text = ['%s: %s' % choice for choice in self.CHOICES]
        return mark_safe('\n'.join(text))

    def render_extra_ussd_html(self):
        """HTML preview of the USSD assignment options."""
        text = ['%s: %s' % choice for choice in self.CHOICES]
        return mark_safe('<br />'.join(text))

    def clean_value(self):
        """Map the submitted 1-based position back to its allocation EA."""
        selected = int(self.cleaned_data['value'])
        return self.interviewer.unfinished_assignments.order_by(
            'allocation_ea__name')[selected - 1].allocation_ea

    def selected_allocation(self):
        """The SurveyAllocation matching the submitted position, if valid."""
        if self.is_valid():
            selected = int(self.data['value'])
            return self.interviewer.unfinished_assignments.order_by('allocation_ea__name')[
                selected - 1]

    def save(self, commit=True):
        """Attach survey, EA and interviewer to the saved Interview."""
        instance = super(SurveyAllocationForm, self).save(commit=commit)
        instance.survey = self.selected_allocation().survey
        instance.ea = self.cleaned_data['value']
        instance.interviewer = self.interviewer
        return instance

    class Meta:
        model = Interview
        fields = ['test_data', ]
class ReferenceInterviewForm(BaseSelectInterview, USSDSerializable):
    """Basically used to select random sample for sampled surveys
    """

    def __init__(self, request, access, survey, allocation_ea, *args, **kwargs):
        super(ReferenceInterviewForm, self).__init__(request, access, *args, **kwargs)
        self.survey = survey
        self.random_samples = ListingSample.get_or_create_samples(survey, allocation_ea).order_by('interview__created')
        # 1-based position -> sample display label.
        choices = [(idx + 1, sample.get_display_label()) for idx, sample in enumerate(self.random_samples)]
        self.fields['value'] = forms.ChoiceField(choices=choices, widget=forms.RadioSelect)
        if self.access.choice_name() == USSDAccess.choice_name():
            self.fields['value'].widget = forms.NumberInput()
        self.listing_form = survey.preferred_listing.listing_form if survey.preferred_listing else survey.listing_form
        self.fields['value'].label = 'Select %s' % self.listing_form.name

    def clean_value(self):
        """Translate the 1-based selection into the sampled interview pk."""
        selected = int(self.cleaned_data['value'])
        return self.random_samples.values_list('interview', flat=True)[selected - 1]

    def render_extra_ussd(self):
        """Plain-text sample options for the USSD screen."""
        # List comprehension instead of map(lambda ...): map() is lazy on
        # Python 3, so the side-effect form silently rendered nothing there.
        text = ['%s: %s' % choice for choice in self.fields['value'].choices]
        return mark_safe('\n'.join(text))

    def render_extra_ussd_html(self):
        """HTML preview of the USSD sample options."""
        text = ['%s: %s' % choice for choice in self.fields['value'].choices]
        return mark_safe('<br />'.join(text))
class SelectBatchOrListingForm(BaseSelectInterview, USSDSerializable):
    """Lets the interviewer choose between continuing listing and starting a
    batch."""

    LISTING = '1'
    BATCH = '2'

    def __init__(self, request, access, *args, **kwargs):
        super(SelectBatchOrListingForm, self).__init__(request, access, *args, **kwargs)
        self.fields['value'] = forms.ChoiceField()
        self.fields['value'].choices = [(self.LISTING, 'Listing'), (self.BATCH, 'Batch')]
        self.fields['value'].label = 'Continue Listing or Start Batch'

    def render_extra_ussd(self):
        """Plain-text option lines for the USSD screen."""
        # List comprehension instead of map(lambda ...): map() is lazy on
        # Python 3, so the side-effect form silently rendered nothing there.
        text = ['%s: %s' % choice for choice in self.fields['value'].choices]
        return mark_safe('\n'.join(text))

    def render_extra_ussd_html(self):
        """HTML preview of the USSD option lines."""
        text = ['%s: %s' % choice for choice in self.fields['value'].choices]
        return mark_safe('<br />'.join(text))
class SelectBatchForm(BaseSelectInterview, USSDSerializable):
    """Lets the interviewer pick one of the survey allocation's open batches."""

    def __init__(self, request, access, survey_allocation, *args, **kwargs):
        super(SelectBatchForm, self).__init__(request, access, *args, **kwargs)
        survey = survey_allocation.survey
        self.survey = survey
        self.batches = survey_allocation.open_batches()
        self.fields['value'] = forms.ChoiceField()
        # 1-based position -> batch name.
        self.fields['value'].choices = [(idx + 1, batch.name) for idx, batch in enumerate(self.batches)]
        self.fields['value'].label = 'Select Batch'

    def clean_value(self):
        """Map the submitted 1-based position back to the Batch object."""
        selected = int(self.cleaned_data['value'])
        return self.batches[selected - 1]

    def render_extra_ussd(self):
        """Plain-text batch options for the USSD screen."""
        # List comprehension instead of map(lambda ...): map() is lazy on
        # Python 3, so the side-effect form silently rendered nothing there.
        text = ['%s: %s' % choice for choice in self.fields['value'].choices]
        return mark_safe('\n'.join(text))

    def render_extra_ussd_html(self):
        """HTML preview of the USSD batch options."""
        text = ['%s: %s' % choice for choice in self.fields['value'].choices]
        return mark_safe('<br />'.join(text))
class SelectInterviewerForm(forms.Form):
    """Simple picker for an Interviewer record."""

    interviewer = forms.ModelChoiceField(queryset=Interviewer.objects.all())

    class Meta:
        # NOTE(review): plain forms.Form ignores an inner Meta, so this
        # widget override likely never takes effect — confirm and move the
        # widget onto the field declaration if intended.
        widgets = {
            'interviewer': forms.Select(attrs={'class': 'chzn-select', }),
        }
| {
"repo_name": "unicefuganda/uSurvey",
"path": "survey/forms/answer.py",
"copies": "1",
"size": "16166",
"license": "bsd-3-clause",
"hash": -3717709061841165000,
"line_mean": 39.0148514851,
"line_max": 119,
"alpha_frac": 0.5784980824,
"autogenerated": false,
"ratio": 4.195691668829483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010532474340223376,
"num_lines": 404
} |
__author__ = 'anthony'
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User, Permission, Group
from django.contrib.contenttypes.models import ContentType
from survey.models import AnswerAccessDefinition, AutoResponse, NumericalAnswer, TextAnswer, \
MultiChoiceAnswer, MultiSelectAnswer, ImageAnswer, GeopointAnswer, DateAnswer, AudioAnswer, VideoAnswer, \
USSDAccess, ODKAccess, WebAccess
class Command(BaseCommand):
    """Seed the default permissions and user groups used across the app.

    Idempotent: every object is created with get_or_create, so the command
    is safe to re-run.
    """

    help = 'Creates default parameters'

    # (codename, human-readable name) of every permission this command owns.
    # All are attached to the auth User content type.
    PERMISSIONS = [
        ('can_enter_data', 'Can enter data'),
        ('can_view_batches', 'Can view Batches'),
        ('can_view_interviewers', 'Can view Interviewers'),
        ('can_view_aggregates', 'Can view Aggregates'),
        ('view_completed_survey', 'Can view Completed Surveys'),
        ('can_view_househs', 'Can view Households'),
        ('can_view_locations', 'Can view Locations'),
        ('can_view_users', 'Can view Users'),
        ('can_receive_email', 'Can Receive Email'),
        ('can_have_super_powers', 'Can Have Super Powers'),
    ]

    # Group name -> permission codenames granted to that group.
    GROUP_PERMISSIONS = [
        ('Administrator', ['can_enter_data', 'can_view_aggregates',
                           'can_view_batches', 'view_completed_survey',
                           'can_view_househs', 'can_view_interviewers',
                           'can_view_locations', 'can_view_users',
                           'can_receive_email', 'can_have_super_powers']),
        ('Researcher', ['can_enter_data', 'can_view_aggregates',
                        'can_view_batches', 'view_completed_survey',
                        'can_view_househs', 'can_view_interviewers',
                        'can_view_locations', 'can_receive_email']),
        ('Supervisor', ['can_enter_data', 'can_view_aggregates',
                        'can_view_batches', 'view_completed_survey',
                        'can_view_househs', 'can_view_locations']),
        ('Data collector', ['can_enter_data', 'can_view_batches']),
        ('Viewer', ['can_view_aggregates', 'can_view_batches',
                    'view_completed_survey', 'can_receive_email']),
        ('Data Email Reports', ['can_receive_email']),
    ]

    def handle(self, *args, **kwargs):
        """Create all permissions, then all groups with their grants."""
        content_type = ContentType.objects.get_for_model(User)
        # Create (or fetch) each permission once, keyed by codename.
        permissions = {}
        for codename, name in self.PERMISSIONS:
            permissions[codename], _ = Permission.objects.get_or_create(
                codename=codename, name=name, content_type=content_type)
        # Wire each group to its permission set (add() is idempotent).
        for group_name, codenames in self.GROUP_PERMISSIONS:
            group, _ = Group.objects.get_or_create(name=group_name)
            for codename in codenames:
                group.permissions.add(permissions[codename])
        # ussd definition
        AnswerAccessDefinition.reload_answer_categories()
| {
"repo_name": "unicefuganda/uSurvey",
"path": "survey/management/commands/load_parameters.py",
"copies": "1",
"size": "4593",
"license": "bsd-3-clause",
"hash": 5453410104468545000,
"line_mean": 56.4125,
"line_max": 110,
"alpha_frac": 0.6886566514,
"autogenerated": false,
"ratio": 3.704032258064516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9877932746564417,
"avg_score": 0.0029512325800199087,
"num_lines": 80
} |
__author__ = 'anthony <>'
from django.db import models
from survey.models.base import BaseModel
from survey.models.generics import TemplateQuestion
from survey.models.questions import Question, QuestionSet, QuestionOption, QuestionFlow
from survey.models.interviews import Answer
from survey.models.interviews import MultiChoiceAnswer
class ParameterTemplate(TemplateQuestion):
    """Template question used as a grouping parameter for respondent groups."""

    class Meta:
        app_label = 'survey'

    def __unicode__(self):
        return self.identifier
class RespondentGroup(BaseModel):
    """A named group of respondents, defined by conditions on parameter
    questions (see RespondentGroupCondition)."""

    name = models.CharField(max_length=50)
    description = models.TextField()

    def has_interviews(self):
        """True if any batch question of this group already has interviews —
        i.e. the group's data is in use."""
        from survey.models import Interview
        return self.questions.exists() and Interview.objects.filter(
            question_set__pk=self.questions.first().qset.pk).exists()

    def remove_related_questions(self):
        # NOTE(review): 'question_templates' related manager is not visible
        # in this module — confirm it exists before relying on this method.
        self.question_templates.all().delete()

    def __unicode__(self):
        return self.name

    def parameter_questions(self):
        """All parameter templates referenced by this group's conditions."""
        return ParameterTemplate.objects.filter(group_condition__respondent_group=self)
class RespondentGroupCondition(BaseModel):
    """One membership test for a RespondentGroup: a parameter question, a
    validator name and the validator's positional arguments."""

    VALIDATION_TESTS = [(validator.__name__, validator.__name__)
                        for validator in Answer.validators()]
    respondent_group = models.ForeignKey(
        RespondentGroup, related_name='group_conditions')
    test_question = models.ForeignKey(
        ParameterTemplate,
        related_name='group_condition')
    validation_test = models.CharField(
        max_length=200, null=True, blank=True, choices=VALIDATION_TESTS)

    class Meta:
        app_label = 'survey'

    @property
    def test_params(self):
        """Raw argument values for the validation test, in positional order."""
        return [t.param for t in self.test_arguments]

    def params_display(self):
        """Human-readable argument values (option text for multichoice).

        Bug fix: previously referenced the non-existent attributes
        ``self.text_arguments`` and ``self.question`` (AttributeError at
        runtime); the actual members are ``test_arguments`` and
        ``test_question``.
        """
        params = []
        for arg in self.test_arguments:
            if self.test_question.answer_type == MultiChoiceAnswer.choice_name():
                # For multichoice, args store option orders; show the text.
                params.append(self.test_question.options.get(order=arg.param).text)
            else:
                params.append(arg.param)
        return params

    @property
    def test_arguments(self):
        """GroupTestArgument rows for this condition, ordered by position."""
        return GroupTestArgument.objects.filter(
            group_condition=self).order_by('position')
class GroupTestArgument(BaseModel):
    """Positional argument value feeding a RespondentGroupCondition's
    validation test."""

    group_condition = models.ForeignKey(
        RespondentGroupCondition,
        related_name='arguments')
    position = models.PositiveIntegerField()
    param = models.CharField(max_length=100)

    def __unicode__(self):
        return self.param

    class Meta:
        app_label = 'survey'
        get_latest_by = 'position'
class ParameterQuestion(Question):
    """Question attached to a batch's parameter list; when the list is
    exhausted, flow continues into the batch proper."""

    def next_question(self, reply):
        # Fall through to the owning batch's first question when the
        # parameter list has no further question of its own.
        # NOTE(review): 'e_qset' is not defined in this module — presumably a
        # Question property resolving the exact QuestionSet subtype; confirm
        # (possible typo for 'qset').
        next_question = super(ParameterQuestion, self).next_question(reply)
        if next_question is None and self.e_qset.batch:
            next_question = self.e_qset.batch.start_question
        return next_question
class SurveyParameterList(
        QuestionSet):  # basically used to tag survey grouping questions
    """Per-batch question set holding the group parameter questions."""

    batch = models.OneToOneField(
        'Batch',
        related_name='parameter_list',
        null=True,
        blank=True)

    @property
    def parameters(self):
        """All ParameterQuestions belonging to this list."""
        return ParameterQuestion.objects.filter(qset__id=self.id)

    class Meta:
        app_label = 'survey'

    @classmethod
    def update_parameter_list(cls, batch):
        """Updates the parameter list for this batch.
        Basically checks all the target_groups registered in this batch and ensures required parameter list is updated.
        Presently because the entire group parameters required in a batch would typically be less than 10, The strategy
        employed here shall be to delete all parameters and create new when called.
        Questions in returned question set does not necessarily belong to any flow.
        :param batch:
        :return:
        """
        param_list, _ = cls.objects.get_or_create(batch=batch)
        param_list.questions.all().delete()
        # now create a new
        target_groups = RespondentGroup.objects.filter(
            questions__qset__id=batch.id)
        question_ids = []
        # loop through target_groups to get required template parameters.
        # Explicit loops instead of map(lambda ...): map() is lazy on
        # Python 3, so the side-effect form would collect nothing there.
        for group in target_groups:
            for condition in group.group_conditions.all():
                question_ids.append(condition.test_question.id)
        parameters = ParameterTemplate.objects.filter(
            id__in=question_ids).order_by('identifier')
        prev_question = None
        for param in parameters:
            # going this route because param questions typically would be small
            question = ParameterQuestion(**{'identifier': param.identifier,
                                            'text': param.text,
                                            'answer_type': param.answer_type,
                                            'qset': param_list})
            question.save()
            # chain the created questions into a simple linear flow
            if prev_question:
                QuestionFlow.objects.create(
                    question=prev_question, next_question=question)
            prev_question = question
            if question.answer_type in [
                    MultiChoiceAnswer.choice_name(),
                    MultiChoiceAnswer]:
                # copy the template's options onto the new question
                for option in param.options.all():
                    QuestionOption.objects.create(
                        order=option.order, text=option.text, question=question)
        return param_list
# class SurveyParameter(Question): # this essentially shall be made from copying from survey paramaters.
# pass # It is required to
| {
"repo_name": "unicefuganda/uSurvey",
"path": "survey/models/respondents.py",
"copies": "1",
"size": "5585",
"license": "bsd-3-clause",
"hash": -3146114916379316000,
"line_mean": 35.2662337662,
"line_max": 119,
"alpha_frac": 0.6349149508,
"autogenerated": false,
"ratio": 4.574119574119574,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5709034524919574,
"avg_score": null,
"num_lines": null
} |
__author__ = 'anthony <>'
from django.db import models
from survey.models.questions import Question
from survey.models.respondents import RespondentGroup, SurveyParameterList
class BatchQuestion(Question):
    """Question belonging to a batch, optionally restricted to a respondent
    group and tagged with a question module."""

    group = models.ForeignKey(
        RespondentGroup,
        related_name='questions',
        null=True,
        blank=True,
        on_delete=models.SET_NULL)
    module = models.ForeignKey(
        "QuestionModule",
        related_name="questions",
        default='',
        on_delete=models.SET_NULL,
        null=True,
        blank=True)

    def save(self, *args, **kwargs):
        # Keep the batch's parameter list in sync whenever a grouped
        # question is saved.  NOTE(review): stock Django Model.save()
        # returns None, so 'instance' is normally None here — confirm no
        # caller relies on the return value.
        instance = super(BatchQuestion, self).save(*args, **kwargs)
        update_parameter_list(self)
        return instance
def update_parameter_list(batch_question):
    """Refresh the owning batch's SurveyParameterList when this question's
    group is registered against the question set."""
    # check if this group has been previously assigned to this Question set.
    from survey.models import Batch
    if batch_question.group and RespondentGroup.objects.filter(questions__qset__id=batch_question.qset.id,
                                                               id=batch_question.group.id).exists():
        # NOTE(review): Batch.get is presumably a model-level helper (not the
        # default manager's objects.get) — confirm it exists on Batch.
        SurveyParameterList.update_parameter_list(Batch.get(pk=batch_question.qset.pk))
| {
"repo_name": "unicefuganda/uSurvey",
"path": "survey/models/batch_questions.py",
"copies": "1",
"size": "1178",
"license": "bsd-3-clause",
"hash": -4441865973716074500,
"line_mean": 34.696969697,
"line_max": 106,
"alpha_frac": 0.6502546689,
"autogenerated": false,
"ratio": 4.133333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5283588002233334,
"avg_score": null,
"num_lines": null
} |
__author__ = 'anthony <>'
from model_utils.managers import InheritanceManager
from django.db import models
from survey.models.base import BaseModel
from survey.models.response_validation import ResponseValidation
from survey.models.interviews import Answer
class GenericQuestion(BaseModel):
    """Abstract base for all question models: identifier, text, answer type
    and an optional response validation rule."""

    # One choice per registered Answer subclass (Numerical, Text, ...).
    ANSWER_TYPES = [(name, name) for name in Answer.answer_types()]
    identifier = models.CharField(max_length=100, verbose_name='Variable Name')
    text = models.CharField(max_length=250)
    answer_type = models.CharField(
        max_length=100, blank=False, null=False, choices=ANSWER_TYPES)
    response_validation = models.ForeignKey(ResponseValidation, related_name='%(class)s', null=True, blank=True,
                                            verbose_name='Validation Rule')

    @classmethod
    def type_name(cls):
        """Human-readable model name for display purposes."""
        return cls._meta.verbose_name.title()

    class Meta:
        abstract = True

    def validators(self):
        """Validator callables applicable to this question's answer type."""
        return Answer.get_class(self.answer_type).validators()

    def validator_names(self):
        """Names of the applicable validators (used for choice lists)."""
        return [v.__name__ for v in Answer.get_class(self.answer_type).validators()]

    def odk_constraint(self):
        """ODK constraint expression from the validation rule, if any."""
        if self.response_validation:
            return self.response_validation.get_odk_constraint(self)

    def odk_constraint_msg(self):
        """Constraint violation message from the validation rule, if any."""
        # NOTE(review): 'dconstraint_message' matches usage elsewhere in the
        # app, but the name looks like a typo of 'constraint_message' —
        # confirm against ResponseValidation.
        if self.response_validation:
            return self.response_validation.dconstraint_message
class TemplateQuestion(GenericQuestion):
    """Concrete, reusable question template.

    InheritanceManager lets queries return subclass instances directly.
    """
    objects = InheritanceManager()
    class Meta:
        abstract = False
class TemplateOption(BaseModel):
    """A selectable option belonging to a TemplateQuestion."""
    question = models.ForeignKey(
        TemplateQuestion,
        null=True,
        related_name="options")
    text = models.CharField(max_length=150, blank=False, null=False)
    # Display position of the option within its question.
    order = models.PositiveIntegerField()
    @property
    def to_text(self):
        # "order: text" rendering, e.g. "1: Yes".
        return "%d: %s" % (self.order, self.text)
| {
"repo_name": "unicefuganda/uSurvey",
"path": "survey/models/generics.py",
"copies": "1",
"size": "1855",
"license": "bsd-3-clause",
"hash": -6978053095705062000,
"line_mean": 31.5438596491,
"line_max": 112,
"alpha_frac": 0.6819407008,
"autogenerated": false,
"ratio": 4.103982300884955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5285923001684956,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Anthony'
"""
Solve the unique lowest-cost assignment problem using the
Hungarian algorithm (also known as Munkres algorithm).
"""
# Based on original code by Brain Clapper, adapted to NumPy by Gael Varoquaux.
# Heavily refactored by Lars Buitinck.
# Copyright (c) 2008 Brian M. Clapper <bmc@clapper.org>, Gael Varoquaux
# Author: Brian M. Clapper, Gael Varoquaux
# LICENSE: BSD
import numpy as np
def linear_assignment(X):
    """Solve the linear assignment problem using the Hungarian algorithm.

    Also known as maximum weight matching in bipartite graphs, or the
    Munkres / Kuhn-Munkres algorithm.

    Parameters
    ----------
    X : array
        The cost matrix of the bipartite graph

    Returns
    -------
    indices : array
        The pairs of (row, col) indices in the original array giving
        the original ordering.
    """
    pairs = _hungarian(X).tolist()
    pairs.sort()
    # An empty result would otherwise come back as a 1-D float array; force
    # integer dtype and a two-column shape so callers always receive an
    # (n, 2) index array.
    pairs = np.array(pairs, dtype=int)
    pairs.shape = (-1, 2)
    return pairs
class _HungarianState(object):
"""State of one execution of the Hungarian algorithm.
Parameters
----------
cost_matrix : 2D matrix
The cost matrix. Does not have to be square.
"""
def __init__(self, cost_matrix):
cost_matrix = np.atleast_2d(cost_matrix)
# If there are more rows (n) than columns (m), then the algorithm
# will not be able to work correctly. Therefore, we
# transpose the cost function when needed. Just have to
# remember to swap the result columns back later.
transposed = (cost_matrix.shape[1] < cost_matrix.shape[0])
if transposed:
self.C = (cost_matrix.T).copy()
else:
self.C = cost_matrix.copy()
self.transposed = transposed
# At this point, m >= n.
n, m = self.C.shape
self.row_uncovered = np.ones(n, dtype=np.bool)
self.col_uncovered = np.ones(m, dtype=np.bool)
self.Z0_r = 0
self.Z0_c = 0
self.path = np.zeros((n + m, 2), dtype=int)
self.marked = np.zeros((n, m), dtype=int)
def _find_prime_in_row(self, row):
"""
Find the first prime element in the specified row. Returns
the column index, or -1 if no starred element was found.
"""
col = np.argmax(self.marked[row] == 2)
if self.marked[row, col] != 2:
col = -1
return col
def _clear_covers(self):
"""Clear all covered matrix cells"""
self.row_uncovered[:] = True
self.col_uncovered[:] = True
def _hungarian(cost_matrix):
    """The Hungarian algorithm.

    Calculate the Munkres solution to the classical assignment problem and
    return the indices for the lowest-cost pairings.

    Parameters
    ----------
    cost_matrix : 2D matrix
        The cost matrix. Does not have to be square.

    Returns
    -------
    indices : 2D array of indices
        The pairs of (row, col) indices in the original array giving
        the original ordering.
    """
    state = _HungarianState(cost_matrix)
    # A zero-length dimension means there is nothing to assign at all.
    step = _step1 if 0 not in cost_matrix.shape else None
    # Each step returns the next step function, or None when done.
    while step is not None:
        step = step(state)
    # Starred entries (marked == 1) are the final assignments.
    results = np.array(np.where(state.marked == 1)).T
    # Undo the transpose performed by _HungarianState, if any.
    if state.transposed:
        results = results[:, ::-1]
    return results
# Individual steps of the algorithm follow, as a state machine: they return
# the next step to be taken (function to be called), if any.
def _step1(state):
    """Steps 1 and 2 in the Wikipedia page."""
    # Step 1: subtract each row's minimum from every element of that row.
    state.C -= state.C.min(axis=1)[:, np.newaxis]
    # Step 2: star each zero whose row and column contain no starred zero
    # yet, covering that row and column as we go.
    for r, c in zip(*np.where(state.C == 0)):
        if state.col_uncovered[c] and state.row_uncovered[r]:
            state.marked[r, c] = 1
            state.col_uncovered[c] = False
            state.row_uncovered[r] = False
    state._clear_covers()
    return _step3
def _step3(state):
    """
    Cover each column containing a starred zero. If n columns are covered,
    the starred zeros describe a complete set of unique assignments.
    In this case, Go to DONE, otherwise, Go to Step 4.
    """
    starred = (state.marked == 1)
    state.col_uncovered[np.any(starred, axis=0)] = False
    # Fewer stars than rows: the assignment is incomplete, keep going.
    if starred.sum() < state.C.shape[0]:
        return _step4
def _step4(state):
    """
    Find a noncovered zero and prime it. If there is no starred zero
    in the row containing this primed zero, Go to Step 5. Otherwise,
    cover this row and uncover the column containing the starred
    zero. Continue in this manner until there are no uncovered zeros
    left. Save the smallest uncovered value and Go to Step 6.
    """
    # We convert to int as numpy operations are faster on int.
    # BUG FIX: the `np.int` alias was removed in NumPy 1.20+; the builtin
    # `int` is the documented replacement.
    C = (state.C == 0).astype(int)
    # Mask out zeros lying in covered rows/columns.
    covered_C = C * state.row_uncovered[:, np.newaxis]
    covered_C *= state.col_uncovered.astype(int)
    n = state.C.shape[0]
    m = state.C.shape[1]
    while True:
        # Find an uncovered zero
        row, col = np.unravel_index(np.argmax(covered_C), (n, m))
        if covered_C[row, col] == 0:
            # No uncovered zero left: adjust the matrix in step 6.
            return _step6
        else:
            state.marked[row, col] = 2
            # Find the first starred element in the row
            star_col = np.argmax(state.marked[row] == 1)
            if not state.marked[row, star_col] == 1:
                # Could not find one: augment the matching in step 5.
                state.Z0_r = row
                state.Z0_c = col
                return _step5
            else:
                col = star_col
                state.row_uncovered[row] = False
                state.col_uncovered[col] = True
                covered_C[:, col] = C[:, col] * (
                    state.row_uncovered.astype(int))
                covered_C[row] = 0
def _step5(state):
    """
    Construct a series of alternating primed and starred zeros as follows.
    Let Z0 represent the uncovered primed zero found in Step 4.
    Let Z1 denote the starred zero in the column of Z0 (if any).
    Let Z2 denote the primed zero in the row of Z1 (there will always be one).
    Continue until the series terminates at a primed zero that has no starred
    zero in its column. Unstar each starred zero of the series, star each
    primed zero of the series, erase all primes and uncover every line in the
    matrix. Return to Step 3
    """
    # The augmenting path is accumulated in state.path as (row, col) pairs,
    # alternating primed (marked == 2) and starred (marked == 1) positions.
    count = 0
    path = state.path
    path[count, 0] = state.Z0_r
    path[count, 1] = state.Z0_c
    while True:
        # Find the first starred element in the col defined by
        # the path.
        row = np.argmax(state.marked[:, path[count, 1]] == 1)
        if not state.marked[row, path[count, 1]] == 1:
            # Could not find one: the alternating series is complete.
            break
        else:
            count += 1
            path[count, 0] = row
            path[count, 1] = path[count - 1, 1]
        # Find the first prime element in the row defined by the
        # first path step
        col = np.argmax(state.marked[path[count, 0]] == 2)
        if state.marked[row, col] != 2:
            col = -1
        count += 1
        path[count, 0] = path[count - 1, 0]
        path[count, 1] = col
    # Convert paths: flip stars/primes along the path, which grows the
    # number of starred zeros (i.e. assignments) by one.
    for i in range(count + 1):
        if state.marked[path[i, 0], path[i, 1]] == 1:
            state.marked[path[i, 0], path[i, 1]] = 0
        else:
            state.marked[path[i, 0], path[i, 1]] = 1
    state._clear_covers()
    # Erase all prime markings
    state.marked[state.marked == 2] = 0
    return _step3
def _step6(state):
    """
    Add the value found in Step 4 to every element of each covered row,
    and subtract it from every element of each uncovered column.
    Return to Step 4 without altering any stars, primes, or covered lines.
    """
    # Only adjust when at least one row and one column are still uncovered.
    if np.any(state.row_uncovered) and np.any(state.col_uncovered):
        # Smallest uncovered value in the matrix.
        smallest = np.min(state.C[state.row_uncovered], axis=0)
        smallest = np.min(smallest[state.col_uncovered])
        state.C[~state.row_uncovered] += smallest
        state.C[:, state.col_uncovered] -= smallest
    return _step4
"repo_name": "amlozano1/kalman_car_counter",
"path": "hungarian.py",
"copies": "1",
"size": "9370",
"license": "mit",
"hash": 7571692663038496000,
"line_mean": 32.4678571429,
"line_max": 78,
"alpha_frac": 0.614941302,
"autogenerated": false,
"ratio": 3.654446177847114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47693874798471136,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from utils.distances import euclidianDist
class Kmeans:
    def __init__(self, data, k=1, distance=euclidianDist):
        '''
        :param data: Training data set with samples as row elements
        :param k: number of clusters
        :param distance: metric of the space (default: euclidianDist).
            Use euclidianDist for k-means, manhattanDist for k-median, or for
            k-medoids any metric function returning d[i, j], the distance
            between point i and center j.
        '''
        if k < 1:
            raise Exception("[kmeans][init] k must be greater than zero")
        self.data = data
        self.k = k
        self.distance = distance

    def clustering(self, data):
        '''
        Assign every data point to its nearest center.
        :param data: set of data points as row vectors
        :return: ID of the cluster for each data point
        '''
        return np.argmin(self.distance(data, self.centers), axis=1)

    def train(self, nbIte=100):
        '''
        Lloyd's algorithm with pure numpy.
        :param nbIte: Maximum number of iterations if convergence is not reached before
        '''
        def __hasConverged(c1, c2):
            '''Convergence test: the two center sets coincide.'''
            return set(tuple(c) for c in c1) == set(tuple(c) for c in c2)

        def __update(points, assignment, K):
            '''Move each center to the mean of the points assigned to it.'''
            return np.array([np.mean(points[assignment == k], axis=0) for k in range(K)])

        # Initialise both the current and the candidate centers with
        # distinct random training points.
        picked = np.random.choice(np.shape(self.data)[0], self.k, replace=False)
        self.centers = self.data[picked]
        picked = np.random.choice(np.shape(self.data)[0], self.k, replace=False)
        candidates = self.data[picked]
        iteration = 0
        while not __hasConverged(self.centers, candidates) and iteration < nbIte:
            self.centers = candidates
            assignment = self.clustering(self.data)
            candidates = __update(self.data, assignment, self.k)
            iteration += 1
if __name__ == "__main__":
    # Demo: cluster three 3-D Gaussian blobs and plot the result.
    # The Axes3D import registers the '3d' projection with matplotlib.
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    # Three blobs with identity covariance, 100 samples each.
    dataA = np.random.multivariate_normal([-2, 0, 1], [[1,0,0],[0,1,0],[0,0,1]], 100)
    dataB = np.random.multivariate_normal([+3, 0, 2], [[1,0,0],[0,1,0],[0,0,1]], 100)
    dataC = np.random.multivariate_normal([0, +2, 0], [[1,0,0],[0,1,0],[0,0,1]], 100)
    data = np.concatenate((dataA, dataB, dataC))
    # shuffle
    p = np.random.permutation(np.shape(data)[0])
    data = data[p]
    km = Kmeans(data, k=3)
    km.train()
    # Points coloured by assigned cluster; centers drawn in red.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=km.clustering(data))
    ax.scatter(km.centers[:, 0], km.centers[:, 1], km.centers[:, 2], c='r', s=100)
    plt.show()
| {
"repo_name": "antoinebrl/practice-ML",
"path": "kmeans.py",
"copies": "1",
"size": "3022",
"license": "mit",
"hash": 1422231827983793200,
"line_mean": 36.3086419753,
"line_max": 96,
"alpha_frac": 0.5929847783,
"autogenerated": false,
"ratio": 3.434090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4527075687390909,
"avg_score": null,
"num_lines": null
} |
import numpy as np
class KNN:
    '''K-Nearest Neighbors classifier.

    Stores the training set verbatim and classifies by majority vote among
    the k closest training samples (squared Euclidean distance).
    '''
    def __init__(self, inputs, targets, k=1):
        '''
        :param inputs: training samples as row vectors
        :param targets: integer class labels, one per training sample
        :param k: number of neighbours taking part in the vote
        '''
        if k < 1:
            raise Exception("[KNN][init] k must be greater than zero")
        self.inputs = inputs
        self.targets = targets
        self.k = k

    def train(self):
        # Lazy learner: nothing to fit, all work happens in predict().
        return

    def predict(self, data):
        '''Majority label among the k nearest training samples of each row.'''
        # Pairwise squared distances, shape (n_queries, n_train).
        deltas = data[:, np.newaxis] - self.inputs
        sqDist = np.sum(deltas ** 2, axis=2)
        # Labels of the k closest training points per query.
        neighbourLabels = np.take(self.targets, np.argsort(sqDist)[:, 0:self.k])
        # Per-row label histogram; argmax picks the most frequent label
        # (ties resolved in favour of the smallest label).  Memory-hungry.
        counts = np.apply_along_axis(np.bincount, axis=1, arr=neighbourLabels,
                                     minlength=np.max(self.targets) + 1)
        return counts.argmax(axis=1)
if __name__ == "__main__":
    # Demo: colour the plane by 1-NN decision regions of four prototypes.
    import matplotlib.pyplot as plt
    # Four Gaussian blobs, one per quadrant, 500 samples each.
    dataA = np.random.multivariate_normal([2, -2], [[1,0],[0,1]], 500)
    dataB = np.random.multivariate_normal([-2, 2], [[1,0],[0,1]], 500)
    dataC = np.random.multivariate_normal([2, 2], [[1,0],[0,1]], 500)
    dataD = np.random.multivariate_normal([-2, -2], [[1,0],[0,1]], 500)
    data = np.concatenate((dataA, dataB, dataC, dataD))
    # shuffle
    p = np.random.permutation(np.shape(data)[0])
    data = data[p]
    # One prototype per class.
    training = np.array([[-1,-1], [-1,1], [1,-1], [1,1]])
    classes = np.array([[0],[1],[2],[3]])
    knn = KNN(training, classes, k=1)
    c = knn.predict(data)
    # Dense grid covering the plot area, classified point by point.
    x = np.arange(-6, 6, 0.01)
    y = np.arange(-4, 4, 0.01)
    xx0, yy0 = np.meshgrid(x, y)
    xx = np.reshape(xx0, (xx0.shape[0]*xx0.shape[1],1))
    yy = np.reshape(yy0, (yy0.shape[0]*yy0.shape[1],1))
    grid = np.concatenate((xx,yy), axis=1)
    area = knn.predict(grid)
    # Samples coloured by predicted class, with decision boundaries drawn.
    plt.scatter(data[:,0], data[:,1], c=c, s=30)
    plt.contour(xx0, yy0, area.reshape(xx0.shape))
    plt.show()
| {
"repo_name": "antoinebrl/practice-ML",
"path": "knn.py",
"copies": "1",
"size": "2038",
"license": "mit",
"hash": 2520039606085003300,
"line_mean": 34.1379310345,
"line_max": 141,
"alpha_frac": 0.5961727184,
"autogenerated": false,
"ratio": 2.7841530054644807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38803257238644806,
"avg_score": null,
"num_lines": null
} |
import numpy as np
class LinReg:
    '''Linear Regression fitted by ordinary least squares.'''

    def __init__(self, inputs, targets):
        '''
        :param inputs: data points as row vectors (a bias column is appended
            internally)
        :param targets: target values, one row per sample
        '''
        self.inputs = self.__addColumn(inputs)  # add bias
        self.targets = targets
        # Normal equations (X^T X) W = X^T y, solved directly instead of
        # forming the explicit inverse: identical solution for non-singular
        # systems, but numerically more stable and cheaper.
        gram = np.dot(np.transpose(self.inputs), self.inputs)
        moment = np.dot(np.transpose(self.inputs), targets)
        self.W = np.linalg.solve(gram, moment)

    def __addColumn(self, inputs):
        '''Insert column with ones (bias/intercept term)'''
        return np.concatenate((inputs, np.ones((np.shape(inputs)[0], 1))), axis=1)

    def eval(self, inputs=None):
        '''Predicted values for the given inputs (training inputs if None).'''
        if inputs is None:
            inputs = self.inputs
        else:
            inputs = self.__addColumn(inputs)  # add bias
        return np.dot(inputs, self.W)

    def error(self, inputs=None, targets=None):
        '''RSS. Residual sum of squares on the given set (training set if None).'''
        if inputs is None or targets is None:
            inputs = self.inputs
            targets = self.targets
        else:
            inputs = self.__addColumn(inputs)  # add bias
        output = np.dot(inputs, self.W)
        error = np.sum((output - targets)**2)
        return error
if __name__ == "__main__":
    # Demo: fit a linear model to the OR truth table (Python 2 script:
    # bare print statements).
    inputs = np.array([[0,0], [0,1], [1,0], [1,1]])
    ORtargets = np.array([[0], [1], [1], [1]])
    linreg = LinReg(inputs, ORtargets)
    output = linreg.eval(inputs)
    print "Regression :"
    print output
    print "Error :", linreg.error()
    # Threshold the regression output at 0.5 to obtain class labels.
    classification = np.where(output >= 0.5, 1, 0)
    print "Classification :"
    print classification
| {
"repo_name": "antoinebrl/practice-ML",
"path": "linreg.py",
"copies": "1",
"size": "1977",
"license": "mit",
"hash": -1559392116206620400,
"line_mean": 30.380952381,
"line_max": 120,
"alpha_frac": 0.6064744562,
"autogenerated": false,
"ratio": 3.4929328621908127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4599407318390813,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import math
class Node:
    '''Internal decision-tree node: tests one attribute and branches on its value.'''

    def __init__(self, attr, child=None):
        '''
        :param attr: index of the attribute this node tests
        :param child: optional {attribute value -> subtree} mapping
        '''
        self.attr = attr
        # `is None` (identity), not `== None`: the idiomatic null test, and
        # immune to objects overriding __eq__.
        self.child = {} if child is None else child

    def addChild(self, value, node):
        '''Attach `node` as the subtree taken when the attribute equals `value`.'''
        self.child[value] = node

    def __repr__(self):
        return repr(self.__dict__)
class Leaf:
    '''Terminal decision-tree node holding a predicted label.'''

    def __init__(self, value):
        '''Store the chosen label.'''
        self.value = value

    def __repr__(self):
        return str(self.value)
class Dtree:
    '''Decision tree trained with the ID3 heuristic.'''

    def __init__(self, split='best', nbFeatures=None, featureSelection='rand'):
        '''
        :param split: Split rule :
            'best' : choose the best attribute not used
            'rand' : randomly select an attribute not used
        :param nbFeatures: Number of attributes to consider :
            None : all attributes will be used
            n int: the number of selected features (limited by the actual number of features)
        :param featureSelection: strategy for feature selection
            'order': take the n first ones
            'rand' : choose n random features.
        '''
        # Strings are compared with ==, not `is`: identity of string
        # literals is a CPython interning artefact (SyntaxWarning since 3.8).
        if split != 'best' and split != 'rand':
            raise Exception("[Dtree][__init__] unrecognized splitter")
        self.split = split
        self.nbFeatures = nbFeatures
        # BUG FIX: the original re-tested `split` against 'order'/'rand'
        # here, so any split other than 'rand' (including the default
        # 'best') was always rejected.  The check is meant to validate
        # `featureSelection`.
        if featureSelection != 'order' and featureSelection != 'rand':
            raise Exception("[Dtree][__init__] unrecognized feature selection rule")
        self.featureSelection = featureSelection

    def histogram(self, data):
        ''':return: elements present in data and their relative frequencies'''
        values, freq = np.unique(data, return_counts=True)
        # BUG FIX: `np.float` was removed from NumPy; builtin float is the
        # documented replacement.
        freq = freq.astype(float)
        freq /= np.sum(freq)
        return values, freq

    def mostCommon(self, data):
        ''':return: value of the most common element'''
        values, freq = self.histogram(data)
        return values[np.argmax(freq)]

    def entropy(self, data):
        ''':return: Shannon entropy (bits) of the value distribution in data'''
        values, counts = self.histogram(data)
        clog2 = np.vectorize(lambda x: x * math.log(x, 2))
        p = clog2(counts)
        return - np.sum(p)

    def gain(self, input, attr, target):
        '''
        Information gain if we look at the component attr of the data set
        :param input: data set as a matrix. Samples as row elements
        :param attr: The index of the attribute to focus on
        :param target: targeted labels
        :return: entropy reduction : Ent(S) - sum{v in values(attr)}{p(v)*Ent(S|attr=v)}
        '''
        gain = self.entropy(target)
        attrValues, freq = self.histogram(input[:, attr])
        for v, p in zip(attrValues, freq):
            gain -= p * self.entropy(target[input[:, attr] == v])
        return gain

    def bestGain(self, input, mask, target):
        '''Identify the most interesting attribute
        :param mask: mask[i] == True if the i-th attribute is considered as already used
        :return: index of the most interesting attribute
        '''
        gains = [self.gain(input, i, target) if not mask[i] else 0.0 for i in range(input.shape[1])]
        return np.argmax(gains)

    def randAttribute(self, mask):
        '''Return an index lower than mask length with mask[index] == False'''
        if mask.all():
            return None
        idx = np.random.randint(mask.shape[0])
        while mask[idx]:
            idx = np.random.randint(mask.shape[0])
        return idx

    def train(self, data, target, maxDepth=1000000):
        '''Training algorithm based on the ID3 heuristic'''
        def buildTree(data, target, mask, maxDepth=1000000):
            '''
            :param mask: mask[i] == True if the i-th attribute is considered as already used
            :return: the generated (sub)tree
            '''
            # Degenerate/terminal cases: empty data, depth budget exhausted,
            # no attribute left, or a pure node.
            if data is None or data.ndim == 0:
                return Leaf(self.defaultTarget)
            if maxDepth < 1:
                return Leaf(self.mostCommon(target))
            if mask.all():
                return Leaf(self.mostCommon(target))
            if np.unique(target).shape[0] == 1:
                return Leaf(self.mostCommon(target))
            if self.split == 'best':
                att = self.bestGain(data, mask, target)
            elif self.split == 'rand':
                att = self.randAttribute(mask)
            newMask = np.copy(mask)
            newMask[att] = True
            values = np.unique(data[:, att])
            nd = Node(attr=att)
            for v in values:
                relevantIdx = (data[:, att] == v)
                subTree = buildTree(data[relevantIdx], target[relevantIdx],
                                    mask=newMask, maxDepth=maxDepth - 1)
                nd.addChild(v, subTree)
            return nd
        # Fallback label when prediction falls off the tree.
        self.defaultTarget = self.mostCommon(target)
        mask = np.full(data.shape[1], False, dtype=bool)
        # BUG FIX: when nbFeatures == data.shape[1] the original slice
        # mask[-0:] covered the WHOLE array and disabled every feature;
        # only restrict when strictly fewer features were requested.
        if self.nbFeatures is not None and self.nbFeatures < data.shape[1]:
            mask[-(data.shape[1] - self.nbFeatures):] = True
            if self.featureSelection == 'rand':
                np.random.shuffle(mask)
        self.tree = buildTree(data, target, mask, maxDepth)

    def predict(self, input, tree=None):
        ''':param tree: The tree to work on. Default None → self.tree is used.
        :return: column vector with one predicted label per sample of input'''
        def followTree(tree, x):
            if isinstance(tree, Leaf):
                return tree.value
            if tree is None:
                return self.defaultTarget
            # Unseen attribute value: fall back to the default label.
            if not x[tree.attr] in tree.child:
                return self.defaultTarget
            return followTree(tree.child[x[tree.attr]], x)
        if tree is None:
            tree = self.tree
        if input.ndim == 1:
            input = input[np.newaxis, :]
        return np.array([[followTree(tree, x)] for x in input])

    def eval(self, input, target, tree=None):
        '''Evaluation of the classification on the given set
        :param tree: The tree to work on. Default None → self.tree is used.
        :return: ratio of well classified samples'''
        output = self.predict(input=input, tree=tree)
        return np.sum(output == target).astype(float) / target.shape[0]

    def prune(self, validData, validTarget):
        '''Pruning algorithm to reduce complexity. The generated tree will be
        replaced if a smaller tree giving the same result on the validation
        set is found.'''
        def replaceNode(tree):
            '''Recursive method to generate all possible trees'''
            # Todo : find better pruning strategy
            if isinstance(tree, Leaf):
                return ()
            alternatives = (Leaf(self.defaultTarget),)
            for v in tree.child:
                for subTree in replaceNode(tree.child[v]):
                    newConnection = tree.child.copy()
                    newConnection[v] = subTree
                    alternatives += (Node(tree.attr, newConnection),)
            return alternatives
        defaultValidationError = self.eval(validData, validTarget)
        allPossibleTrees = replaceNode(self.tree)
        # Robustness: a Leaf-only tree has no alternatives; argmax on an
        # empty array would raise.
        if len(allPossibleTrees) == 0:
            return
        score = np.array([self.eval(validData, validTarget, tree) for tree in allPossibleTrees])
        score[score < defaultValidationError] = 0
        bestTreeIndex = np.argmax(score)
        if score[bestTreeIndex] != 0:
            self.tree = allPossibleTrees[bestTreeIndex]
if __name__ == "__main__":
    # Demo (Python 2 script: bare print statements).
    # The first small data set is immediately overwritten by the larger one.
    data = np.array([[1,2],[1,2],[2,1],[1,1]])
    target = np.array([[True], [True], [False], [True]])
    dt = Dtree(split='rand',nbFeatures=2, featureSelection='rand')
    # 125 samples with six categorical attributes.
    data = np.array([[1, 1, 1, 1, 3, 1],[1, 1, 1, 1, 3, 2],[1, 1, 1, 3, 2, 1],[1, 1, 1, 3, 3, 2],
        [1, 1, 2, 1, 2, 1],[1, 1, 2, 1, 2, 2],[1, 1, 2, 2, 3, 1],[1, 1, 2, 2, 4, 1],[1, 1, 2, 3, 1, 2],
        [1, 2, 1, 1, 1, 2],[1, 2, 1, 1, 2, 1],[1, 2, 1, 1, 3, 1],[1, 2, 1, 1, 4, 2],[1, 2, 1, 2, 1, 1],
        [1, 2, 1, 2, 3, 1],[1, 2, 1, 2, 3, 2],[1, 2, 1, 2, 4, 2],[1, 2, 1, 3, 2, 1],[1, 2, 1, 3, 4, 2],
        [1, 2, 2, 1, 2, 2],[1, 2, 2, 2, 3, 2],[1, 2, 2, 2, 4, 1],[1, 2, 2, 2, 4, 2],[1, 2, 2, 3, 2, 2],
        [1, 2, 2, 3, 3, 1],[1, 2, 2, 3, 3, 2],[1, 3, 1, 1, 2, 1],[1, 3, 1, 1, 4, 1],[1, 3, 1, 2, 2, 1],
        [1, 3, 1, 2, 4, 1],[1, 3, 1, 3, 1, 2],[1, 3, 1, 3, 2, 2],[1, 3, 1, 3, 3, 1],[1, 3, 1, 3, 4, 1],
        [1, 3, 1, 3, 4, 2],[1, 3, 2, 1, 2, 2],[1, 3, 2, 2, 1, 2],[1, 3, 2, 2, 2, 2],[1, 3, 2, 2, 3, 2],
        [1, 3, 2, 2, 4, 1],[1, 3, 2, 2, 4, 2],[1, 3, 2, 3, 1, 1],[1, 3, 2, 3, 2, 1],[1, 3, 2, 3, 4, 1],
        [1, 3, 2, 3, 4, 2],[2, 1, 1, 1, 3, 1],[2, 1, 1, 1, 3, 2],[2, 1, 1, 2, 1, 1],[2, 1, 1, 2, 1, 2],
        [2, 1, 1, 2, 2, 2],[2, 1, 1, 2, 3, 1],[2, 1, 1, 2, 4, 1],[2, 1, 1, 2, 4, 2],[2, 1, 1, 3, 4, 1],
        [2, 1, 2, 1, 2, 2],[2, 1, 2, 1, 3, 1],[2, 1, 2, 1, 4, 2],[2, 1, 2, 2, 3, 1],[2, 1, 2, 2, 4, 2],
        [2, 1, 2, 3, 2, 2],[2, 1, 2, 3, 4, 1],[2, 2, 1, 1, 2, 1],[2, 2, 1, 1, 2, 2],[2, 2, 1, 1, 3, 1],
        [2, 2, 1, 2, 3, 2],[2, 2, 1, 3, 1, 1],[2, 2, 1, 3, 1, 2],[2, 2, 1, 3, 2, 2],[2, 2, 1, 3, 3, 2],
        [2, 2, 1, 3, 4, 2],[2, 2, 2, 1, 1, 1],[2, 2, 2, 1, 3, 2],[2, 2, 2, 1, 4, 1],[2, 2, 2, 1, 4, 2],
        [2, 2, 2, 2, 2, 1],[2, 2, 2, 3, 4, 1],[2, 3, 1, 1, 1, 1],[2, 3, 1, 2, 1, 1],[2, 3, 1, 2, 3, 1],
        [2, 3, 1, 3, 1, 2],[2, 3, 1, 3, 3, 1],[2, 3, 1, 3, 4, 2],[2, 3, 2, 1, 3, 2],[2, 3, 2, 2, 1, 1],
        [2, 3, 2, 2, 1, 2],[2, 3, 2, 2, 2, 1],[2, 3, 2, 3, 3, 2],[3, 1, 1, 1, 1, 1],[3, 1, 1, 1, 1, 2],
        [3, 1, 1, 2, 1, 1],[3, 1, 1, 2, 2, 2],[3, 1, 1, 3, 2, 2],[3, 1, 2, 1, 1, 1],[3, 1, 2, 1, 2, 2],
        [3, 1, 2, 2, 2, 2],[3, 1, 2, 2, 3, 2],[3, 1, 2, 3, 2, 2],[3, 2, 1, 1, 1, 1],[3, 2, 1, 1, 4, 2],
        [3, 2, 1, 2, 1, 2],[3, 2, 1, 2, 4, 2],[3, 2, 2, 1, 1, 1],[3, 2, 2, 1, 1, 2],[3, 2, 2, 1, 3, 2],
        [3, 2, 2, 3, 1, 1],[3, 2, 2, 3, 2, 1],[3, 2, 2, 3, 4, 1],[3, 3, 1, 1, 1, 1],[3, 3, 1, 1, 2, 1],
        [3, 3, 1, 1, 4, 2],[3, 3, 1, 2, 3, 2],[3, 3, 1, 2, 4, 2],[3, 3, 1, 3, 1, 2],[3, 3, 1, 3, 2, 1],
        [3, 3, 1, 3, 2, 2],[3, 3, 1, 3, 4, 2],[3, 3, 2, 1, 1, 1],[3, 3, 2, 1, 3, 2],[3, 3, 2, 1, 4, 1],
        [3, 3, 2, 1, 4, 2],[3, 3, 2, 3, 1, 2],[3, 3, 2, 3, 2, 2],[3, 3, 2, 3, 3, 2],[3, 3, 2, 3, 4, 2]])
    # Boolean label for each sample above.
    target = np.array([[True],[True],[True],[True],[True],[True],[True],[True],[True],[True],
        [False],[False],[False],[True],[False],[False],[False],[False],[False],[False],[False],[False],
        [False],[False],[False],[False],[False],[False],[False],[False],[True],[False],[False],[False],
        [False],[False],[True],[False],[False],[False],[False],[True],[False],[False],[False],[False],
        [False],[True],[True],[False],[False],[False],[False],[False],[False],[False],[False],[False],
        [False],[False],[False],[True],[True],[True],[True],[True],[True],[True],[True],[True],[True],
        [True],[True],[True],[True],[True],[True],[True],[False],[True],[False],[False],[False],[True],
        [True],[False],[False],[True],[True],[True],[False],[False],[True],[False],[False],[False],
        [False],[True],[False],[True],[False],[True],[True],[False],[True],[False],[False],[True],
        [True],[True],[True],[True],[True],[True],[True],[True],[True],[True],[True],[True],[True],
        [True],[True],[True]])
    dt.train(data,target)
    # Residuals of the predictions on the training set: a single unique
    # value of 0 means a perfect fit.
    values, freq = np.unique(target - dt.predict(data), return_counts=True)
    print values
    print freq
    print dt.tree
    # NOTE(review): `t` is mutated but never used afterwards — presumably
    # the eval/prune calls below were meant to use it; confirm intent.
    t = target[0:20]
    t[0] = [False]
    print dt.eval(data[0:20], target[0:20])
    # Prune against the first 10 samples, then re-evaluate.
    dt.prune(data[0:10], target[0:10])
    print dt.tree
    print dt.eval(data[0:20], target[0:20])
| {
"repo_name": "antoinebrl/practice-ML",
"path": "dtree.py",
"copies": "1",
"size": "11660",
"license": "mit",
"hash": -1770192916410395100,
"line_mean": 45.8273092369,
"line_max": 101,
"alpha_frac": 0.5169811321,
"autogenerated": false,
"ratio": 3.046772929187353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8997757974454427,
"avg_score": 0.01319921736658523,
"num_lines": 249
} |
import numpy as np
from kmeans import Kmeans
from pcn import PCN
from utils.distances import euclidianDist
class RBF:
    '''Radial Basis Function Network. Can be used for classification or function approximation'''

    def __init__(self, inputs, targets, n=1, sigma=0, distance=euclidianDist,
                 weights=None, usage='class', normalization=False):
        '''
        :param inputs: set of data points as row vectors
        :param targets: set of targets as row vectors
        :param n: (int) number of weights.
        :param sigma: (float) spread of receptive fields
        :param distance: (function) compute metric between points
        :param weights: set of weights. If None, weights are generated with K-means algorithm.
            Otherwise provided weights are used no matter the value of n.
        :param usage: (string) Should be equal to 'class' for classification and 'fctapprox' for
            function approximation. Otherwise raise an error.
        :param normalization: (bool) If true, perform a normalization of the hidden layer.
        '''
        # BUG FIX (idiom): strings must be compared with ==/in, not `is` —
        # identity of string literals is a CPython interning artefact and
        # triggers a SyntaxWarning since Python 3.8.
        if usage not in ('class', 'fctapprox'):
            raise Exception('[RBF][__init__] the usage is unrecognized. Should be equal to '
                            '"class" for classification and "fctapprox" for function approximation')
        self.targets = targets
        self.inputs = inputs
        self.dist = distance
        self.n = n
        self.weights = weights
        self.usage = usage
        self.normalization = normalization
        if sigma == 0:
            # Heuristic spread: widest coordinate range scaled by sqrt(2n).
            self.sigma = (inputs.max(axis=0) - inputs.min(axis=0)).max() / np.sqrt(2 * n)
        else:
            self.sigma = sigma

    def fieldActivation(self, inputs, weights, sigma, dist):
        '''Receptive-field response exp(-d / sigma) for every (sample, center) pair.'''
        hidden = dist(inputs, weights)
        hidden = np.exp(- hidden / sigma)
        return hidden

    def train(self, nbIte=100):
        '''Fit the network: centers via K-means (unless provided), then the
        output layer by perceptron training (classification) or least
        squares (function approximation).'''
        if self.weights is None:
            km = Kmeans(self.inputs, k=self.n, distance=self.dist)
            km.train(nbIte=1000)
            self.weights = km.centers
        hidden = self.fieldActivation(self.inputs, self.weights, self.sigma, self.dist)
        if self.normalization:
            hidden = hidden / np.sum(hidden, axis=1)[:, np.newaxis]
        if self.usage == 'class':
            self.pcn = PCN(inputs=hidden, targets=self.targets, delta=True)
            return self.pcn.train(nbIte=nbIte)
        else:  # linear regression
            self.weights2 = np.linalg.inv(np.dot(hidden.T, hidden))
            self.weights2 = np.dot(self.weights2, np.dot(hidden.T, self.targets))
            return np.dot(hidden, self.weights2)

    def predict(self, data):
        '''Network output for `data`: receptive-field activations fed into
        the trained output layer.'''
        h = self.fieldActivation(data, self.weights, self.sigma, self.dist)
        if self.usage == 'class':
            return self.pcn.predict(h)
        else:
            return np.dot(h, self.weights2)
if __name__ == "__main__":
    # Classification: XOR is not linearly separable, but becomes so in the
    # receptive-field space (Python 2 script: bare prints).
    inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    XORtargets = np.array([[0], [1], [1], [0]])
    rbf = RBF(inputs=inputs, targets=XORtargets, n=4)
    print rbf.train(nbIte=300)
    # Function approximation: fit one period of sin(x).
    import matplotlib.pyplot as plt
    x = np.linspace(start=0, stop=2*np.pi, num=63)
    y = np.sin(x)
    # Eight hand-placed centers, evenly spread over the period.
    w = np.linspace(start=0, stop=2 * np.pi, num=8)
    x = x[:, np.newaxis]
    y = y[:, np.newaxis]
    w = w[:, np.newaxis]
    rbf = RBF(inputs=x, targets=y, usage='fctapprox', weights=w, normalization=True)
    out = rbf.train()
    # Target in red, network approximation in blue.
    plt.plot(x,y, 'r')
    plt.plot(x,out, 'b')
    plt.show()
| {
"repo_name": "antoinebrl/practice-ML",
"path": "rbf.py",
"copies": "1",
"size": "3759",
"license": "mit",
"hash": 6610266292566702000,
"line_mean": 36.59,
"line_max": 100,
"alpha_frac": 0.6134610269,
"autogenerated": false,
"ratio": 3.607485604606526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9651493747343096,
"avg_score": 0.0138905768326859,
"num_lines": 100
} |
import numpy as np
import sys
class MLP:
    '''Multi-layers Perceptron: one hidden sigmoid layer trained with back-propagation.

    The output layer can be 'linear', 'logic' (sigmoid) or 'softmax'.
    Weights live in self.w1 (input->hidden) and self.w2 (hidden->output);
    both carry an extra row for the bias input.
    '''
    def __init__(self, inputs, targets, nbNodes=1, outputType='logic'):
        '''
        Constructor
        :param inputs: set of data points as row vectors
        :param targets: set of target values, one row per sample
        :param nbNodes: number of hidden nodes
        :param outputType: can be 'logic' with a sigmoid, 'linear', or 'softmax'
        '''
        # Prerequisites
        if np.ndim(inputs) > 2:
            raise Exception('[mlp][__init__] The input should be a matrix with maximum 2 indexes')
        if np.shape(inputs)[0] != np.shape(targets)[0]:
            raise Exception('[mlp][__init__] The input and target matrices do not have the same number of samples')
        # Parameters
        dimensions = np.shape(inputs)
        self.nbSamples = dimensions[0]
        self.dimIn = 1 if np.ndim(inputs) == 1 else dimensions[1]
        self.dimOut = 1 if np.ndim(targets) <= 1 else np.shape(targets)[1]
        self.nbNodes = nbNodes
        self.outputType = outputType
        # Data (a constant 1-column is appended to the inputs for the bias term)
        self.targets = targets
        self.inputs = np.concatenate((inputs, np.ones((self.nbSamples, 1))), axis=1)
        # Initialise network
        # uniform distribution of weigths in [-1/sqrt(n), 1/sqrt(n)] with n number of input node
        self.w1 = 2*(np.random.rand(self.dimIn + 1, self.nbNodes) - 0.5) / np.sqrt(self.dimIn)
        self.w2 = 2*(np.random.rand(self.nbNodes + 1, self.dimOut) - 0.5) / np.sqrt(self.nbNodes)
    def __addColumn(self, inputs):
        '''Append the constant bias column (ones) to *inputs*.'''
        return np.concatenate((inputs, np.ones((np.shape(inputs)[0], 1))), axis=1)
    def __phi(self, x):
        '''Sigmoid activation: 1 / (1 + exp(-beta*x)) with beta = 0.8.'''
        return 1.0 / (1.0 + np.exp(-0.8 * x))
    def __deltaPhi(self, x):
        '''Derivative of the sigmoid __phi.

        For phi(x) = 1/(1+exp(-beta*x)), phi'(x) = beta * phi(x) * (1 - phi(x)).
        FIX: the previous implementation used exp(-0.6*x) instead of exp(-0.8*x),
        which is not the derivative of __phi and biased every gradient step.
        '''
        p = self.__phi(x)
        return 0.8 * p * (1.0 - p)
    def predict(self, inputs=None, training=False):
        '''
        Recall/Forward step of the back-propagation algorithm
        :param inputs: data points as row vectors; defaults to the training set.
        :param training: if called with training = True, temporary calculations are returned
        :return: In case training = True :
        oout: output of the network. oout = phi(oin)
        oin: input of output nodes. oin = hout*W2
        hout : output of the first layer. hout = phi(hin)
        hin : intput of the hidden nodes. hin = inputs*W1
        Otherwise : oout
        :warn: be careful with matrix dimensions due to the bias terms
        '''
        if inputs is None:
            inputs = self.inputs
        else:
            inputs = self.__addColumn(inputs)
        hin = np.dot(inputs, self.w1)
        hout = self.__phi(hin)
        oin = np.dot(self.__addColumn(hout), self.w2)
        if self.outputType == 'linear':
            result = oin, oin, hout, hin
        elif self.outputType == 'logic':
            result = self.__phi(oin), oin, hout, hin
        elif self.outputType == 'softmax':
            # FIX: normalise per sample (row); the previous np.sum over the
            # whole matrix mixed probabilities across the batch.
            expo = np.exp(oin)
            result = expo / np.sum(expo, axis=1, keepdims=True), oin, hout, hin
        else:
            raise Exception('[mlp][fwd] outputType not valid')
        if training:
            return result
        else:
            return result[0]
    def train(self, eta=0.1, beta=None, nbIte=100, momentum=0.7, validData=None,
            validTargets=None, eps=10**(-6)):
        '''
        Training using back-propagation
        :param eta: learning rate for the hidden layer.
        :param beta: learning rate for the output layer (defaults to eta).
        :param nbIte: number of iterations.
        :param momentum: update inertia. If no momentum is required it should be equal to 0.
        In case of an early stop, the momentum will be set to 0.
        :param validData: validation set for early stopping.
        :param validTargets: target values for the validation set for early stopping.
        :param eps: early stop criterion. Stop training if the two previous updates generate a
        sum of squared errors lower than eps.
        '''
        if beta is None:
            beta = eta
        updatew1 = np.zeros(self.w1.shape)
        updatew2 = np.zeros(self.w2.shape)
        # Early stopping is enabled only when a validation set is supplied.
        earlyStop = True if validData is not None and validTargets is not None else False
        momentum = 0 if earlyStop else momentum
        valErr0 = 0 if not earlyStop else np.sum((self.predict(validData) - validTargets )**2) # current
        valErr1 = valErr0+2*eps # previous error
        valErr2 = valErr1+2*eps
        for n in range(nbIte):
            # Stop when the two most recent validation improvements are below eps.
            if earlyStop and n > 10 and (valErr1 - valErr0) < eps and (valErr2 - valErr1) < eps:
                break
            outputs, oin, hout, hin = self.predict(training=True)
            if np.mod(n,100) == 0:
                print >> sys.stderr, "Iter: ",n, " error(SSE): ", np.sum((outputs-self.targets)**2)
            # Output-layer error term; depends on the output activation.
            if self.outputType == 'linear':
                deltaO = (outputs - self.targets)
            elif self.outputType == 'logic':
                deltaO = (outputs - self.targets) * self.__deltaPhi(oin)
            elif self.outputType == 'softmax':
                # FIX: beta used to be multiplied in here as well as in the
                # updatew2 step below, applying the learning rate twice for
                # softmax outputs (and leaking it into deltaH).
                deltaO = (outputs - self.targets) * outputs * (1.0 - outputs)
            else:
                raise Exception('[mlp][train] outputType not valid')
            # Back-propagate through w2 (bias row excluded) to the hidden layer.
            deltaH = np.dot(deltaO, np.transpose(self.w2[:-1,:])) * self.__deltaPhi(hin)
            updatew1 = eta * np.dot(np.transpose(self.inputs), deltaH) + momentum * updatew1
            updatew2 = beta * np.dot(np.transpose(self.__addColumn(hout)), deltaO) + momentum * updatew2
            self.w1 -= updatew1
            self.w2 -= updatew2
            if earlyStop:
                valErr2 = valErr1
                valErr1 = valErr0
                valErr0 = np.sum((self.predict(validData) - validTargets )**2)
        print >> sys.stderr, "Iter: ", n, " error(SSE): ", np.sum((outputs - self.targets) ** 2)
        return self.predict()
if __name__ == "__main__":
    '''Logic Tests'''
    eta = 0.1
    inputs = np.array([[0,0], [0,1], [1,0], [1,1]])
    ANDtargets = np.array([[0], [0], [0], [1]])
    ORtargets = np.array([0, 1, 1, 1]) # second format for 1 dimensional targets
    XORtargets = np.array([[0], [1], [1], [0]]) # non linearly separable
    # XOR is solvable by an MLP with hidden units (unlike a single-layer perceptron).
    print "XOR"
    mlp = MLP(inputs, XORtargets, nbNodes=3)
    output = mlp.train(eta, eta, 2000)
    print "Perceptron learning rule"
    print output
    '''2D test'''
    import matplotlib.pyplot as plt
    # Data parameters
    n = 150
    sigma = 0.8
    cov = [[sigma, 0], [0, sigma]]
    p = 2
    # Data generation: four gaussian clusters, one per class (one-hot targets)
    dataA = np.random.multivariate_normal([p, -p], cov, n)
    dataB = np.random.multivariate_normal([-p, p], cov, n)
    dataC = np.random.multivariate_normal([p, p], cov, n)
    dataD = np.random.multivariate_normal([-p, -p], cov, n)
    targetA = np.repeat(np.array([[1,0,0,0]]), n, axis=0)
    targetB = np.repeat(np.array([[0,1,0,0]]), n, axis=0)
    targetC = np.repeat(np.array([[0,0,1,0]]), n, axis=0)
    targetD = np.repeat(np.array([[0,0,0,1]]), n, axis=0)
    data = np.concatenate((dataA, dataB, dataC, dataD))
    target = np.concatenate((targetA, targetB, targetC, targetD))
    # Shuffle
    p = np.random.permutation(np.shape(data)[0])
    data = data[p]
    target = target[p]
    # Normalize
    #data = (data - np.mean(data, axis=0)) / np.var(data, axis=0)
    # Split: 1/2 train, 1/4 validation (early stopping), 1/4 test
    trainData = data[::2]
    validData = data[1::4]
    testData = data[3::4]
    trainTarget = target[::2]
    validTarget = target[1::4]
    testTarget = target[3::4]
    # Learning
    mlp = MLP(trainData, trainTarget, nbNodes=2)
    out = mlp.train(nbIte=100000, eta=0.1, validData=validData, validTargets=validTarget)
    c = np.argmax(out, axis=1)
    plt.scatter(trainData[:,0], trainData[:,1], c=c, s=120, marker='.')
    # Evaluation: predicted class over a dense grid, drawn as decision-boundary contours
    x = np.arange(-6, 6, 0.01)
    y = np.arange(-4, 4, 0.01)
    xx0, yy0 = np.meshgrid(x, y)
    xx = np.reshape(xx0, (xx0.shape[0]*xx0.shape[1],1))
    yy = np.reshape(yy0, (yy0.shape[0]*yy0.shape[1],1))
    grid = np.concatenate((xx,yy), axis=1)
    area = mlp.predict(grid)
    plt.scatter(validData[:, 0], validData[:, 1], c=np.argmax(validTarget, axis=1), s=120,marker='*')
    plt.contour(xx0, yy0, np.argmax(area, axis=1).reshape(xx0.shape))
    plt.show()
| {
"repo_name": "antoinebrl/practice-ML",
"path": "mlp.py",
"copies": "1",
"size": "8561",
"license": "mit",
"hash": 8686671399922136000,
"line_mean": 36.548245614,
"line_max": 114,
"alpha_frac": 0.5742319822,
"autogenerated": false,
"ratio": 3.3999205718824466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9447209239216863,
"avg_score": 0.005388662973116654,
"num_lines": 228
} |
import numpy as np
import sys
class PCN:
    '''Perceptron. Based on McCulloch and Pitts neurons.

    Single-layer network trained either with the classic perceptron learning
    rule or, when delta=True, with the delta rule on bipolar (+1/-1) targets.
    '''
    def __init__(self, inputs, targets, bias=True, delta=False):
        '''
        Constructor
        :param inputs : set of data points as row vectors
        :param targets : set of targets as row vectors. For 1D, the format could be a single row
        :param bias : deals with bias and non zero thresholds
        :param delta : use the delta learning rule or the Perceptron learning rule
        '''
        # Prerequisites
        if np.ndim(inputs) > 2:
            raise Exception('[pcn][__init__] The input should be a matrix with maximum 2 indexes')
        if np.shape(inputs)[0] != np.shape(targets)[0]:
            raise Exception('[pcn][__init__] The input and target matrices do not have the same '
                            'number of samples')
        # Parameters
        dimensions = np.shape(inputs)
        self.nbSamples = dimensions[0]
        self.dimIn = 1 if np.ndim(inputs) == 1 else dimensions[1]
        self.dimOut = 1 if np.ndim(targets) <= 1 else np.shape(targets)[1]
        self.delta = delta
        # Data (1-D targets are reshaped into a column vector)
        self.targets = targets if np.ndim(targets) > 1 else np.transpose([targets])
        if delta:
            self.targets = np.where(self.targets > 0, 1, -1) # bipolar encoding
        if bias:
            self.inputs = np.concatenate((inputs, np.ones((self.nbSamples, 1))), axis=1)
        else:
            self.inputs = inputs
        # Init network
        # NOTE(review): when bias=True, predict() expects its argument to already
        # carry the bias column (train() passes self.inputs, which does);
        # external callers must append the ones column themselves - confirm.
        self.weights = np.random.rand(self.dimIn + 1 * bias, self.dimOut) - 0.5 # mean = 0
    def predict(self, data):
        '''Recall. Compute the activation'''
        # sum
        activation = np.dot(data, self.weights)
        # activation/thresholding
        return np.where(activation > 0, 1, 0)
    def fwd(self):
        '''Forward step of the training process.

        Returns the thresholded output for the perceptron rule, or the raw
        activation when training with the delta rule.
        '''
        # sum
        activation = np.dot(self.inputs, self.weights)
        # activation/thresholding
        return np.where(activation > 0, 1, 0) if not self.delta else activation
    def train(self, eta=0.1, nbIte=10, batch=True):
        '''
        Train the single-layer perceptron (no back-propagation involved).
        :param eta: learning rate
        :param nbIte: number of iterations
        :param batch: use batch (synchronised) or on-line (asynchronised) learning
        '''
        if batch:
            for n in range(nbIte):
                activation = self.fwd()
                self.weights -= eta * np.dot(np.transpose(self.inputs), activation - self.targets)
                if np.mod(n,10) == 0:
                    print >> sys.stderr, "epoch: ", n
        else: # sequential
            for n in range(nbIte):
                M = np.shape(self.inputs)[1]
                for data in range(self.nbSamples):
                    for j in range(self.dimOut):
                        activation = 0
                        for i in range(M):
                            activation += self.inputs[data][i] * self.weights[i][j]
                        # Thresholding (perceptron rule only; delta rule uses the raw activation)
                        if not self.delta :
                            if activation > 0:
                                activation = 1
                            else:
                                activation = 0
                        for i in range(M):
                            self.weights[i][j] -= eta * (activation - self.targets[data][j]) * self.inputs[data][i]
                if np.mod(n,10) == 0:
                    print >> sys.stderr, "epoch: ", n
        return self.predict(self.inputs)
if __name__ == "__main__":
    '''Logic Tests'''
    eta = 0.1
    inputs = np.array([[0,0], [0,1], [1,0], [1,1]])
    ANDtargets = np.array([[0], [0], [0], [1]])
    ORtargets = np.array([0, 1, 1, 1]) # second format for 1 dimensional targets
    XORtargets = np.array([0, 1, 1, 0]) # non linearly separable
    # AND
    print "AND"
    pcn = PCN(inputs, ANDtargets)
    output = pcn.train(eta, 20)
    print "Perceptron learning rule"
    print output
    pcn = PCN(inputs, ANDtargets, delta=True)
    output = pcn.train(eta, 20)
    print "Delta learning rule"
    print output
    # OR
    print "OR"
    pcn = PCN(inputs, ORtargets)
    output = pcn.train(eta, 20)
    print "Perceptron learning rule"
    print output
    pcn = PCN(inputs, ORtargets, delta=True)
    output = pcn.train(eta, 20)
    print "Delta learning rule"
    print output
    # XOR
    # NOTE: XOR is not linearly separable, so neither rule can converge here;
    # this demonstrates the limitation of a single-layer perceptron.
    print "XOR"
    pcn = PCN(inputs, XORtargets)
    output = pcn.train(eta, 20)
    print "Perceptron learning rule"
    print output
    pcn = PCN(inputs, XORtargets, delta=True)
    output = pcn.train(eta, 20)
    print "Delta learning rule"
    print output
    '''2D test'''
    import pylab as plt
    # Two gaussian clusters -> linearly separable binary problem
    dataA = np.random.multivariate_normal([-3, +1], [[1,0],[0,1]], 100)
    dataB = np.random.multivariate_normal([+3, -1], [[1,0],[0,1]], 100)
    data = np.concatenate((dataA, dataB))
    targets = np.concatenate((np.ones((100,1)), np.zeros((100,1))))
    # shuffle
    p = np.random.permutation(np.shape(data)[0])
    data = data[p]
    targets = targets[p]
    pcn = PCN(data, targets, delta=True, bias=False)
    output = pcn.train(nbIte=120)
    # Evaluate the decision boundary over a dense grid for the contour plot
    x = np.arange(-6, 6, 0.01)
    y = np.arange(-4, 4, 0.01)
    xx0, yy0 = np.meshgrid(x, y)
    xx = np.reshape(xx0, (xx0.shape[0]*xx0.shape[1],1))
    yy = np.reshape(yy0, (yy0.shape[0]*yy0.shape[1],1))
    grid = np.concatenate((xx,yy), axis=1)
    area = pcn.predict(grid)
    plt.scatter(data[:, 0], data[:, 1], c=output[:,0], s=120)
    plt.contour(xx0, yy0, area.reshape(xx0.shape))
    plt.show()
| {
"repo_name": "antoinebrl/practice-ML",
"path": "pcn.py",
"copies": "1",
"size": "5893",
"license": "mit",
"hash": 6499169524509226000,
"line_mean": 32.867816092,
"line_max": 115,
"alpha_frac": 0.5543865603,
"autogenerated": false,
"ratio": 3.5758495145631066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46302360748631066,
"avg_score": null,
"num_lines": null
} |
import os, sys, atexit
class DaemonContext:
    """
    Daemon process context manager, following PEP 3143.

    Usage:
        with DaemonContext():
            main()

    NOTE(review): the uid/gid defaults are evaluated once at class-definition
    time (os.getuid()/os.getgid() in the signature), so they are frozen at
    import; fine for typical use but worth knowing.
    """
    is_open = False  # True between a successful open() and close()
    def __init__(self, stdin=None, stdout=None, stderr=None,
                 working_directory='/', chroot_directory=None,
                 uid=os.getuid(), gid=os.getgid(), files_preserve=None,
                 umask=0, pidfile=None,
                 detach_process=True, signal_map=None, prevent_core=True):
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.working_directory = working_directory
        self.chroot_directory = chroot_directory
        self.uid = uid
        self.gid = gid
        self.files_preserve = files_preserve
        self.umask = umask
        self.pidfile = pidfile
        self.detach_process = detach_process
        self.signal_map = signal_map
        self.prevent_core = prevent_core
    def __enter__(self):
        self.open()
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.close()
    def open(self):
        """Turn the current process into a daemon.  Idempotent."""
        if self.is_open:
            return None
        self._set_system_limits()
        if self.chroot_directory:
            # BUG FIX: previously used the non-existent attribute self.chroot,
            # raising AttributeError whenever a chroot was requested.
            os.chroot(self.chroot_directory)
        # BUG FIX: drop group privileges before user privileges; once setuid()
        # succeeds the process may no longer be permitted to call setgid().
        os.setgid(self.gid)
        os.setuid(self.uid)
        self._close_all_file_descriptors()
        os.chdir(self.working_directory)
        os.umask(self.umask)
        # Detach from the controlling terminal: parent exits, child continues.
        if self.detach_process and os.fork():
            sys.exit(0)
        self._set_signal_handlers()
        self.is_open = True
        return None
    def close(self):
        """Mark the context closed.  Idempotent."""
        if not self.is_open:
            return None
        self.is_open = False
        return None
    def _set_system_limits(self):
        # placeholder: resource limits / core-dump prevention not implemented yet
        pass
    def _close_all_file_descriptors(self):
        # placeholder: should close all fds except those in self.files_preserve
        pass
    def _set_signal_handlers(self):
        # placeholder: should install the handlers from self.signal_map
        pass
| {
"repo_name": "apointeau/python-daemonizer",
"path": "daemon/DaemonContext.py",
"copies": "1",
"size": "2022",
"license": "mit",
"hash": 7501072426225854000,
"line_mean": 23.962962963,
"line_max": 73,
"alpha_frac": 0.5766567755,
"autogenerated": false,
"ratio": 3.6830601092896176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47597168847896176,
"avg_score": null,
"num_lines": null
} |
author = 'Anton Bobrov<bobrov@vl.ru>/Fabien Devaux<fdev31@gmail.com>'
name = 'Pylint'
desc = 'Advanced python linter'
langs = ['python']
import os.path
import re
from snaked.core.problems import mark_problems, attach_to_editor, clear_problems
def init(manager):
    '''Plugin entry point: register pylint options and keyboard shortcuts.'''
    manager.add_global_option('PYLINT_CHECK_ON_SAVE', False, 'Run pylint on every file save')
    manager.add_global_option('PYLINT_CMD', 'pylint -f parseable -r n -i y',
        'Command to run pylint')
    manager.add_shortcut('python-run-pylint', 'F4', 'Python', 'Run pylint', add_job)
    manager.add_shortcut('python-clear-pylint-warns', '<shift>F4', 'Python',
        'Clear pylint warnings', clear_pylint_warns)
def editor_opened(editor):
    '''Called for each new editor: attach problem marks, optionally lint on save.'''
    attach_to_editor(editor)
    if editor.snaked_conf['PYLINT_CHECK_ON_SAVE']:
        editor.connect('file-saved', on_file_saved)
def on_file_saved(editor):
    # 'file-saved' handler: schedule a background pylint run.
    add_job(editor)
def clear_pylint_warns(editor):
    # Drop all problem marks previously produced by the 'pylint' source.
    clear_problems(editor, 'pylint')
# Patterns for extracting the offending identifier out of pylint output:
# single-quoted name, double-quoted name, and the parseable message line itself.
qrex = re.compile(r".*?'(.*?)'.*")
dqrex = re.compile(r'.*?"(.*?)".*')
rex = re.compile(
    r'.*?:(?P<lineno>\d+):\s*\[(?P<what>[A-Z]\d{4})(,\s+(?P<where>[^\]]+))?\]\s+(?P<message>.*)')

def parse_name(line, lineno, what, where, message):
    """Best-effort guess of the identifier a pylint message points at.

    Tries, in order: the reported scope itself, a double-quoted name in the
    message, the last dotted component of the scope, a single-quoted name
    inside the scope; falls back to the stripped source line.
    """
    if where and where in line:
        return where
    quoted = dqrex.match(message)
    if quoted is not None:
        return quoted.group(1)
    if where:
        tail = where.rsplit('.', 1)[-1]
        if tail in line:
            return tail
        squoted = qrex.match(where)
        if squoted is not None:
            return squoted.group(1)
    return line.strip()
# Holds the most recently spawned pylint subprocess (or None); used by
# stop_already_runned_jobs() to kill a stale run before starting a new one.
active_process = [None]

def get_problem_list(filename, workingdir, pylint_cmd):
    '''Run pylint over *filename* and parse its parseable-format output.

    :param filename: file to check (also read to resolve identifier names)
    :param workingdir: cwd for the pylint subprocess (usually the project root)
    :param pylint_cmd: shell-style command string, e.g. 'pylint -f parseable'
    :return: list of (lineno, name, message) tuples
    '''
    import subprocess
    import shlex
    # BUG FIX: the file handle used to leak; close it deterministically.
    with open(filename) as source:
        data = source.readlines()
    cmd = ['/usr/bin/env']
    cmd.extend(shlex.split(pylint_cmd))
    cmd.append(filename)
    proc = subprocess.Popen(cmd, cwd=workingdir, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    active_process[0] = proc
    stdout, stderr = proc.communicate()
    last_line = None
    res = []
    for line in stdout.split('\n'):
        if not line.strip():
            continue
        m = rex.match(line)
        if m:
            d = m.groupdict()
            d['lineno'] = i = int(d['lineno'])
            d['where'] = parse_name(data[i-1], **d)
            res.append(d)
        else:
            # Unmatched lines come in pairs: the quoted source line followed by
            # a caret ('^') underline marking the exact span of the problem.
            if last_line:
                if '^' not in line:
                    continue
                res[-1]['where'] = last_line[line.index('^'):line.rindex('^')+1]
                last_line = None
            else:
                last_line = line
    return [(r['lineno'], r['where'], r['what'] + ' ' + r['message']) for r in res]
def stop_already_runned_jobs():
    """Terminate the previously spawned pylint process if it is still running."""
    running = active_process[0]
    if running is not None and running.poll() is None:
        running.terminate()
        running.wait()
def add_job(editor):
    '''Run pylint for *editor* in a background thread and mark the problems.

    Any previous still-running pylint job is terminated first.
    '''
    from threading import Thread
    from uxie.utils import idle
    def job():
        try:
            problems = get_problem_list(editor.uri, editor.project_root, editor.snaked_conf['PYLINT_CMD'])
        # NOTE: Python 2 except syntax, consistent with the rest of this module.
        except Exception, e:
            # Surface failures (pylint missing, unparsable output, ...) in the UI.
            idle(editor.message, str(e), 5000)
            return
        # Marking must happen in the GUI thread, hence idle().
        idle(mark_problems, editor, 'pylint', problems)
    stop_already_runned_jobs()
    Thread(target=job).start()
| {
"repo_name": "baverman/snaked",
"path": "snaked/plugins/python_lint/__init__.py",
"copies": "1",
"size": "3208",
"license": "mit",
"hash": 6533604438474508000,
"line_mean": 26.6551724138,
"line_max": 106,
"alpha_frac": 0.5798004988,
"autogenerated": false,
"ratio": 3.3347193347193347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4414519833519335,
"avg_score": null,
"num_lines": null
} |
author = 'Anton Bobrov<bobrov@vl.ru>'
name = 'Bad python code save preventer'
desc = 'Prevents from saving of code with syntax errors'
import weakref
import time
import glib
last_feedbacks = weakref.WeakKeyDictionary()
def init(injector):
    '''Plugin entry point: hook new editors and register the goto-error pref.'''
    injector.on_ready('editor-with-new-buffer', editor_created)
    from snaked.core.prefs import add_option
    add_option('PYTHON_BCSP_GOTO_TO_ERROR', True,
        'Automatically jumps to line where syntax error occured')
def editor_created(editor):
    # Veto the save if the buffer has a syntax error (see the handler below).
    editor.connect('before-file-save', on_editor_before_file_save)
def last_save_occurred_in(fb, seconds):
    """Truthy when feedback *fb* exists and was started less than *seconds* ago."""
    if not fb:
        return fb
    return time.time() - fb.start < seconds
def process_error(editor, fb, newfb):
    """Record *newfb* for *editor* and decide whether the save may proceed.

    Returns False (save allowed) when the previous error feedback *fb* was
    shown less than half a second ago - i.e. the user saved twice quickly to
    force it through; otherwise stops the save emission and returns True.
    """
    last_feedbacks[editor] = newfb
    if last_save_occurred_in(fb, 0.5):
        return False
    editor.before_file_save.stop_emission()
    return True
def on_editor_before_file_save(editor):
    '''Veto saving when the buffer contains a python syntax error.

    On the first failed attempt the save is blocked and an error feedback is
    shown; a second save within half a second goes through anyway, letting the
    user force-save broken code (see process_error).
    '''
    from snaked.plugins.python import handlers
    try:
        h = handlers[editor]
    except KeyError:
        # Not a python editor - nothing to check.
        return False
    last_fb = last_feedbacks.pop(editor, None)
    if last_fb:
        last_fb.cancel()
    error = h.env.check_syntax(editor.utext)
    if error:
        location, msg = error
        # *location* is a tagged tuple, e.g. ('line-offset', lineno, offset),
        # ('end-of-line', lineno) or ('end-of-file',).
        if location[0] == 'end-of-file':
            lineno = editor.buffer.get_line_count()
        else:
            lineno = location[1]
        message = '%s at line <b>%d</b>' % (glib.markup_escape_text(msg), lineno)
        new_fb = editor.message(message, 'error', 10000, markup=True)
        if editor.conf['PYTHON_BCSP_GOTO_TO_ERROR']:
            # Remember the current position before jumping to the error line.
            if editor.cursor.get_line() != lineno - 1:
                editor.add_spot()
            if location[0] == 'line-offset':
                it = editor.buffer.get_iter_at_line_offset(lineno-1, location[2])
            elif location[0] == 'end-of-line':
                it = editor.buffer.get_iter_at_line(location[1] - 1)
                if not it.ends_line():
                    it.forward_to_line_end()
            elif location[0] == 'end-of-file':
                it = editor.buffer.get_bounds()[1]
            else:
                it = editor.cursor
            editor.buffer.place_cursor(it)
            editor.scroll_to_cursor()
        return process_error(editor, last_fb, new_fb)
    # No error: congratulate a quick fix right after a previously blocked save.
    if last_save_occurred_in(last_fb, 10):
        editor.message('Good job!', 'done')
"repo_name": "baverman/snaked",
"path": "snaked/plugins/python_bcsp/__init__.py",
"copies": "1",
"size": "2403",
"license": "mit",
"hash": -3999120746779475000,
"line_mean": 30.6315789474,
"line_max": 85,
"alpha_frac": 0.5925925926,
"autogenerated": false,
"ratio": 3.5234604105571847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9585948150094403,
"avg_score": 0.006020970612556189,
"num_lines": 76
} |
author = 'Anton Bobrov<bobrov@vl.ru>'
name = 'Complete words'
desc = 'Cycle through possible word completions'
from gobject import timeout_add
from string import whitespace
from uxie.utils import refresh_gui, connect, idle
# Buffers modified since the last timer tick, awaiting a word-index refresh.
buffers_to_update = []
def init(injector):
    '''Plugin entry point: bind the completion action and start the reindex timer.'''
    injector.bind('editor-active', 'complete-word', 'Edit/Complete _word#80', cycle).to('<alt>slash')
    # Re-scan changed buffers every 3 seconds (see update_words_timer).
    timeout_add(3000, update_words_timer)
    injector.on_ready('buffer-loaded', buffer_loaded)
def buffer_loaded(buf):
    # Track edits so the word index can be refreshed lazily, then index once now.
    buf.complete_words_on_changed_handler_id = connect(buf, 'changed', on_buffer_changed, True, True)
    idle(add_update_job, buf)
def add_update_job(buf):
    '''Feed the whole buffer text to the background word indexer.'''
    import words
    words.add_job(buf.uri, buf.get_text(*buf.get_bounds()))
def update_words_timer():
    """Periodic task: reindex every buffer modified since the previous tick.

    Always returns True so the gobject timer keeps firing.
    """
    for buf in buffers_to_update:
        add_update_job(buf)
    del buffers_to_update[:]
    return True

def on_buffer_changed(buf, *args):
    """'changed' handler: flag the buffer and queue it for reindexing."""
    buf.complete_words_changed = True
    if buf not in buffers_to_update:
        buffers_to_update.append(buf)
def is_valid_character(c):
    """True for characters that may appear inside a word: letters, digits, '-', '_'."""
    if c in whitespace:
        return False
    if c in ('-', '_'):
        return True
    return c.isalpha() or c.isdigit()
def backward_to_word_begin(iterator):
    '''Move *iterator* back to the first character of the word it is inside.'''
    if iterator.starts_line(): return iterator
    iterator.backward_char()
    while is_valid_character(iterator.get_char()):
        iterator.backward_char()
        if iterator.starts_line(): return iterator
    # We stepped one char past the word start; undo that step.
    iterator.forward_char()
    return iterator
def forward_to_word_end(iterator):
    '''Move *iterator* forward past the last character of the current word.'''
    if iterator.ends_line(): return iterator
    if not is_valid_character(iterator.get_char()): return iterator
    while is_valid_character(iterator.get_char()):
        iterator.forward_char()
        if iterator.ends_line(): return iterator
    return iterator
def get_word_before_cursor(buf, iterator):
    '''Return (word, start_iter) for the word ending just before the cursor.

    Returns (None, None) when the cursor is not at a word end: inside/in front
    of a word, at line start, or not preceded by a word character.
    '''
    # If the cursor is in front of a valid character we ignore
    # word completion.
    if is_valid_character(iterator.get_char()):
        return None, None
    if iterator.starts_line():
        return None, None
    iterator.backward_char()
    if not is_valid_character(iterator.get_char()):
        return None, None
    start = backward_to_word_begin(iterator.copy())
    end = forward_to_word_end(iterator.copy())
    word = buf.get_text(start, end).strip()
    return word, start
def get_matches(string):
    '''Words from the global index that start with *string* (but differ from it),
    ordered by total occurrence count, most frequent first.

    Returns None when the index is empty (not yet built).
    '''
    import words
    if not words.words:
        return None
    result = []
    # words.words maps word -> {file: occurrence count}.
    for word, files in words.words.iteritems():
        if word != string and word.startswith(string):
            result.append((word, sum(files.values())))
    result.sort(key=lambda r: r[1], reverse=True)
    return [r[0] for r in result]
def cycle(editor):
    '''Cycle through completions of the word immediately left of the cursor.

    Repeated invocations keep the original prefix (remembered in
    editor.complete_words_data together with its offset) and step through the
    candidate list, replacing the previously inserted candidate each time.
    '''
    buf, it = editor.buffer, editor.cursor
    word_to_complete, start = get_word_before_cursor(buf, it)
    if not word_to_complete:
        return False
    # Any buffer edit invalidates the remembered completion session.
    if getattr(buf, 'complete_words_changed', False):
        editor.complete_words_data = None, None
        buf.complete_words_changed = False
    try:
        start_word, start_offset = editor.complete_words_data
    except AttributeError:
        start_word, start_offset = editor.complete_words_data = None, None
    # Start a new session when there is none or the cursor moved elsewhere.
    if not start_word or start_offset != start.get_offset():
        start_word = word_to_complete
        start_offset = start.get_offset()
        editor.complete_words_data = start_word, start_offset
    matches = get_matches(start_word)
    if matches:
        idx = 0
        try:
            idx = matches.index(word_to_complete)
            idx = (idx + 1) % len(matches) # wrap around to the first candidate
        except ValueError:
            pass
        if matches[idx] == word_to_complete:
            editor.message("Word completed already")
            return False
        # Replace the word without re-triggering the reindex handler.
        buf.handler_block(buf.complete_words_on_changed_handler_id)
        end = editor.cursor
        buf.delete(start, end)
        buf.insert(start, matches[idx])
        refresh_gui()
        buf.handler_unblock(buf.complete_words_on_changed_handler_id)
    else:
        editor.message("No word to complete")
| {
"repo_name": "baverman/snaked",
"path": "snaked/plugins/complete_words/__init__.py",
"copies": "1",
"size": "4035",
"license": "mit",
"hash": 3105432452709599700,
"line_mean": 27.8214285714,
"line_max": 101,
"alpha_frac": 0.648574969,
"autogenerated": false,
"ratio": 3.6748633879781423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48234383569781425,
"avg_score": null,
"num_lines": null
} |
author = 'Anton Bobrov<bobrov@vl.ru>'
name = 'Edit and Select'
desc = 'Various edit shortcuts'
import weakref
import gtk
from uxie.floating import TextFeedback
last_smart_selections = weakref.WeakKeyDictionary()
def init(injector):
    '''Plugin entry point: register contexts, keybindings and preferences.'''
    injector.add_context('editor-with-cursor-in-string', 'editor', in_string)
    injector.bind('editor-active', 'delete-line', 'Edit/_Delete line#20', delete_line).to('<ctrl>d')
    injector.bind('editor-active', 'smart-select', 'Edit/Smart _select', smart_select).to('<alt>w')
    injector.bind('editor-with-selection', 'smart-unselect', 'Edit/Smart _unselect',
        smart_unselect).to('<shift><alt>w')
    injector.bind_check('editor-active', 'show_offset', 'Tools/Show offset and column#10',
        toggle_offset).to('<ctrl><alt>o')
    injector.bind('editor-with-selection', 'wrap-text', 'Edit/_Wrap block', wrap_text).to('<alt>f')
    injector.bind('editor-with-selection', 'move-selection-left',
        'Edit/Move selection _left', move_word_left).to('<alt>Left')
    injector.bind('editor-with-selection', 'move-selection-right',
        'Edit/Move selection _right', move_word_right).to('<alt>Right')
    injector.bind('editor-with-cursor-in-string', 'swap-quotes',
        'Edit/Swap _quotes', swap_quotes).to('<alt>apostrophe')
    from snaked.core.prefs import add_option
    add_option('DOUBLE_BRACKET_MATCHER', True, "Enable custom bracket matcher")
    add_option('COPY_DELETED_LINE', True, "Put deleted line into clipboard")
    injector.on_ready('editor-with-new-buffer', editor_created)
def editor_created(editor):
    # Optional custom bracket matcher, controlled by a preference.
    if editor.conf['DOUBLE_BRACKET_MATCHER']:
        from bracket_matcher import attach
        attach(editor)
def in_string(editor):
    """Context provider: the editor itself when the cursor is inside a string
    literal, None otherwise."""
    from .util import cursor_in_string
    if cursor_in_string(editor.cursor):
        return editor
    return None
def delete_line(editor):
    '''Delete the whole line under the cursor.

    Unless the line is blank or COPY_DELETED_LINE is disabled, the line is
    first copied to the clipboard so the deletion doubles as a cut.
    '''
    from util import get_line_bounds, line_is_empty
    bounds = get_line_bounds(editor.cursor)
    if not line_is_empty(editor.cursor) and editor.conf['COPY_DELETED_LINE']:
        clipboard = editor.view.get_clipboard(gtk.gdk.SELECTION_CLIPBOARD)
        editor.buffer.select_range(*bounds)
        editor.buffer.copy_clipboard(clipboard)
    # Wrap in one user action so a single undo restores the line.
    editor.buffer.begin_user_action()
    editor.buffer.delete(*bounds)
    editor.buffer.end_user_action()
def update_last_smart_select(editor, start, end):
    """Record a smart-select span for later unselection.

    While each new span encloses the current top of the stack, spans are
    pushed so smart_unselect() can walk back out; otherwise the stack is
    reset to the new span alone.
    """
    span = (start.get_offset(), end.get_offset())
    stack = last_smart_selections.get(editor)
    if stack:
        top_start, top_end = stack[0]
        if span[0] <= top_start and span[1] >= top_end:
            stack.insert(0, span)
            return
    last_smart_selections[editor] = [span]
def smart_select(editor):
    '''Grow the selection to the next enclosing syntactic region.'''
    from smart_select import get_smart_select
    if editor.buffer.get_has_selection():
        update_last_smart_select(editor, *editor.buffer.get_selection_bounds())
    else:
        update_last_smart_select(editor, editor.cursor, editor.cursor)
    start, end = get_smart_select(editor)
    # Reversed order keeps the insert mark at the selection start.
    editor.buffer.select_range(end, start)
def smart_unselect(editor):
    '''Shrink the selection back to the previously recorded smart-select span.'''
    if editor not in last_smart_selections or not last_smart_selections[editor]:
        editor.message('Unselect what?')
        return
    start, end = map(gtk.TextIter.get_offset, editor.buffer.get_selection_bounds())
    ts, te = last_smart_selections[editor].pop(0)
    # Only restore if the recorded span is still inside the current selection.
    if ts >= start and te <= end:
        editor.buffer.select_range(*map(editor.buffer.get_iter_at_offset, (te, ts)))
    else:
        # Selection was changed by other means: the recorded stack is stale.
        last_smart_selections[editor][:] = []
        editor.message('Nothing to unselect')
# Active offset/column feedbacks, keyed weakly by editor.
offset_feedbacks = weakref.WeakKeyDictionary()

def get_offset_message(editor):
    """Render the editor's cursor position as the floating feedback text."""
    cur = editor.cursor
    return 'offset: %d\ncolumn: %d' % (cur.get_offset(), cur.get_line_offset())
def on_buffer_cursor_changed(_buf, _prop, editor_ref):
    """'notify::cursor-position' handler: refresh the offset/column feedback.

    *editor_ref* is a weak reference; guard against the editor having been
    collected (or the feedback already removed) before the handler is
    disconnected - previously a dead ref raised here.
    """
    editor = editor_ref()
    if editor is None:
        return
    feedback = offset_feedbacks.get(editor)
    if feedback is not None:
        feedback.label.set_text(get_offset_message(editor))
def toggle_offset(editor, is_set):
    '''Toggle a floating "offset/column" feedback over the editor view.

    When *is_set* is true the feedback is created, or cancelled if already
    shown; otherwise the function only reports whether it is active (used by
    the check-menu binding).
    '''
    if is_set:
        if editor in offset_feedbacks:
            offset_feedbacks[editor].cancel()
        else:
            feedback = offset_feedbacks[editor] = editor.window.floating_manager.add(editor.view,
                TextFeedback(get_offset_message(editor), 'info'), 10)
            editor.window.push_escape(feedback, 10)
            # Weak ref so the cursor handler does not keep a closed editor alive.
            editor_ref = weakref.ref(editor)
            hid = editor.buffer.connect_after('notify::cursor-position', on_buffer_cursor_changed,
                editor_ref)
            def on_cancel(_feedback):
                # Detach the cursor handler when the feedback disappears.
                editor = editor_ref()
                if editor:
                    offset_feedbacks.pop(editor, None)
                    editor.buffer.handler_disconnect(hid)
            feedback.on_cancel(on_cancel)
    else:
        return editor in offset_feedbacks
def wrap_text(editor):
    '''Re-flow the selected block to the view's right margin (fill-paragraph).

    The indentation of the selection's second line, if there is one, is reused
    as the subsequent_indent of the wrapped text.
    '''
    buf = editor.buffer
    import textwrap, re
    from util import get_whitespace
    start, end = buf.get_selection_bounds()
    start.order(end)
    # A selection ending at a line start should not include that line.
    if end.starts_line():
        end.backward_visible_cursor_position()
    si = ''
    second_line = start.copy()
    second_line.set_line(start.get_line() + 1)
    if second_line.get_offset() < end.get_offset():
        si = get_whitespace(second_line)
    text = buf.get_text(start, end).decode('utf-8')
    # Strip existing leading whitespace so textwrap can re-indent uniformly.
    text = re.sub('(?m)^\s+', '', text)
    text = textwrap.fill(text, subsequent_indent=si ,width=editor.view.get_right_margin_position())
    buf.begin_user_action()
    buf.place_cursor(end)
    buf.delete(start, end)
    buf.insert_at_cursor(text)
    buf.end_user_action()
def move_word(buf, fromiter, tomark):
    """Move the single character at *fromiter* to the position of *tomark*,
    as one undoable action."""
    after = fromiter.copy()
    after.forward_char()
    moved = fromiter.get_text(after)
    buf.begin_user_action()
    buf.delete(fromiter, after)
    buf.insert(buf.get_iter_at_mark(tomark), moved)
    buf.end_user_action()
def move_word_left(editor):
    '''Shift the selection one character left (swap it with the char before it).'''
    buf = editor.buffer
    start, end = map(gtk.TextIter.copy, buf.get_selection_bounds())
    start.order(end)
    if not start.backward_char():
        editor.message('You are already at begin of file')
        return
    move_word(buf, start, buf.create_mark(None, end))
    # Re-select the moved text: it now ends one character earlier.
    start, end = buf.get_selection_bounds()
    start.order(end)
    end.backward_char()
    buf.select_range(start, end)
def move_word_right(editor):
    '''Shift the selection one character right (swap it with the char after it).'''
    buf = editor.buffer
    start, end = map(gtk.TextIter.copy, buf.get_selection_bounds())
    start.order(end)
    if end.is_end():
        editor.message('You are already at end of file')
        return
    move_word(buf, end, buf.create_mark(None, start))
def swap_quotes(editor):
    '''Swap the quote style of the string under the cursor (" <-> '),
    re-escaping embedded quote characters accordingly.'''
    from .util import source_view_pairs_parser
    start, end = source_view_pairs_parser(editor.cursor)
    buf = editor.buffer
    text = buf.get_text(start, end).decode('utf-8')
    q = text[0]
    if q == '"':
        aq = "'"
    elif q == "'":
        aq = '"'
    else:
        editor.message('Swap quote? What quote?', 'warn')
        return
    if text[-1] == q:
        # Escape the new quote char inside the body, unescape the old one.
        text = aq + text[1:-1].replace(aq, '\\' + aq).replace('\\' + q, q) + aq
    else:
        # Unterminated / mismatched string: refuse to touch it.
        editor.message('Swap quote? What quote?', 'warn')
        return
    # Keep the cursor at the same offset after the replacement.
    offset = editor.cursor.get_offset()
    buf.begin_user_action()
    buf.delete(start, end)
    buf.insert_at_cursor(text)
    buf.place_cursor(buf.get_iter_at_offset(offset))
    buf.end_user_action()
| {
"repo_name": "baverman/snaked",
"path": "snaked/plugins/edit_and_select/__init__.py",
"copies": "1",
"size": "7314",
"license": "mit",
"hash": 7166423206878682000,
"line_mean": 32.2454545455,
"line_max": 100,
"alpha_frac": 0.649165983,
"autogenerated": false,
"ratio": 3.406613879832324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45557798628323243,
"avg_score": null,
"num_lines": null
} |
author = 'Anton Bobrov<bobrov@vl.ru>'
name = 'Goto line'
desc = 'Navigates to specified line'
import gtk
from uxie.utils import idle
def init(injector):
    '''Plugin entry point: bind the goto-line action.'''
    injector.bind('editor-active', 'goto-line', 'Edit/_Goto line#50', goto_line).to('<ctrl>l')
def goto_line(editor):
    '''Show the inline goto-line bar at the bottom of the editor and focus it.'''
    widget = get_widget(editor)
    editor.widget.pack_start(widget, False)
    widget.entry.grab_focus()
    widget.show_all()
def get_widget(editor):
    '''Build the "Goto line:" bar: an hbox holding a label and a number entry.'''
    widget = gtk.HBox(False, 0)
    label = gtk.Label()
    label.set_text('Goto line:')
    widget.pack_start(label, False)
    entry = gtk.Entry()
    widget.pack_start(entry, False)
    entry.connect('activate', on_entry_activate, editor, widget)
    entry.connect('focus-out-event', on_focus_out, editor, widget)
    entry.connect('key-press-event', on_key_press, editor, widget)
    # Exposed so goto_line() can focus the entry after packing the bar.
    widget.entry = entry
    return widget
def hide(editor, widget):
    """Detach and destroy the goto-line bar (if still attached), then return
    keyboard focus to the editor view."""
    still_attached = widget and widget.get_parent()
    if still_attached:
        editor.widget.remove(widget)
        widget.destroy()
    editor.view.grab_focus()
def on_focus_out(sender, event, editor, widget):
    # Dismiss the bar as soon as the entry loses focus.
    idle(hide, editor, widget)
def on_entry_activate(sender, editor, widget):
    '''Enter pressed: jump to the typed line number; non-numbers are ignored.'''
    idle(hide, editor, widget)
    try:
        line = int(sender.get_text())
        # Remember the current position so the jump can be undone via spots.
        editor.add_spot()
        idle(editor.goto_line, line)
    except ValueError:
        pass
def on_key_press(sender, event, editor, widget):
    """Escape closes the goto-line bar; every other key is left to gtk."""
    if event.keyval != gtk.keysyms.Escape:
        return False
    idle(hide, editor, widget)
    return True
"repo_name": "baverman/snaked",
"path": "snaked/plugins/goto_line/__init__.py",
"copies": "1",
"size": "1488",
"license": "mit",
"hash": -7508768625825674000,
"line_mean": 24.2372881356,
"line_max": 94,
"alpha_frac": 0.6565860215,
"autogenerated": false,
"ratio": 3.2920353982300883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9411727429841452,
"avg_score": 0.007378797977727352,
"num_lines": 59
} |
author = 'Anton Bobrov<bobrov@vl.ru>'
name = 'Hash comment'
desc = '(Un)Comments line or selection with hashes'
import gtk
import re
langs = ['python', 'sh', 'ruby', 'perl']
def init(injector):
    '''Plugin entry point: expose the (un)comment action for hash-style languages.'''
    injector.add_context('hash-comment-aware', 'editor-active',
        lambda e: e if e.buffer.lang in langs else None)
    injector.bind('hash-comment-aware', 'comment-code', 'Edit/(Un)_comment#60',
        comment).to('<ctrl>slash', 1)
def comment(editor):
    """Toggle hash comments over the current line or selection.

    If every line in the range is already commented (or blank) the range is
    uncommented; otherwise it is commented.
    """
    span = get_bounds(editor)
    traversor = make_line_traversor(editor.buffer, span)
    already_commented = all(line_is_hashed(it) for it in traversor())
    if already_commented:
        uncomment_range(editor, traversor)
    else:
        comment_range(editor, traversor)
def make_line_traversor(buffer, r):
    """Return a zero-argument callable producing a fresh iterator over the
    line-start TextIters of the (start, end) iter pair *r*."""
    first = r[0].get_line()
    stop = r[1].get_line() + 1
    def inner():
        for lineno in xrange(first, stop):
            yield buffer.get_iter_at_line(lineno)
    return inner
def get_bounds(editor):
    '''Return the iter pair delimiting the lines to (un)comment.

    With a selection, barely-touched edge lines are trimmed: a selection
    starting exactly at a line end skips that line, and one ending exactly at
    a line start skips that line.  Without a selection the cursor line is used.
    '''
    if editor.buffer.get_has_selection():
        start, end = editor.buffer.get_selection_bounds()
        if start.ends_line():
            start.set_line(start.get_line() + 1)
        if end.starts_line():
            end.set_line(end.get_line() - 1)
        return start, end
    else:
        cursor = editor.cursor
        return cursor, cursor.copy()
def line_is_hashed(iter):
    """A line counts as hashed when it is blank or its first non-blank
    character is '#'."""
    stripped = get_line_text(iter).strip()
    return (not stripped) or stripped.startswith(u'#')
def get_line_text(iter):
    '''Return the full text of the line *iter* is on (without the newline).'''
    if not iter.starts_line():
        iter = iter.copy()
        # set_line() to the current line number moves the copy to line start.
        iter.set_line(iter.get_line())
    end = iter.copy()
    if not end.ends_line():
        end.forward_to_line_end()
    return iter.get_text(end)
def line_is_empty(iter):
    """True when the line under *iter* contains only whitespace (or nothing)."""
    return not get_line_text(iter).strip()
def comment_range(editor, traversor):
    """Insert a '#' on every traversed line at a common indent column.

    First pass finds the smallest leading whitespace among non-empty lines;
    second pass inserts the hashes (padding empty lines with that indent)
    inside a single undoable user action.
    """
    ws_match = re.compile(r'[ \t]*')
    # Sentinel: 1000 means "no non-empty line seen yet".
    min_indent = 1000
    min_indent_text = ''
    for iter in traversor():
        if not line_is_empty(iter):
            match = ws_match.match(get_line_text(iter))
            if match and len(match.group()) < min_indent:
                min_indent = len(match.group())
                min_indent_text = match.group()
    if min_indent == 1000:
        # All lines were empty: comment at column zero.
        min_indent = 0
    editor.buffer.begin_user_action()
    for iter in traversor():
        if line_is_empty(iter):
            line_end = iter.copy()
            line_end.forward_to_line_end()
            if iter.get_line() == line_end.get_line():
                # Clear the line's existing whitespace before padding.
                editor.buffer.delete(iter, line_end)
            editor.buffer.insert(iter, min_indent_text)
        else:
            # Skip past the common indent so hashes line up in one column.
            iter.forward_chars(min_indent)
        editor.buffer.insert(iter, u'#')
    editor.buffer.end_user_action()
def uncomment_range(editor, traversor):
    """Remove the first '#' from every non-empty line in the range."""
    editor.buffer.begin_user_action()
    for iter in traversor():
        if not line_is_empty(iter):
            if get_line_text(iter).strip() == '#':
                # Line holds nothing but the hash: wipe the whole line text.
                line_end = iter.copy()
                line_end.forward_to_line_end()
                editor.buffer.delete(iter, line_end)
            else:
                # Delete the first visible '#' found from the line start.
                editor.buffer.delete(*iter.forward_search(u'#', gtk.TEXT_SEARCH_VISIBLE_ONLY))
    editor.buffer.end_user_action()
| {
"repo_name": "baverman/snaked",
"path": "snaked/plugins/hash_comment/__init__.py",
"copies": "1",
"size": "3128",
"license": "mit",
"hash": 8769296276037808000,
"line_mean": 28.2336448598,
"line_max": 94,
"alpha_frac": 0.5882352941,
"autogenerated": false,
"ratio": 3.3454545454545452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44336898395545454,
"avg_score": null,
"num_lines": null
} |
author = 'Anton Bobrov<bobrov@vl.ru>'
name = 'Python REPL'
desc = 'Slim and slick python console'
import os.path
import gtk.gdk, pango
import gtksourceview2
from cPickle import dumps
from snaked.core.prefs import update_view_preferences
def init(injector):
    """Wire up the Python REPL panel contexts and key bindings."""
    # Context is active only while the REPL view itself has focus.
    injector.add_context('python-repl', 'editor',
        lambda e: get_repl_widget(e) if get_repl_widget(e).view.is_focus() else None)
    # Narrower context: cursor sits inside an 'exec-result' chunk.
    injector.add_context('python-repl-result-chunk', 'python-repl',
        lambda p: p if cursor_in_result_chunk(p) else None)
    injector.bind('editor', 'python-repl', 'View/Python console', toggle_repl).to('<alt>2')
    injector.bind(('editor', 'python-repl'), 'python-repl-exec',
        'Python/_Execute', exec_code).to('<ctrl>Return', 1)
    injector.bind('python-repl-result-chunk', 'python-repl-squash-result-chunk',
        'Python/S_quash result chunk', squash_result_chunk).to('<ctrl>d')
repl_widget = None
def get_repl_widget(editor):
    """Return the shared REPL panel, creating and attaching it on first use."""
    global repl_widget
    if repl_widget:
        return repl_widget
    repl_widget = create_repl_widget(editor)
    # Give the console keyboard focus whenever its panel is activated.
    editor.window.append_panel(repl_widget).on_activate(lambda p: p.view.grab_focus())
    return repl_widget
def create_repl_widget(editor):
    """Build the scrolled source view used as the Python console panel."""
    panel = gtk.ScrolledWindow()
    #panel.set_border_width(5)
    panel.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    panel.view = gtksourceview2.View()
    panel.buffer = gtksourceview2.Buffer()
    panel.view.set_buffer(panel.buffer)
    panel.add(panel.view)
    panel.view.show_all()
    # Reuse python highlighting prefs, but strip editor-only decorations.
    editor.window.manager.set_buffer_prefs(panel.buffer, '', 'python')
    panel.buffer.config.prefs.insert(0, {'show-line-numbers':False, 'highlight-current-line':False})
    update_view_preferences(panel.view, panel.buffer)
    # Derive a result-chunk background slightly lighter/darker than the
    # scheme's text background, depending on whether the theme is dark.
    style = panel.buffer.get_style_scheme().get_style('text')
    color = gtk.gdk.color_parse(style.props.background)
    mul = 1.4 if color.value < 0.5 else 1/1.4
    color = str(gtk.gdk.color_from_hsv(color.hue, color.saturation, color.value * mul))
    # Result chunks are read-only and visually offset from entered code.
    panel.buffer.create_tag('exec-result', editable=False, scale=0.9, indent=20,
        foreground=style.props.foreground, background=color, background_full_height=True,
        paragraph_background=color, weight=pango.WEIGHT_NORMAL)
    return panel
def toggle_repl(editor):
    """Show or hide the Python console panel in the editor's window."""
    editor.window.popup_panel(get_repl_widget(editor))
server = None
def get_server_conn(editor):
    """Return the lazily-started execution server (cached module-wide).

    The server is launched in the project root (falling back to the
    directory of the current file) with the project's python executable.
    """
    global server
    if not server:
        from .executor import run_server
        from ..python.utils import get_executable
        root = editor.project_root
        if not root:
            root = os.path.dirname(editor.uri)
        server = run_server(root, get_executable(editor.conf))
    return server
def cursor_in_result_chunk(panel):
    """Tell whether the insert cursor sits inside an 'exec-result' chunk.

    The character before the cursor must carry the tag as well, so a
    cursor placed right at a chunk boundary does not count.
    """
    buffer = panel.buffer
    result_tag = buffer.get_tag_table().lookup('exec-result')
    pos = buffer.get_iter_at_mark(buffer.get_insert())
    if not pos.has_tag(result_tag):
        return False
    pos.backward_char()
    return pos.has_tag(result_tag)
def squash_result_chunk(panel):
    """Delete the whole 'exec-result' chunk surrounding the cursor."""
    buf = panel.buffer
    tag = buf.get_tag_table().lookup('exec-result')
    cursor = buf.get_iter_at_mark(buf.get_insert())
    # Expand backwards to the chunk's starting tag toggle...
    start = cursor.copy()
    if not start.toggles_tag(tag):
        start.backward_to_tag_toggle(tag)
    # ...and forwards to its closing toggle.
    end = cursor
    end.forward_to_tag_toggle(tag)
    buf.begin_user_action()
    buf.delete(start, end)
    buf.end_user_action()
def exec_code(editor, panel):
    """Execute the code chunk under the cursor and splice in its result.

    The chunk is the tag-free span around the cursor; its output replaces
    the following 'exec-result' chunk (if any) as read-only tagged text.
    """
    buf = panel.buffer
    tag = buf.get_tag_table().lookup('exec-result')
    cursor = buf.get_iter_at_mark(buf.get_insert())
    if cursor.has_tag(tag):
        cursor.backward_char()
        if cursor.has_tag(tag):
            # Strictly inside a result chunk: nothing executable here.
            editor.window.message('You are at result chunk. Nothing to exec', 'warn', parent=panel.view)
            return True
    # Expand to the code span between surrounding result chunks.
    start = cursor.copy()
    if not start.toggles_tag(tag):
        start.backward_to_tag_toggle(tag)
    end = cursor
    end.forward_to_tag_toggle(tag)
    source = buf.get_text(start, end).decode('utf-8')
    # Send the source to the execution server; line number is passed so
    # tracebacks refer to console positions.
    _, conn = get_server_conn(editor)
    conn.send_bytes(dumps(('run', source, start.get_line() + 1), 2))
    result = conn.recv()
    # Replace the old result chunk that follows the executed code.
    start = end
    end = start.copy()
    end.forward_to_tag_toggle(tag)
    end_mark = buf.create_mark(None, end)
    buf.begin_user_action()
    if start.starts_line():
        start.backward_char()
    buf.delete(start, end)
    if not result.endswith('\n'):
        result += '\n'
    result = '\n' + result
    buf.insert_with_tags_by_name(start, result, 'exec-result')
    buf.end_user_action()
    # Leave the cursor just past the freshly inserted result.
    buf.place_cursor(buf.get_iter_at_mark(end_mark))
    panel.view.scroll_mark_onscreen(buf.get_insert())
    buf.delete_mark(end_mark)
"repo_name": "baverman/snaked",
"path": "snaked/plugins/python_repl/__init__.py",
"copies": "1",
"size": "4645",
"license": "mit",
"hash": -6066174497733822000,
"line_mean": 29.3660130719,
"line_max": 104,
"alpha_frac": 0.6626480086,
"autogenerated": false,
"ratio": 3.2482517482517483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44108997568517483,
"avg_score": null,
"num_lines": null
} |
author = 'Anton Bobrov<bobrov@vl.ru>'
name = 'Search'
desc = 'Searches words in document'
import re
import weakref
import gtk
import glib
from uxie.utils import idle, refresh_gui, widget_is_child_of, text_buffer_user_action
from uxie.escape import Escapable
active_search_widgets = weakref.WeakKeyDictionary()
active_replace_widgets = weakref.WeakKeyDictionary()
search_selections = []
mark_task_is_in_queue = False
class SearchSelection(object):
    """Value object remembering the text marked via 'mark selection'."""

    def __init__(self, search):
        """Store ``search``, the literal text to highlight."""
        self.search = search
def init(injector):
    """Register search/replace contexts, key bindings and config options."""
    injector.add_context('search', 'ctx_getter', search_context)
    injector.add_context('replace', 'ctx_getter', replace_context)
    # Replace-all vs replace-in-selection are mutually exclusive contexts,
    # split on whether the selection spans multiple lines.
    injector.add_context('replace-all', 'replace',
        lambda view: None if view_has_multiline_selection(view) else view)
    injector.add_context('replace-in-selection', 'replace',
        lambda view: view if view_has_multiline_selection(view) else None)
    injector.bind('textview-active', 'search', 'Edit/_Search#30/_Find', search).to('<ctrl>f')
    injector.bind('textview-active', 'mark-selection', 'Edit/Search/_Mark', mark_selection).to('<ctrl>h')
    injector.bind('search', 'replace', 'Edit/Search/_Replace', replace).to('<ctrl>r')
    injector.bind('replace', 'replace-next', 'Edit/Search/Replace and goto ne_xt',
        replace_next).to('<ctrl>Return', 10)
    injector.bind('replace-all', 'replace-all', 'Edit/Search/Replace _all',
        replace_all, False).to('<ctrl><shift>Return', 10)
    injector.bind('replace-in-selection', 'replace-in-selection',
        'Edit/Search/Replace in _selection', replace_all, True).to('<ctrl><shift>Return', 10)
    injector.bind_check('search', 'search-ignore-case', 'Edit/Search/_Ignore case',
        ignore_case).to('<alt>i')
    injector.bind_check('search', 'search-use-regex', 'Edit/Search/Use _RegEx',
        use_regex).to('<alt>r')
    injector.bind('search', 'next', 'Edit/Search/Find _next', find_next)
    injector.bind('search', 'prev', 'Edit/Search/Find _prev', find_prev)
    # Persistent options and search/replace history.
    from snaked.core.prefs import add_internal_option
    add_internal_option('SEARCH_IGNORE_CASE', False)
    add_internal_option('SEARCH_REGEX', False)
    add_internal_option('LAST_SEARCHES', list)
    add_internal_option('LAST_REPLACES', list)
def search_context(ctx_getter):
    """Resolve the view a search action applies to, or None.

    The active text view qualifies when a search widget or a marked
    selection exists for it; otherwise the view whose search entry holds
    the window focus is returned.
    """
    view = ctx_getter('textview-active')
    if view:
        if search_selections or view in active_search_widgets:
            return view
        return None
    window = ctx_getter('window')
    focused = window.get_focus()
    for candidate, widget in active_search_widgets.items():
        if widget_is_child_of(focused, widget):
            return candidate
def replace_context(ctx_getter):
    """Resolve the view a replace action applies to, or None.

    Mirrors :func:`search_context` but consults the replace widgets.
    """
    view = ctx_getter('textview-active')
    if view:
        if view in active_replace_widgets:
            return view
        return None
    window = ctx_getter('window')
    focused = window.get_focus()
    for candidate, widget in active_replace_widgets.items():
        if widget_is_child_of(focused, widget):
            return candidate
def view_has_multiline_selection(view):
    """True when the view's selection spans more than one line."""
    buffer = view.get_buffer()
    if not buffer.get_has_selection():
        return False
    start, end = buffer.get_selection_bounds()
    return start.get_line() != end.get_line()
def search(view):
    """Open (or reuse) the search widget for ``view`` and seed its entry.

    A single-line selection becomes the search text (escaped when regex
    mode is on); otherwise the most recent history entry is restored.
    """
    if view in active_search_widgets:
        widget = active_search_widgets[view]
    else:
        # Weak ref so the cancel callback never keeps the view alive.
        viewref = weakref.ref(view)
        def on_cancel(_feedback):
            view = viewref()
            if view:
                active_search_widgets.pop(view, None)
                delete_all_marks(view)
                view.grab_focus()
        widget = create_search_widget(view)
        active_search_widgets[view] = widget
        window = view.get_toplevel()
        window.push_escape(
            window.feedback(widget, priority=5, parent=view).on_cancel(on_cancel), 5)
    buf = view.get_buffer()
    if buf.get_has_selection():
        start, end = buf.get_selection_bounds()
        if start.get_line() == end.get_line():
            refresh_gui()
            search = start.get_text(end)
            if is_regex(view):
                # Selection is taken literally even in regex mode.
                search = re.escape(search)
            buf.place_cursor(start)
            update_last_search(view, search)
            widget.entry.set_text(search)
        else:
            set_last_search(view, widget.entry)
        widget.entry.grab_focus()
    else:
        set_last_search(view, widget.entry)
        widget.entry.grab_focus()
def set_last_search(view, entry):
    """Restore the most recent history entry into the search widget.

    Also re-applies the remembered ignore-case/regex flags and refreshes
    the option indicator.  Does nothing when the history is empty.
    """
    searches = entry.get_toplevel().manager.conf['LAST_SEARCHES']
    try:
        search, icase, regex = searches[0]
    except IndexError:
        pass
    else:
        view.get_toplevel().manager.conf['SEARCH_REGEX'] = regex
        view.get_toplevel().manager.conf['SEARCH_IGNORE_CASE'] = icase
        if view in active_search_widgets:
            update_state_widget(view, active_search_widgets[view])
        entry.set_text(search)
def update_last_search(view, search, icase=None, regex=None):
    """Move (search, icase, regex) to the front of the search history.

    Missing flags default to the view's current settings.  Duplicate
    entries are removed first and the history is capped at 30 items.
    Empty searches are ignored.
    """
    if not search:
        return
    if icase is None:
        icase = is_icase(view)
    if regex is None:
        regex = is_regex(view)
    history = view.get_toplevel().manager.conf['LAST_SEARCHES']
    entry = (search, icase, regex)
    # Drop every previous occurrence before re-inserting at the front.
    while entry in history:
        history.remove(entry)
    history.insert(0, entry)
    history[:] = history[:30]
def replace(view):
    """Open (or reuse) the replace widget for ``view`` and focus its entry."""
    if view in active_replace_widgets:
        widget = active_replace_widgets[view]
    else:
        # Weak ref so the cancel callback never keeps the view alive.
        viewref = weakref.ref(view)
        def on_cancel(_feedback):
            view = viewref()
            if view:
                active_replace_widgets.pop(view, None)
                view.grab_focus()
        widget = create_replace_widget(view)
        active_replace_widgets[view] = widget
        window = view.get_toplevel()
        window.push_escape(
            window.feedback(widget, priority=6, parent=view).on_cancel(on_cancel), 5)
    widget.entry.grab_focus()
def backward_search(matcher, text, endpos):
    """Return the last match of ``matcher`` in ``text`` that ends at or
    before ``endpos``, or None when there is none."""
    last = None
    for candidate in matcher.finditer(text):
        if candidate.end() > endpos:
            break
        last = candidate
    return last
def scroll_to_buffer_cursor(view):
    """Scroll the insert mark on screen and clear any editor cursor hint."""
    view.scroll_mark_onscreen(view.get_buffer().get_insert())
    editor_ref = getattr(view, 'editor_ref', None)
    if editor_ref:
        editor_ref().clear_cursor()
def get_find_params(view):
    """Return (search_text, ignore_case, regex) for ``view``, or None.

    An open search widget wins; failing that, a marked selection is used
    as a literal, case-sensitive search.
    """
    if view in active_search_widgets:
        return (active_search_widgets[view].entry.get_text(),
                is_icase(view), is_regex(view))
    if search_selections:
        return search_selections[0].search, False, False
    return None
def do_find(view, dir, start_from=None):
    """Search the buffer and select the hit; returns True/False/None.

    ``dir`` 0 searches forward, 1 backward.  On a miss from the cursor
    the search wraps once from the buffer edge (``start_from`` set marks
    the wrapped attempt).  Returns None when there is nothing to search.
    """
    search, ignore_case, regex = get_find_params(view)
    matcher = get_matcher(view, search, ignore_case, regex)
    if not matcher:
        return
    buf = view.get_buffer()
    iter = start_from
    if not iter:
        if buf.get_has_selection() and dir == 1:
            # Going backward: start before the current selection.
            iter = buf.get_iter_at_mark(buf.get_selection_bound())
        else:
            iter = buf.get_iter_at_mark(buf.get_insert())
    utext = buf.get_text(*buf.get_bounds()).decode('utf-8')
    match = None
    if dir == 0:
        match = matcher.search(utext, iter.get_offset())
    else:
        match = backward_search(matcher, utext, iter.get_offset())
    if match:
        bounds = map(buf.get_iter_at_offset, match.span())
        # Select end-to-start so the cursor lands at the match start.
        buf.select_range(bounds[1], bounds[0])
        scroll_to_buffer_cursor(view)
        if start_from:
            view.get_toplevel().message('Wrap search', 'info', parent=view)
        return True
    elif not start_from:
        # First miss: retry once from the opposite buffer edge.
        return do_find(view, dir, buf.get_bounds()[dir])
    else:
        view.get_toplevel().message('Text not found', 'info', parent=view)
        return False
def find_prev(view):
    """Search backward from the cursor."""
    do_find(view, 1)
def find_next(view, grab_focus=False):
    """Search forward; optionally focus the view after a successful hit."""
    if do_find(view, 0) and grab_focus:
        view.grab_focus()
def update_state_widget(view, widget):
    """Refresh the bold I/R indicator showing the active search options."""
    flags = [('I', is_icase(view)), ('R', is_regex(view))]
    text = ''.join(letter for letter, enabled in flags if enabled)
    widget.opt_state.set_markup('<b>%s</b>' % text)
def create_search_widget(view):
    """Build the inline search bar: icon, option indicator and text entry."""
    widget = gtk.EventBox()
    frame = gtk.Frame()
    widget.add(frame)
    hbox = gtk.HBox(False, 3)
    frame.add(hbox)
    hbox.pack_start(gtk.image_new_from_stock(gtk.STOCK_FIND, gtk.ICON_SIZE_SMALL_TOOLBAR), False)
    # Label showing the I/R (ignore-case/regex) state.
    widget.opt_state = gtk.Label()
    hbox.pack_start(widget.opt_state, False)
    update_state_widget(view, widget)
    entry = gtk.Entry()
    hbox.pack_start(entry, False)
    widget.entry = entry
    # Enter starts the search; edits re-mark occurrences incrementally.
    entry.connect('activate', on_search_activate, view, widget)
    entry.connect_after('changed', on_search_changed, view, widget)
    return widget
def create_replace_widget(view):
    """Build the inline replace bar: icon plus replacement text entry."""
    widget = gtk.EventBox()
    frame = gtk.Frame()
    widget.add(frame)
    hbox = gtk.HBox(False, 3)
    frame.add(hbox)
    hbox.pack_start(gtk.image_new_from_stock(
        gtk.STOCK_FIND_AND_REPLACE, gtk.ICON_SIZE_SMALL_TOOLBAR), False)
    widget.entry = entry = gtk.Entry()
    hbox.pack_start(entry, False)
    # Enter performs replace-and-goto-next.
    entry.connect('activate', on_replace_activate, view)
    return widget
def get_tag(view):
    """Return the buffer's 'search' highlight tag, creating it on demand.

    Colors come from the style scheme's 'search-match' style when present;
    otherwise the 'text' style's colors are swapped for contrast.
    """
    buf = view.get_buffer()
    table = buf.get_tag_table()
    tag = table.lookup('search')
    if not tag:
        tag = buf.create_tag('search')
        style = buf.get_style_scheme().get_style('search-match')
        if style:
            if style.props.background_set:
                tag.props.background = style.props.background
            if style.props.foreground_set:
                tag.props.foreground = style.props.foreground
        else:
            # No dedicated match style: invert the normal text colors.
            style = buf.get_style_scheme().get_style('text')
            if style.props.background_set:
                tag.props.foreground = style.props.background
            if style.props.foreground_set:
                tag.props.background = style.props.foreground
    return tag
def delete_all_marks(view):
    """Remove every search highlight from the view's buffer, if any exist."""
    buffer = view.get_buffer()
    if buffer.get_tag_table().lookup('search'):
        start, end = buffer.get_bounds()
        buffer.remove_tag_by_name('search', start, end)
def get_matcher(view, search, ignore_case, regex, show_feedback=True):
    """Compile ``search`` into a regex object, or return None on failure.

    Non-regex searches are escaped so they match literally.  On a regex
    compile error the user is notified (when ``show_feedback`` is true)
    and the search entry is refocused.
    """
    flags = re.UNICODE
    if ignore_case:
        flags |= re.IGNORECASE
    if regex:
        try:
            return re.compile(unicode(search), flags)
        # 'except Exception, e' was Python-2-only syntax; 'as' is valid on
        # Python 2.6+ and 3.x with identical behavior.
        except Exception as e:
            if show_feedback:
                view.get_toplevel().message('Bad regex: ' + str(e), 'error', 3000, parent=view)
                if view in active_search_widgets:
                    idle(active_search_widgets[view].entry.grab_focus)
            return None
    else:
        return re.compile(re.escape(unicode(search)), flags)
def add_mark_task(view, search, ignore_case, regex, show_feedback=True):
    """Queue a low-priority idle job to highlight all occurrences.

    A module-level flag ensures only one job sits in the idle queue at a
    time while the user is typing.
    """
    global mark_task_is_in_queue
    if not mark_task_is_in_queue:
        mark_task_is_in_queue = True
        idle(mark_occurences, view, search, ignore_case, regex,
            show_feedback, priority=glib.PRIORITY_LOW)
def mark_occurences(view, search, ignore_case, regex, show_feedback=True):
    """Apply the search tag to every match in the buffer.

    Returns True when at least one occurrence was marked.  Feedback
    messages are posted via idle so the UI stays responsive.
    """
    global mark_task_is_in_queue
    mark_task_is_in_queue = False
    matcher = get_matcher(view, search, ignore_case, regex, show_feedback)
    if not matcher:
        return False
    count = 0
    buf = view.get_buffer()
    utext = buf.get_text(*buf.get_bounds()).decode('utf-8')
    for m in matcher.finditer(utext):
        buf.apply_tag(get_tag(view),
            *map(buf.get_iter_at_offset, m.span()))
        count += 1
    if count == 1:
        if show_feedback:
            idle(view.get_toplevel().message, 'One occurrence is marked', 'done', parent=view)
    elif count > 1:
        if show_feedback:
            idle(view.get_toplevel().message, '%d occurrences are marked' % count, 'done', parent=view)
    else:
        if show_feedback:
            idle(view.get_toplevel().message, 'Text not found', 'warn', parent=view)
        return False
    return True
def is_icase(view):
    """Current state of the ignore-case search option."""
    return view.get_toplevel().manager.conf['SEARCH_IGNORE_CASE']
def is_regex(view):
    """Current state of the regex search option."""
    return view.get_toplevel().manager.conf['SEARCH_REGEX']
def use_regex(view, is_set):
    """Toggle the regex option, or report its state when ``is_set`` is false."""
    if not is_set:
        return is_regex(view)
    conf = view.get_toplevel().manager.conf
    conf['SEARCH_REGEX'] = not is_regex(view)
    if view in active_search_widgets:
        update_state_widget(view, active_search_widgets[view])
def ignore_case(view, is_set):
    """Toggle ignore-case, or report its state when ``is_set`` is false."""
    if not is_set:
        return is_icase(view)
    conf = view.get_toplevel().manager.conf
    conf['SEARCH_IGNORE_CASE'] = not is_icase(view)
    if view in active_search_widgets:
        update_state_widget(view, active_search_widgets[view])
def on_search_activate(sender, view, widget):
    """Entry activation: mark all occurrences and jump to the next one."""
    delete_all_marks(view)
    # Remember the current position so the user can jump back.
    editor = getattr(view, 'editor_ref', None)
    if editor:
        editor().add_spot()
    update_last_search(view, widget.entry.get_text())
    if mark_occurences(view, widget.entry.get_text(), is_icase(view), is_regex(view)):
        find_next(view, True)
def on_search_changed(sender, view, widget):
    """Incrementally re-highlight while typing.

    Single alphanumeric/whitespace characters are skipped to avoid
    flooding the buffer with matches on the first keystroke.
    """
    search = widget.entry.get_text()
    idle(delete_all_marks, view)
    if search and ( len(search) != 1 or ( not search.isdigit() and not search.isalpha()
            and not search.isspace() ) ):
        idle(add_mark_task, view, search, is_icase(view), is_regex(view), False)
def mark_selection(view):
    """Highlight every occurrence of the selected text (literal match).

    The selection is stored in `search_selections` so search commands can
    reuse it; Escape removes the highlights again.
    """
    buf = view.get_buffer()
    if not buf.get_has_selection():
        view.get_toplevel().message('Select something', 'warn', parent=view)
        return
    if search_selections:
        # Only one marked selection at a time.
        search_selections[:] = []
        delete_all_marks(view)
    occur = SearchSelection(buf.get_text(*buf.get_selection_bounds()))
    search_selections.append(occur)
    def remove_all(view, occur):
        search_selections[:] = []
        delete_all_marks(view)
    update_last_search(view, occur.search, False, False)
    mark_occurences(view, occur.search, False, False)
    view.get_toplevel().push_escape(Escapable(remove_all, view, occur), 5)
def find_search_tag(view, start, wrap):
    """Return the iter at the start of the next search-tagged span.

    Scans forward from ``start``; when ``wrap`` is true a single wrap from
    the buffer start is attempted before giving up with None.
    """
    tag = get_tag(view)
    it = start.copy()
    while True:
        # A span start both toggles the tag and carries it.
        if it.toggles_tag(tag) and it.has_tag(tag):
            return it
        if not it.forward_to_tag_toggle(tag) or it.is_end():
            if wrap:
                wrap = False
                it = view.get_buffer().get_bounds()[0]
            else:
                return None
def get_search_tag_end(view, start):
    """Return the iter closing the search-tagged span that opens at ``start``."""
    tag = get_tag(view)
    end = start.copy()
    if not end.forward_to_tag_toggle(tag):
        raise Exception('Something goes wrong')
    return end
def do_replace(view, matcher, start, replace):
    """Replace the tagged match beginning at ``start``.

    The matcher must match exactly at the span start; backreferences in
    ``replace`` are expanded.  Returns False when the span no longer
    matches (e.g. buffer changed since marking).
    """
    end = get_search_tag_end(view, start)
    utext = start.get_text(end).decode('utf-8')
    match = matcher.search(utext)
    if not match or match.start() != 0:
        return False
    buf = view.get_buffer()
    # Cursor is placed first so insert_at_cursor lands in the right spot.
    buf.place_cursor(start)
    buf.delete(start, end)
    buf.insert_at_cursor(match.expand(replace))
def get_leftmost_cursor(buf):
    """Return the selection start when one exists, else the insert iter."""
    if not buf.get_has_selection():
        return buf.get_iter_at_mark(buf.get_insert())
    return buf.get_selection_bounds()[0]
def on_replace_activate(_entry, view):
    """Enter in the replace entry performs replace-and-goto-next."""
    replace_next(view)
def replace_next(view):
    """Replace the match under the cursor (if any) and jump to the next.

    When the cursor is not on a match, only the jump happens; with no
    marked matches at all a warning is shown.
    """
    buf = view.get_buffer()
    cursor = get_leftmost_cursor(buf)
    it = find_search_tag(view, cursor, True)
    if it:
        view.grab_focus()
        if it.equal(cursor):
            # Cursor sits on a match: replace it as one undo step.
            matcher = get_matcher(view, *get_find_params(view))
            replace = unicode(active_replace_widgets[view].entry.get_text())
            with text_buffer_user_action(buf):
                do_replace(view, matcher, it, replace)
            it = find_search_tag(view, buf.get_iter_at_mark(buf.get_insert()), True)
        if it:
            buf.place_cursor(it)
            scroll_to_buffer_cursor(view)
    else:
        view.get_toplevel().message('Replace what?', 'warn', parent=view)
def replace_all(view, is_selection):
    """Replace every marked match in the buffer or in the selection.

    All replacements happen inside one undoable user action; afterwards
    the cursor is restored to its original line/offset and a count
    message is shown.
    """
    matcher = get_matcher(view, *get_find_params(view))
    replace = unicode(active_replace_widgets[view].entry.get_text())
    if not matcher:
        return
    buf = view.get_buffer()
    if is_selection:
        start, end = buf.get_selection_bounds()
        start.order(end)
    else:
        start, end = buf.get_bounds()
    # A mark keeps the end position valid while the buffer is edited.
    end_mark = buf.create_mark(None, end)
    cursor = buf.get_iter_at_mark(buf.get_insert())
    line, offset = cursor.get_line(), cursor.get_line_offset()
    count = 0
    it = start
    with text_buffer_user_action(buf):
        while True:
            it = find_search_tag(view, it, False)
            if not it or it.compare(buf.get_iter_at_mark(end_mark)) > 0:
                break
            do_replace(view, matcher, it, replace)
            # Continue scanning from where the replacement left the cursor.
            it = buf.get_iter_at_mark(buf.get_insert())
            count += 1
    if not count:
        view.get_toplevel().message('Nothing to replace', 'info', parent=view)
    elif count == 1:
        view.get_toplevel().message('One occurrence was replaced', 'done', parent=view)
    else:
        view.get_toplevel().message('%d occurrences were replaced' % count, 'done', parent=view)
    # Restore the pre-replace cursor position as closely as possible.
    cursor = buf.get_iter_at_mark(buf.get_insert())
    cursor.set_line(line)
    cursor.set_line_offset(offset)
    buf.place_cursor(cursor)
    scroll_to_buffer_cursor(view)
    view.grab_focus()
"repo_name": "baverman/snaked",
"path": "snaked/plugins/search/__init__.py",
"copies": "1",
"size": "17544",
"license": "mit",
"hash": -2434180063020168000,
"line_mean": 30.4426523297,
"line_max": 105,
"alpha_frac": 0.6171910625,
"autogenerated": false,
"ratio": 3.5024955080854463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4619686570585446,
"avg_score": null,
"num_lines": null
} |
author = 'Anton Bobrov<bobrov@vl.ru>'
name = 'Snippets'
desc = "SnipMate's clone"
import os.path
import re
import weakref
import gtk
from uxie.utils import idle, join_to_data_dir
from snaked.core.completer import attach_completer, add_completion_provider, Provider
from .parser import parse_snippets_from
loaded_snippets = {}
existing_snippet_contexts = {'not_initialized':True}
snippets_match_hash = {}
completion_providers = {}
stop_managers = weakref.WeakKeyDictionary()
def init(injector):
    """Hook snippet support into editor and buffer lifecycle events."""
    injector.on_ready('editor', editor_opened)
    injector.on_ready('buffer-created', buffer_opened)
    #from snaked.core.prefs import register_dialog
    #register_dialog('Snippets', show_snippet_preferences, 'snippet')
def show_snippet_preferences(editor):
    """Open the snippets preference dialog, scanning contexts lazily."""
    if 'not_initialized' in existing_snippet_contexts:
        existing_snippet_contexts.clear()
        discover_snippet_contexts()
    from prefs import PreferencesDialog
    PreferencesDialog(existing_snippet_contexts).show(editor)
def buffer_opened(buf):
    """Attach snippet completion providers matching the buffer's contexts.

    Contexts earlier in ``buf.contexts`` get a higher completion priority.
    Buffers with no snippet context are left untouched.
    """
    if 'not_initialized' in existing_snippet_contexts:
        existing_snippet_contexts.clear()
        discover_snippet_contexts()
    if not any(ctx in existing_snippet_contexts for ctx in buf.contexts):
        return
    prior = 50
    contexts = [c for c in buf.contexts if c in existing_snippet_contexts]
    buf.snippet_contexts = contexts
    for ctx in contexts:
        if ctx not in loaded_snippets:
            # Parse the snippet file on first use per context.
            load_snippets_for(ctx, prior)
        prior -= 1
        add_completion_provider(buf, *completion_providers[ctx])
    buf.connect_after('changed', on_buffer_changed)
def editor_opened(editor):
    """Enable Tab expansion and the completer for snippet-aware buffers."""
    if hasattr(editor.buffer, 'snippet_contexts'):
        # Weak editor ref avoids keeping closed editors alive via the handler.
        editor.view.connect('key-press-event', on_view_key_press_event,
            editor.buffer.snippet_contexts, weakref.ref(editor))
        attach_completer(editor.view)
def load_snippets_for(ctx, prior=None):
    """Parse the context's snippet file and index triggers by length.

    The length-keyed hash lets Tab matching test only plausible trigger
    lengths.  With ``prior`` given, a completion provider is registered.
    """
    snippets = parse_snippets_from(existing_snippet_contexts[ctx])
    loaded_snippets[ctx] = snippets
    snippet_names = [s.snippet for s in snippets.values()]
    for name in snippet_names:
        snippets_match_hash.setdefault(ctx, {}).setdefault(len(name), {})[name] = True
    if prior is not None:
        completion_providers[ctx] = SnippetsCompletionProvider(ctx), prior
def discover_snippet_contexts():
    """Register every *.snippets file from bundled and user data dirs.

    The context name is the file name without its extension.
    """
    scan_dirs = [
        os.path.join(os.path.dirname(__file__), 'snippets'),
        join_to_data_dir('snaked', 'snippets'),
    ]
    for directory in scan_dirs:
        if not os.path.exists(directory):
            continue
        for fname in os.listdir(directory):
            full_path = os.path.join(directory, fname)
            context, ext = os.path.splitext(fname)
            if ext == '.snippets' and os.path.isfile(full_path):
                existing_snippet_contexts[context] = full_path
def get_match(iter, ctx):
    """Return the snippet trigger ending exactly at ``iter``, or None.

    Candidate lengths are tried longest-first so the longest trigger wins.
    """
    names = snippets_match_hash.get(ctx, {})
    if not names:
        return None
    for cnt in sorted(names, reverse=True):
        end = iter.copy()
        end.backward_chars(cnt)
        match = end.get_slice(iter)
        if match in names[cnt]:
            return match
    return None
def get_iter_at_cursor(buffer):
    """Return the iter at the buffer's insert mark."""
    return buffer.get_iter_at_mark(buffer.get_insert())
def on_view_key_press_event(view, event, contexts, editor_ref):
    """Handle Tab / Shift-Tab for snippet expansion and stop navigation.

    Tab first tries to expand a trigger before the cursor; failing that
    it advances to the next tab stop of an active snippet.  Shift-Tab
    moves to the previous stop.  Returns True when the key was consumed.
    """
    if event.keyval == gtk.keysyms.Tab:
        buffer = view.get_buffer()
        cursor = get_iter_at_cursor(buffer)
        matches = {}
        for ctx in contexts:
            match = get_match(cursor, ctx)
            if match:
                matches[ctx] = find_all_snippets(ctx, match)
        if matches:
            return expand_snippet(editor_ref, matches)
        if buffer in stop_managers:
            sm = stop_managers[buffer]
            if sm.cursor_in_snippet_range(cursor):
                return sm.goto_next_stop()
            else:
                # Cursor left the snippet: session over.
                del stop_managers[buffer]
    elif event.keyval == gtk.keysyms.ISO_Left_Tab:
        buffer = view.get_buffer()
        cursor = get_iter_at_cursor(buffer)
        if buffer in stop_managers:
            sm = stop_managers[buffer]
            if sm.cursor_in_snippet_range(cursor):
                return sm.goto_next_stop(True)
            else:
                del stop_managers[buffer]
    return False
def on_buffer_changed(buffer):
    """Keep mirror placeholders in sync while a snippet session is active.

    Ends the session when the cursor leaves the snippet or the snippet
    text has collapsed to nothing.
    """
    if buffer in stop_managers:
        cursor = get_iter_at_cursor(buffer)
        sm = stop_managers[buffer]
        if sm.cursor_in_snippet_range(cursor):
            if sm.snippet_collapsed():
                del stop_managers[buffer]
            else:
                # Deferred so the change being processed is complete.
                idle(sm.replace_inserts)
        else:
            del stop_managers[buffer]
def find_all_snippets(ctx, match):
    """Return every snippet in ``ctx`` whose trigger equals ``match``."""
    candidates = loaded_snippets[ctx].values()
    return [snippet for snippet in candidates if snippet.snippet == match]
def expand_snippet(editor_ref, matches):
    """Insert the matched snippet, or show a chooser when ambiguous.

    A single unambiguous snippet is inserted directly; several candidates
    trigger the completion popup.  Returns True when the Tab press was
    consumed.
    """
    if not matches:
        return False
    elif len(matches) == 1:
        snippets = matches.values()[0]
        if not snippets:
            return False
        if len(snippets) == 1:
            insert_snippet(editor_ref, editor_ref().cursor, snippets[0])
            return True
    # Ambiguous: let the user pick from the matching contexts.
    show_proposals(editor_ref().view, editor_ref().cursor, matches.keys())
    return True
match_ws = re.compile(u'(?u)^[ \t]*')
def get_whitespace(start):
    """Return the leading whitespace of the line containing ``start``."""
    found = match_ws.search(line_text(start))
    return found.group(0) if found else u''
def line_text(iter):
    """Return the full text of the line containing ``iter``."""
    if not iter.starts_line():
        # Rewind a copy to the line start; the caller's iter is untouched.
        iter = iter.copy()
        iter.set_line(iter.get_line())
    end = iter.copy()
    if not end.ends_line():
        end.forward_to_line_end()
    return iter.get_text(end)
def insert_snippet(editor_ref, iter, snippet):
    """Replace the typed trigger with the snippet body and start a session.

    The body is rendered with the current line's indentation and the
    view's tab settings; a StopManager is installed to drive tab stops.
    """
    editor = editor_ref()
    buffer = editor.buffer
    view = editor.view
    expand_tabs = view.get_insert_spaces_instead_of_tabs()
    tab_width = view.get_tab_width()
    indent = unicode(get_whitespace(iter))
    buffer.begin_user_action()
    if not iter_at_whitespace(iter):
        # Delete the (possibly partial) trigger text preceding the cursor.
        start = iter.copy()
        start.backward_chars(len(snippet.snippet))
        while not start.equal(iter):
            txt = start.get_text(iter).decode('utf-8')
            if snippet.snippet.startswith(txt):
                buffer.delete(start, iter)
                break
            start.forward_char()
    offset = get_iter_at_cursor(buffer).get_offset()
    body, stop_offsets, insert_offsets = snippet.get_body_and_offsets(
        indent, expand_tabs, tab_width)
    buffer.insert_at_cursor(body)
    buffer.end_user_action()
    stop_managers[buffer] = StopManager(editor_ref, offset, stop_offsets, insert_offsets)
def show_proposals(view, iter, contexts):
    """Pop up the completer restricted to the given snippet contexts."""
    view.completer.complete(view, [completion_providers[c][0] for c in contexts], iter)
class StopManager(object):
    """Drive tab-stop navigation and mirror placeholders for one snippet.

    Positions are stored as GTK text marks so they stay valid while the
    buffer is edited.  One instance per buffer lives in ``stop_managers``
    for the duration of the snippet session.
    """
    def __init__(self, editor_ref, offset, stop_offsets, insert_offsets):
        """Create marks for the snippet span, its stops and its mirrors.

        ``offset`` is the buffer offset where the body was inserted;
        ``stop_offsets``/``insert_offsets`` are relative (start, end)
        pairs produced by the snippet parser.  Jumps to the first stop.
        """
        buf = self.buffer = editor_ref().buffer
        self.editor_ref = editor_ref
        # Left-gravity start mark, right-gravity end mark bracket the span.
        self.start_mark = buf.create_mark(None, buf.get_iter_at_offset(offset), True)
        self.end_mark = buf.create_mark(None, get_iter_at_cursor(buf))
        self.stop_marks = {}
        for i in sorted(stop_offsets):
            s, e = stop_offsets[i]
            s = buf.create_mark(None, buf.get_iter_at_offset(offset + s), True)
            e = buf.create_mark(None, buf.get_iter_at_offset(offset + e))
            self.stop_marks[i] = s, e
        self.insert_marks = {}
        for i in sorted(insert_offsets):
            for s, e in insert_offsets[i]:
                s = buf.create_mark(None, buf.get_iter_at_offset(offset + s), True)
                e = buf.create_mark(None, buf.get_iter_at_offset(offset + e))
                self.insert_marks.setdefault(i, []).append((s, e))
        try:
            self.goto_stop(min(self.stop_marks))
        except ValueError:
            # Snippet defines no stops at all.
            pass
    def goto_stop(self, idx):
        """Select stop ``idx`` and scroll it into view."""
        self.buffer.select_range(*reversed(self.get_iter_pair(*self.stop_marks[idx])))
        self.editor_ref().view.scroll_mark_onscreen(self.buffer.get_insert())
    def get_iter_pair(self, start_mark, end_mark):
        """Return the (start, end) iters for a pair of marks."""
        return (self.buffer.get_iter_at_mark(start_mark),
            self.buffer.get_iter_at_mark(end_mark))
    def cursor_in_snippet_range(self, cursor):
        """True while ``cursor`` is still within the snippet's span."""
        return self.in_range(cursor, *self.get_iter_pair(self.start_mark, self.end_mark))
    def snippet_collapsed(self):
        """True when the snippet span has shrunk to zero length."""
        s, e = self.get_iter_pair(self.start_mark, self.end_mark)
        return s.equal(e)
    def in_range(self, cursor, start, end):
        """Inclusive range test: the end boundary itself counts as inside."""
        return cursor.in_range(start, end) or cursor.equal(end)
    def get_current_stop_idx(self, cursor):
        """Return the index of the stop containing ``cursor``, or None."""
        for i, (s, e) in self.stop_marks.iteritems():
            if self.in_range(cursor, *self.get_iter_pair(s, e)):
                return i
        return None
    def goto_next_stop(self, back=False):
        """Advance to the next (or previous) stop; True if Tab was consumed.

        Past the last stop the cursor jumps to the snippet's end (or start
        when going back) and the session is terminated.
        """
        if self.buffer.get_has_selection():
            cursor = self.buffer.get_selection_bounds()[1]
        else:
            cursor = get_iter_at_cursor(self.buffer)
        idx = self.get_current_stop_idx(cursor)
        if idx is not None:
            try:
                if back:
                    idx = max(i for i in self.stop_marks if i < idx)
                else:
                    idx = min(i for i in self.stop_marks if i > idx)
            except ValueError:
                # No further stop in that direction: leave the snippet.
                if back:
                    self.buffer.place_cursor(self.buffer.get_iter_at_mark(self.start_mark))
                else:
                    self.buffer.place_cursor(self.buffer.get_iter_at_mark(self.end_mark))
                self.editor_ref().view.scroll_mark_onscreen(self.buffer.get_insert())
                self.remove()
                return True
            self.goto_stop(idx)
            return True
        return False
    def replace_inserts(self):
        """Mirror the current stop's text into its linked placeholders.

        Change notifications are blocked during the rewrite so the edit
        does not recursively re-trigger this handler.
        """
        cursor = get_iter_at_cursor(self.buffer)
        if not cursor.equal(self.buffer.get_iter_at_mark(self.end_mark)):
            idx = self.get_current_stop_idx(cursor)
            if idx is not None:
                if idx in self.insert_marks:
                    txt = self.buffer.get_text(*self.get_iter_pair(*self.stop_marks[idx]))
                    self.buffer.handler_block_by_func(on_buffer_changed)
                    self.buffer.begin_user_action()
                    for s, e in self.insert_marks[idx]:
                        self.buffer.delete(*self.get_iter_pair(s, e))
                        self.buffer.insert(self.buffer.get_iter_at_mark(s), txt)
                    self.buffer.end_user_action()
                    self.buffer.handler_unblock_by_func(on_buffer_changed)
                return
        # Cursor reached the snippet end or left all stops: finish up.
        self.remove()
    def remove(self):
        """End the snippet session for this buffer."""
        self.editor_ref().message('Snippet was completed')
        try:
            del stop_managers[self.buffer]
        except KeyError:
            pass
def iter_at_whitespace(iter):
    """True when ``iter`` is at a line start or follows whitespace or a
    closing bracket character."""
    if iter.starts_line():
        return True
    line_start = iter.copy()
    line_start.set_line(iter.get_line())
    previous_char = line_start.get_text(iter).decode('utf-8')[-1]
    return previous_char.isspace() or previous_char in ('>', ')', '}', ']')
class SnippetsCompletionProvider(Provider):
    """Completion provider offering the snippets of one context."""
    def __init__(self, ctx):
        # Context name, e.g. 'python'; keys loaded_snippets and the hash.
        self.ctx = ctx
    def get_name(self):
        """Human-readable provider name shown in the completer."""
        return '%s snippets' % self.ctx
    def is_match(self, iter):
        """Provider applies at any position; returns the iter itself."""
        return iter
    def complete(self, iter, is_interactive):
        """Yield (label, snippet) proposals for the text before ``iter``.

        Interactive popups offer prefix matches (everything at a
        whitespace position); non-interactive completion requires an
        exact trigger match.
        """
        snippets = []
        all_snippets = sorted(loaded_snippets[self.ctx].values(), key=lambda r:r.label)
        if is_interactive:
            if iter_at_whitespace(iter):
                snippets = all_snippets
            else:
                names = snippets_match_hash.get(self.ctx, {})
                if names:
                    already_added = {}
                    # Try longer candidate prefixes first so the longest
                    # trigger prefix wins; avoid duplicate proposals.
                    for cnt in range(max(names), 0, -1):
                        end = iter.copy()
                        end.backward_chars(cnt)
                        match = end.get_slice(iter)
                        for s in (s for s in all_snippets if s not in already_added):
                            if s.snippet.startswith(match):
                                already_added[s] = True
                                snippets.append(s)
        else:
            match = get_match(iter, self.ctx)
            snippets = [s for s in all_snippets if match == s.snippet]
        if snippets:
            for s in snippets:
                yield s.label, s
        else:
            return
    def activate(self, view, snippet):
        """Insert the chosen snippet at the cursor."""
        buf = view.get_buffer()
        it = buf.get_iter_at_mark(buf.get_insert())
        insert_snippet(view.editor_ref, it, snippet)
| {
"repo_name": "baverman/snaked",
"path": "snaked/plugins/snippets/__init__.py",
"copies": "1",
"size": "12534",
"license": "mit",
"hash": -1359475713241179000,
"line_mean": 31.5558441558,
"line_max": 91,
"alpha_frac": 0.5863251955,
"autogenerated": false,
"ratio": 3.720391807658059,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48067170031580586,
"avg_score": null,
"num_lines": null
} |
author = 'Anton Bobrov<bobrov@vl.ru>'
name = 'Spell check'
desc = 'Attaches spell checker to current editor'
import weakref
import glib
import gtk
attached_spells = weakref.WeakKeyDictionary()
def init(injector):
    """Register the F7 spell-check toggle and its per-editor preference."""
    injector.bind('editor', 'toggle-spell-check', 'Edit/Toggle spel_l check#50',
        toggle_spell).to('F7', 1)
    injector.on_ready('editor-with-buffer', editor_opened)
    # Expose a 'spell-check' checkbox in the editor preferences dialog;
    # the option defaults to off.
    from snaked.core.prefs import add_editor_preferences
    add_editor_preferences(
        on_preferences_dialog_created,
        on_preferences_dialog_refresh, {
            'default':{
                'spell-check': False
            }
        })
def editor_opened(editor):
    """Attach the spell checker to a freshly opened editor when its buffer
    config requests it."""
    wants_spell = editor.buffer.config['spell-check']
    if wants_spell:
        toggle_spell(editor)
def on_preferences_dialog_created(dialog):
    """:type dialog: snaked.core.gui.editor_prefs.PreferencesDialog"""
    # Add a 'Spell check' checkbox to the preferences dialog, wired to the
    # 'spell-check' config key via the dialog's generic toggle handler.
    dialog.spell_check = gtk.CheckButton('Spell chec_k')
    dialog.spell_check.connect('toggled', dialog.on_checkbox_toggled, 'spell-check')
    dialog.spell_check.show()
    dialog.vbox.pack_start(dialog.spell_check, False, False)
def on_preferences_dialog_refresh(dialog, pref):
    """:type dialog: snaked.core.gui.editor_prefs.PreferencesDialog"""
    # Sync the checkbox with the current 'spell-check' preference value.
    dialog.spell_check.set_active(pref['spell-check'])
def toggle_spell(editor):
    # Toggle gtkspell for this editor: detach if already attached, otherwise
    # attach a checker for the default locale. Missing gtkspell or missing
    # dictionaries are reported to the user instead of raising — spell
    # checking is an optional feature.
    if editor in attached_spells:
        spell = attached_spells[editor]
        spell.detach()
        del attached_spells[editor]
    else:
        try:
            from gtkspell import Spell
            from locale import getdefaultlocale
            attached_spells[editor] = Spell(editor.view, getdefaultlocale()[0])
        except ImportError:
            editor.message('Spellcheck not available. You need to install pygtkspell')
        except glib.GError:
            editor.message('Spellcheck not available. Perhaps you have no dictionaries')
| {
"repo_name": "baverman/snaked",
"path": "snaked/plugins/spell/__init__.py",
"copies": "1",
"size": "1836",
"license": "mit",
"hash": 2805823160802279400,
"line_mean": 31.7857142857,
"line_max": 88,
"alpha_frac": 0.6775599129,
"autogenerated": false,
"ratio": 3.592954990215264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4770514903115264,
"avg_score": null,
"num_lines": null
} |
import os
import sys
import re
TOC_LIST_PREFIX = "-"
# TOC_LIST_PREFIX = "*"

# ATX headers: "# Title", "## Title ##", ...
HEADER_LINE_RE = re.compile("^(#+)\s*(.*?)\s*(#+$|$)", re.IGNORECASE)
# Setext underlines: a line of '=' promotes the previous line to an H1 and a
# line of '-' to an H2 (the original had the two patterns swapped, producing
# wrong TOC nesting for underlined headings).
HEADER1_UNDERLINE_RE = re.compile("^=+$")
HEADER2_UNDERLINE_RE = re.compile("^-+$")

# Dictionary of anchor name to number of instances found so far
anchors = {}
def print_usage():
    """Show the command-line usage message on stdout."""
    usage = "\nUsage: md-to-toc <markdown_file>"
    print(usage)
def to_github_anchor(title):
    '''
    Converts markdown header title (without #s) to GitHub-formatted anchor.
    Note that this function attempts to recreate GitHub's anchor-naming logic.
    Uses the module-level `anchors` dict to number repeated titles.
    '''
    # Convert to lower case and replace spaces with dashes
    anchor_name = title.strip().lower().replace(' ', '-')
    # Strip all invalid characters (raw string avoids the invalid-escape warning)
    anchor_name = re.sub(r"[^A-Za-z0-9\-_]", "", anchor_name)
    # If we've encountered this anchor name before, append next instance count;
    # the first occurrence keeps the bare name, later ones get -1, -2, ...
    count = anchors.get(anchor_name)
    if count is None:  # `is None`, not `== None` (PEP 8)
        anchors[anchor_name] = 0
    else:
        count += 1
        anchors[anchor_name] = count
        anchor_name = anchor_name + '-' + str(count)
    return '#' + anchor_name
def toggles_block_quote(line):
    '''Returns true if line toggles block quotes on or off (i.e. finds odd number of ```)'''
    # An odd count of ``` fences flips the in/out-of-code-block state;
    # odd implies > 0, so a single count() call suffices.
    return line.count("```") % 2 != 0
def main(argv = None):
    """Print a GitHub-flavoured table of contents for the markdown file named
    in argv[1]; prints usage and returns 0 when no file argument is given."""
    if argv is None:
        argv = sys.argv
    if len(argv) < 2:
        print_usage()
        return 0

    filespec = argv[1]
    in_block_quote = False
    results = []  # list of (header level, title, anchor) tuples
    last_line = ""

    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filespec) as file:
        for line in file:
            if toggles_block_quote(line):
                in_block_quote = not in_block_quote
            if in_block_quote:
                continue

            found_header = False
            header_level = 0

            # ATX header: "# Title"
            m = HEADER_LINE_RE.match(line)
            if m is not None:
                header_level = len(m.group(1))
                title = m.group(2)
                found_header = True

            # Setext underline headers: the previous line holds the title.
            if not found_header:
                m = HEADER1_UNDERLINE_RE.match(line)
                if m is not None:
                    header_level = 1
                    title = last_line.rstrip()
                    found_header = True

            if not found_header:
                m = HEADER2_UNDERLINE_RE.match(line)
                if m is not None:
                    header_level = 2
                    title = last_line.rstrip()
                    found_header = True

            if found_header:
                results.append( (header_level, title, to_github_anchor(title)) )

            last_line = line

    if not results:
        # No headers at all: min() below would raise on an empty sequence.
        return 0

    # Compute min header level so we can offset output to be flush with left edge
    min_header_level = min(results, key=lambda e: e[0])[0]

    for r in results:
        header_level = r[0]
        spaces = "  " * (header_level - min_header_level)
        print("{}{} [{}]({})".format(spaces, TOC_LIST_PREFIX, r[1], r[2]))
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| {
"repo_name": "julianpistorius/magnetosphere",
"path": ".repo-scripts/md-to-toc.py",
"copies": "2",
"size": "2628",
"license": "bsd-3-clause",
"hash": 8797843081328236000,
"line_mean": 23.3333333333,
"line_max": 89,
"alpha_frac": 0.6468797565,
"autogenerated": false,
"ratio": 2.903867403314917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4550747159814917,
"avg_score": null,
"num_lines": null
} |
import os
import sys
import re
TOC_LIST_PREFIX = "-"
# TOC_LIST_PREFIX = "*"

# ATX header line: captures the run of '#'s (heading level) and the title
# text, tolerating trailing closing '#'s.
HEADER_LINE_RE = re.compile("^(#+)\s*(.*?)\s*(#+$|$)", re.IGNORECASE)

# Dictionary of anchor name to number of instances found so far
anchors = {}
def print_usage():
    """Show the command-line usage message on stdout."""
    usage = "\nUsage: md-to-toc <markdown_file>"
    print(usage)
def to_github_anchor(title):
    '''
    Converts markdown header title (without #s) to GitHub-formatted anchor.
    Note that this function attempts to recreate GitHub's anchor-naming logic.
    Uses the module-level `anchors` dict to number repeated titles.
    '''
    # Convert to lower case and replace spaces with dashes
    anchor_name = title.strip().lower().replace(' ', '-')
    # Strip all invalid characters (raw string avoids the invalid-escape warning)
    anchor_name = re.sub(r"[^A-Za-z0-9\-_]", "", anchor_name)
    # If we've encountered this anchor name before, append next instance count;
    # the first occurrence keeps the bare name, later ones get -1, -2, ...
    count = anchors.get(anchor_name)
    if count is None:  # `is None`, not `== None` (PEP 8)
        anchors[anchor_name] = 0
    else:
        count += 1
        anchors[anchor_name] = count
        anchor_name = anchor_name + '-' + str(count)
    return '#' + anchor_name
def toggles_block_quote(line):
    '''Returns true if line toggles block quotes on or off (i.e. finds odd number of ```)'''
    # An odd count of ``` fences flips the in/out-of-code-block state;
    # odd implies > 0, so a single count() call suffices.
    return line.count("```") % 2 != 0
def main(argv = None):
    """Print a GitHub-flavoured table of contents for the markdown file named
    in argv[1]; prints usage and returns 0 when no file argument is given."""
    if argv is None:
        argv = sys.argv
    if len(argv) < 2:
        print_usage()
        return 0

    filespec = argv[1]
    in_block_quote = False
    results = []  # list of (header level, title, anchor) tuples

    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filespec) as file:
        for line in file:
            if toggles_block_quote(line):
                in_block_quote = not in_block_quote
            if in_block_quote:
                continue
            m = HEADER_LINE_RE.match(line)
            if m is not None:
                header_level = len(m.group(1))
                title = m.group(2)
                # (a dead `spaces` computation was removed here; the indent is
                # derived from min_header_level in the output loop below)
                results.append( (header_level, title, to_github_anchor(title)) )

    if not results:
        # No headers at all: min() below would raise on an empty sequence.
        return 0

    # Compute min header level so we can offset output to be flush with left edge
    min_header_level = min(results, key=lambda e: e[0])[0]

    for r in results:
        header_level = r[0]
        spaces = "  " * (header_level - min_header_level)
        print("{}{} [{}]({})".format(spaces, TOC_LIST_PREFIX, r[1], r[2]))
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| {
"repo_name": "wilsonmar/md-to-toc",
"path": "md-to-toc.py",
"copies": "1",
"size": "2158",
"license": "mit",
"hash": -1905618930589763300,
"line_mean": 24.3882352941,
"line_max": 89,
"alpha_frac": 0.6459684893,
"autogenerated": false,
"ratio": 2.9400544959128063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4086022985212806,
"avg_score": null,
"num_lines": null
} |
import os
import sys
import tempfile
import platform
def PrintUsage():
    # Print the usage banner (Python 2 print statement); the {} placeholder is
    # filled with this script's basename.
    print """
Plots an HSM defined in cpp file(s) via hsmToDot -> dot -> default image viewer
Requires GraphViz (Windows: https://graphviz.gitlab.io/_pages/Download/Download_windows.html)
Usage: {} <filespec>
""".format(os.path.basename(sys.argv[0]))
def GetScriptPath():
    """Return the directory containing this script (argv[0], symlinks resolved)."""
    script_file = os.path.realpath(sys.argv[0])
    return os.path.dirname(script_file)
def ExecCommand(command):
    """Echo *command*, run it through the shell, and raise on a non-zero exit."""
    print('[Exec] ' + command)
    status = os.system(command)
    if status:
        raise Exception("Command failed!")
def OpenImage(command):
    """Open *command* (an image path) with the platform's default viewer."""
    current_os = platform.system()
    if current_os == 'Windows':
        # On Windows, running the file path launches the associated viewer.
        ExecCommand(command)
    elif current_os == 'Linux':
        ExecCommand('xdg-open ' + command)
    else:
        raise Exception("Unknown platform")
def main(argv = None):
    """Render the HSM described by argv[1] to a PNG (hsmToDot -> dot) and
    open it in the default image viewer; print usage when no file is given."""
    if argv is None:
        argv = sys.argv
    if len(argv) < 2:
        PrintUsage()
        return 0

    filespec = argv[1]

    # Write dot file
    dot_path = os.path.join(tempfile.gettempdir(), os.path.basename(filespec) + '.dot')
    converter = os.path.join(GetScriptPath(), 'hsmToDot.py')
    ExecCommand('"{}" {}'.format(sys.executable, converter + ' ' + filespec + ' > ' + dot_path))

    # Invoke dot to produce image
    png_path = dot_path + '.png'
    ExecCommand('dot ' + dot_path + ' -Tpng -o' + png_path)

    # Open default image viewer
    OpenImage(png_path)
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| {
"repo_name": "amaiorano/hsm",
"path": "tools/plotHsm.py",
"copies": "1",
"size": "1395",
"license": "mit",
"hash": 9150551174184714000,
"line_mean": 23.9107142857,
"line_max": 127,
"alpha_frac": 0.6817204301,
"autogenerated": false,
"ratio": 3,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8481807105648206,
"avg_score": 0.13998266489035888,
"num_lines": 56
} |
import sys
import re
TOC_LIST_PREFIX = "-"
# TOC_LIST_PREFIX = "*"

# ATX headers: "# Title", "## Title ##", ...
HEADER_LINE_RE = re.compile("^(#+)\s*(.*?)\s*(#+$|$)", re.IGNORECASE)
# Setext underlines: a line of '=' promotes the previous line to an H1 and a
# line of '-' to an H2 (the original had the two patterns swapped, producing
# wrong TOC nesting for underlined headings).
HEADER1_UNDERLINE_RE = re.compile("^=+$")
HEADER2_UNDERLINE_RE = re.compile("^-+$")

# Dictionary of anchor name to number of instances found so far
anchors = {}
def print_usage():
    """Show the command-line usage message on stdout."""
    usage = "\nUsage: md-to-toc <markdown_file>"
    print(usage)
def to_github_anchor(title):
    '''
    Converts a markdown header title (without #s) to a GitHub-formatted
    anchor, recreating GitHub's anchor-naming logic: lowercase, spaces to
    dashes, punctuation stripped, repeated anchors numbered.
    '''
    # Lowercase, spaces -> dashes, then drop the punctuation GitHub strips.
    slug = title.strip().lower().replace(' ', '-')
    slug = re.sub(r"[\[\]\"!#$%&'()*+,./:;<=>?@\^{|}~]", "", slug)
    # Number repeated anchors: first occurrence stays bare, later ones
    # get -1, -2, ... (tracked in the module-level `anchors` dict).
    seen = anchors.get(slug)
    if seen is None:
        anchors[slug] = 0
    else:
        seen += 1
        anchors[slug] = seen
        slug = slug + '-' + str(seen)
    # Backticks are removed last (they are not in the strip pattern above).
    slug = slug.replace('`', '')
    return '#' + slug
def toggles_block_quote(line):
    '''Returns true if line toggles block quotes on or off
    (i.e. finds odd number of ```).'''
    # The original had a second bare string statement after the docstring
    # (a no-op) and counted the fences twice; an odd count implies > 0, so a
    # single count() call suffices.
    return line.count("```") % 2 != 0
def main(argv=None):
    """Print a GitHub-flavoured table of contents for the markdown file named
    in argv[1]; prints usage and returns 0 when no file argument is given."""
    if argv is None:
        argv = sys.argv
    if len(argv) < 2:
        print_usage()
        return 0

    filespec = argv[1]
    in_block_quote = False
    results = []  # list of (header level, title, anchor) tuples
    last_line = ""

    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filespec) as file:
        for line in file:
            if toggles_block_quote(line):
                in_block_quote = not in_block_quote
            if in_block_quote:
                continue

            found_header = False
            header_level = 0

            # ATX header: "# Title"
            m = HEADER_LINE_RE.match(line)
            if m is not None:
                header_level = len(m.group(1))
                title = m.group(2)
                found_header = True

            # Setext underline headers: the previous line holds the title.
            if not found_header:
                m = HEADER1_UNDERLINE_RE.match(line)
                if m is not None:
                    header_level = 1
                    title = last_line.rstrip()
                    found_header = True

            if not found_header:
                m = HEADER2_UNDERLINE_RE.match(line)
                if m is not None:
                    header_level = 2
                    title = last_line.rstrip()
                    found_header = True

            if found_header:
                results.append((header_level, title, to_github_anchor(title)))

            last_line = line

    if not results:
        # No headers at all: min() below would raise on an empty sequence.
        return 0

    # Compute min header level so we can offset output to be flush with
    # left edge
    min_header_level = min(results, key=lambda e: e[0])[0]

    for r in results:
        header_level = r[0]
        spaces = "  " * (header_level - min_header_level)
        print("{}{} [{}]({})".format(spaces, TOC_LIST_PREFIX, r[1], r[2]))
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| {
"repo_name": "amaiorano/md-to-toc",
"path": "md-to-toc.py",
"copies": "1",
"size": "3087",
"license": "mit",
"hash": 5929427442366323000,
"line_mean": 25.8434782609,
"line_max": 80,
"alpha_frac": 0.5607385811,
"autogenerated": false,
"ratio": 3.5523590333716917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4613097614471691,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antonio Segura Cano'
# NOTE(review): assigning to the module-level __name__ overrides Python's own
# module name (and would defeat any `if __name__ == "__main__"` check in this
# file) — confirm this is intentional.
__name__ = 'utils'
import os
import math
import tkMessageBox
def fix_size(s, l):
    """Left-pad string `s` with '0' characters up to length `l`.

    Strings already at (or beyond) length `l` are returned unchanged; the
    original recursive version never terminated when len(s) > l
    (it kept prepending zeros until the lengths were exactly equal).
    """
    return s.rjust(l, "0")
def hasindeterminacy(edge):
    """True when the edge's input word (edge[1]) contains a '-' wildcard bit."""
    return '-' in edge[1]
def matchindeterminacy(s0, s1):
    """True when s0 matches s1 bit-for-bit, treating '-' in s0 as a wildcard."""
    return all(bit0 == '-' or bit0 == bit1 for bit0, bit1 in zip(s0, s1))
def log(filepath, numline):
    # Report a malformed kiss2 line on stdout and append a timestamped entry
    # to ../logs/error.log via the shell (requires a POSIX `date` command and
    # a writable ../logs directory relative to the working directory).
    print "Format kiss2 wrong at line " + numline.__str__()
    os.system('(date "+DATE: %Y-%m-%d%nTIME: %H:%M" && echo "' + filepath +
              ' wrong at line '+numline.__str__() + '") >> ../logs/error.log')
def treatment_size(s, l):
    """True when the length of `s` equals the numeric field width `l` (str or int)."""
    return len(s) == int(l)
def getPatternsAux(statesdict, actual, statesvisited, value0):
    # Depth-first walk from state `actual`, accumulating input words; edges
    # whose output (edge[2]) is "1" terminate a pattern, which is collected in
    # `store` and finally shown in a message box.
    # NOTE(review): `store += store + "\n" + value` doubles the previously
    # accumulated text on every hit — this looks like it should read
    # `store += "\n" + value`; confirm before relying on the dialog contents.
    # NOTE(review): `statesvisited.append(edge[1])` records the edge's input
    # word while the cycle check above tests edge[0] (the target state) —
    # possibly a typo for edge[0].
    state = statesdict[actual]
    value = value0
    statesvisited.append(actual)
    store = ""
    for edge in state:
        value += edge[1]
        if edge[0] in statesvisited:
            return
        if edge[2] == "1":
            statesvisited.append(edge[1])
            store += store +"\n"+ value
        else:
            getPatternsAux(statesdict, edge[0], statesvisited, value)
    tkMessageBox.showinfo("The patterns are the following: ",store)
# http://stackoverflow.com/questions/36380379/python-create-all-possible-unique-lists-with-1-or-0-of-specific-length
def getPatternList(maxLenght, step):
    """Return all distinct binary strings of length 1..maxLenght whose length
    is a multiple of `step`, in lexicographic (string) order."""
    words = {''}
    for _ in range(maxLenght):
        # Extend every word seen so far by one bit; the empty word stays in
        # the pool so all lengths up to maxLenght get generated.
        words = words | {word + bit for word in words for bit in '01'}
    words.discard('')
    return sorted(word for word in words if len(word) % step == 0)
def inaccess (statesdict, visited, index, canModify):
    # Breadth-first reachability from "s0". `visited` is the discovery order;
    # `canModify` maps each discovered state to the list of states that reach
    # it *after* its first discovery (redundant in-edges whose source may be
    # redirected to wire up unreachable states — see the callers in FSM_class).
    # Call with visited=[], index=0, canModify={} initially.
    if visited.__len__() == 0:
        visited = ["s0"]
    if visited.__len__() < index + 1:
        # Every discovered state has been expanded: done.
        return canModify
    state = statesdict[visited[index]]
    for edge in state:
        if not edge[0] in canModify:
            if edge[0] not in visited:
                visited.append(edge[0])
            canModify[edge[0]] = []
        else :
            canModify[edge[0]].append(visited[index])
    index += 1
    return inaccess(statesdict, visited, index, canModify)
| {
"repo_name": "GOYUSO/FSpyChine",
"path": "src/FSM_utils.py",
"copies": "1",
"size": "2392",
"license": "mit",
"hash": -2731391135184983600,
"line_mean": 24.4468085106,
"line_max": 116,
"alpha_frac": 0.5334448161,
"autogenerated": false,
"ratio": 3.497076023391813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4530520839491813,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antonio Segura Cano'
import numpy as np
import time
from graphviz import Digraph
import tkMessageBox
from wildcards import wildcard
from FSM_utils import *
# We'll create a FSMachine class
# FSMachine class will generate all methods about the program
# A random FSMachine: FSMachine.random
# KISS2 headers include the following information:
#
# .i # number of inputs
# .o # number of outputs
# .p # number of products
# .s # number of states used
# .r # RESET state [optional]
class FSM:
    """Finite state machine container and generator.

    The machine lives in `statesdict`: state name -> list of
    (next_state, input_word, output_word) tuples.

    NOTE(review): the generator methods (random / pattern / sequential)
    subscript `self` like a dict (e.g. self["seed"]), but FSM defines no
    __getitem__/__setitem__ — they appear to be written against the `meta`
    kwargs dict rather than an FSM instance; confirm how they are invoked.
    """
    def __init__(self, **kwargs):
        # meta: free-form generator parameters (seed, input, output, states, ...)
        self.meta = kwargs
        self.statesdict = {}

    def build(self, f):
        # Build the machine by calling generator `f` with the meta parameters.
        self.statesdict = f(self.meta)

    def kiss2(self, *args):
        # Serialise the machine to a .kiss2 file under args[0] (the output
        # directory). NOTE(review): `path` and `r` are only assigned
        # conditionally — args must be non-empty and `r` is first read under a
        # short-circuiting `not i or ...` (i is initially False, so this is
        # safe only on the first iteration). The output file is never closed.
        filename = time.strftime("FSM_%m%d%H%M%S.kiss2")
        if args:
            path = args[0]
        stated = self.statesdict
        if not stated:
            stated = self.meta["statesdict"]
        # name = time.strftime("FSM_%m%d%H%M%S.kiss2")
        if stated.__len__() != 0:
            p = 0
            i = False
            o = False
            outfile = open(path + "/" + filename, 'a')
            blob = ""
            for statenode in stated:
                if not i or not o or not r:
                    # Derive .i/.o widths and the reset state from the first node.
                    i = stated[statenode][0][1].__len__()
                    o = stated[statenode][0][2].__len__()
                    r = statenode
                for transition in stated[statenode]:
                    t2 = transition[2]
                    if self.meta["type"] == "pattern":
                        # Pattern machines store outputs as decimal pattern
                        # indices; re-encode them as zero-padded binary.
                        t2 = int(transition[2])
                        t2 = "{0:b}".format(t2)
                        while t2.__len__() < math.log(self.meta["output"]-1) + 1:
                            t2 = "0"+t2
                    blob += transition[1] + " " + statenode + " " + transition[0] + " " + t2 + "\n"
                    p += 1
            # Emit the .i .o .p .s .r header lines, then the transitions.
            iopsr = [i,o,p,stated.__len__(),r]
            header = "iopsr"
            cont = 0
            for l in header:
                outfile.write("." + l + " " + str(iopsr[cont]) + "\n")
                cont += 1
            outfile.write("\n"+blob)
        else:
            print "Sorry, but you must create a FSM with 'built' method first"

    def image(self, *args):
        # Render the machine to a PNG with graphviz; args[0] is the output
        # directory. An unlabeled edge from "" marks the initial state.
        filename = time.strftime("FSM_%m%d%H%M%S")
        if args:
            path = args[0]
        infile = self.statesdict
        if not infile:
            infile = self.meta["statesdict"]
        dot = Digraph(directory=path)
        # dot.directory = path
        dot.filename = filename
        dot.format = 'png'
        for state in infile:
            dot.node(state, state)
        for state in self.statesdict:
            dot.edge("",state)
            break
        for state in infile:
            for edge in infile[state]:
                dot.edge(state,edge[0],edge[1] + ' / ' + edge[2])
        dot.view()
        try:
            dot.render(filename, view=True)
        except IOError:
            pass

    def getPatterns(self, *args):
        # Dump the generated pattern list (meta["patternlist"]) to a text file
        # under args[0] and show it in a message box.
        # NOTE(review): the file handle is never closed.
        filename = time.strftime("Patterns_FSM_%m%d%H%M%S.txt")
        if args:
            path = args[0]
        outfile = open(path + "/" + filename, 'a')
        msg = self.meta["patternlist"]
        message = ""
        cont = 1
        for i in msg:
            message += str(cont) + " -> " + i + "\n"
            cont += 1
        outfile.write(message)
        tkMessageBox.showinfo("The patterns are the following: ", message)

    def __makeindeterminacy(cls, *args):
        # Randomly replace input bits with '-' wildcards (probability args[0])
        # and then prune transitions made redundant by the wildcards.
        # NOTE(review): `ind` is only assigned when args is non-empty, and the
        # lists are mutated while being iterated in the pruning passes —
        # review before use.
        if args:
            ind = args[0]*100
        npri = np.random.random_integers
        for state in cls.statesdict:
            res = []
            for edge in cls.statesdict[state]:
                if not hasindeterminacy(edge):
                    count = 0
                    ledge = list(edge[1])
                    for bit in ledge:
                        percent = npri(0,100) # 0 and 100 are both included
                        if percent <= ind:
                            ledge[count] = '-'
                        count += 1
                    lst = list (edge)
                    lst[1] = "".join(ledge)
                    edge = tuple (lst)
                res.append(edge)
            cls.statesdict[state] = res
            actualstatelist = list(cls.statesdict[state])
            count = 0
            for s0 in actualstatelist:
                anotherstates = list(actualstatelist)
                del anotherstates[count]
                count += 1
                c = 0
                for s1 in anotherstates:
                    if matchindeterminacy(s0[1],s1[1]):
                        del anotherstates[c]
                    c += 1
        # MAKE INDETERMINACY
        for state in cls.statesdict:
            aux = list(cls.statesdict[state])
            for edge in cls.statesdict[state]:
                aux.remove(edge)
                if "-" in edge[1]:
                    for newedge in aux:
                        if matchindeterminacy(newedge[1],edge[1]):
                            aux.remove(newedge)
            print aux

    def random(self):
        """
        :param seed: Put a seed to generate random FSMs (default: "seed")
        :param min: The minimum number of inputs or outputs in the FMS (included)
        :param max: The maximum number of inputs or outputs in the FMS (included)
        :param states:
        :return: A pack of random FSMs
        """
        seed = self["seed"]
        inputs = self["input"]
        outputs = self["output"]
        states = self["states"]
        self["type"] = "random"
        statesdict = {}
        # Seed numpy deterministically from the base-36 seed string.
        np.random.seed(int(seed, 36))
        npri = np.random.random_integers
        # numinput = npri(min, max)
        # numoutput = npri(min, max)
        numinput = inputs
        numoutput = outputs
        if "pattern" in self:
            numoutput = 1
        stateslist = ['s'+str(i) for i in range(states)]
        # One transition per state per input word (fully specified machine).
        for state in stateslist:
            stl = []
            for premise in range(2**numinput):
                input = fix_size(bin(premise)[2:], numinput)
                o = npri(2**numoutput) - 1
                output = fix_size(bin(o)[2:], numoutput)
                nextstate = npri(stateslist.__len__()) - 1
                stl.append((stateslist[nextstate],input,output))
            statesdict[state] = stl
        # Compute inaccessibility: rewire a redundant edge so every state
        # becomes reachable from s0.
        candidates = inaccess(statesdict, [], 0, {})
        if candidates.__len__() != statesdict.__len__():
            for i in range(statesdict.__len__()):
                if not candidates.has_key("s"+str(i)):
                    for j in candidates:
                        if candidates[j].__len__() != 0:
                            initialState = candidates[j].pop()
                            edge = statesdict[initialState]
                            nlist = []
                            for k in edge:
                                if k[0] == j:
                                    nlist.append(("s"+str(i), k[1],k[2]))
                                else :
                                    nlist.append(k)
                            statesdict[initialState] = nlist
        return statesdict

    def pattern(self):
        # Build a pattern-recogniser FSM: pick nPatterns random input-word
        # sequences and create an acceptor path for each; the accepting edge
        # outputs the 1-based pattern index.
        statesdict = {}
        patternlist = []
        input = self["input"]
        nPatterns = self["output"]
        seed = self["seed"]
        self["type"] = "pattern"
        np.random.seed(int(seed, 36))
        npri = np.random.random_integers
        allPatterns = getPatternList(self["states"], input)
        for i in range(nPatterns):
            if allPatterns.__len__() == 0:
                break
            patternlist.append(allPatterns.pop(npri(allPatterns.__len__()-1)))
        patternCont = 0
        self["patternlist"] = patternlist
        for i in patternlist:
            # Split the pattern into input-sized chunks (py2 integer division).
            splitted = []
            cont = 0
            print i
            for j in range(i.__len__()/input):
                cont += input
                splitted.append(i[cont-input:cont])
            def createSD(statesdict, crumb, pattern, patternCont):
                # Recursive trie insertion: follow/extend the path of states
                # matching `pattern`; `crumb` is the path walked so far.
                currentState = crumb[crumb.__len__()-1]
                if not statesdict.has_key(currentState):
                    statesdict[currentState] = []
                exists = False
                for i in statesdict[currentState]:
                    if pattern[0] == i[1]:
                        exists = i[0]
                        break
                if not exists:
                    o = "0"
                    state = "s"+str(statesdict.__len__())
                    inserted = False
                    if pattern.__len__() == 1:
                        # Last chunk: output the pattern's index instead of "0".
                        o = str(patternCont+1)
                        patternCont += 1
                        inserted = True
                    statesdict[currentState].append((state,pattern[0],o))
                    crumb.append(state)
                    if not inserted:
                        createSD(statesdict, crumb, pattern[1:], patternCont)
                else :
                    crumb.append(exists)
                    createSD(statesdict, crumb, pattern[1:], patternCont)
            createSD(statesdict, ["s0"], splitted, patternCont)
            patternCont += 1
        self["statesdict"] = statesdict
        return statesdict

    def sequential(self):
        # Build a ring-shaped machine s0 -> s1 -> ... -> s0, optionally with
        # random self-loops ("loops" %) and random jumps ("jumps" %).
        try:
            seed = self["seed"]
            input = self["input"]
            output = self["output"]
            states = self["states"]
        except KeyError:
            print "You must input seed, number of inputs/outputs and states parameters"
            return 1
        self["type"] = "sequential"
        if not "loops" in self:
            self["loops"] = 0
        if not "jumps" in self:
            self["jumps"] = 0
        if 100 <= self["loops"] + self["jumps"]:
            tkMessageBox.showinfo("Error", "loops + jumps must be less than 100")
            return 1
        statesdict = {}
        np.random.seed(int(seed,36))
        npri = np.random.random_integers
        stateslist = ['s'+str(i) for i in range(states)]
        if (self["loops"] == 0) and (self["jumps"] == 0):
            # Pure ring: one all-wildcard transition per state.
            i = 1
            for state in stateslist:
                o = npri(2**int(output)) - 1
                out = fix_size(bin(o)[2:], output)
                op = ""
                for l in range(input):
                    op = op + "-"
                statesdict[state] = [(stateslist[i%(stateslist.__len__())],op,out)]
                i += 1
        else:
            # Fully specified machine; each transition may loop or jump
            # according to the configured percentages.
            i = 1
            for state in stateslist:
                res = []
                for inp in range(2**int(input)):
                    out = npri(1,2**int(output)) - 1
                    nextState = stateslist[i%(stateslist.__len__())]
                    dice = npri(0,100)
                    if dice < self["jumps"] :
                        myrandom = npri(0,stateslist.__len__()-1)
                        nextState = stateslist[myrandom]
                    else:
                        if dice < (self["jumps"]+self["loops"]):
                            nextState = state
                    res.append((nextState,fix_size("{0:b}".format(inp),input),fix_size("{0:b}".format(out),output)))
                statesdict[state] = res
                i += 1
        # Rewire a redundant edge so every state is reachable from s0.
        candidates = inaccess(statesdict, [], 0, {})
        if candidates.__len__() != statesdict.__len__():
            for i in range(statesdict.__len__()):
                if not candidates.has_key("s"+str(i)):
                    for j in candidates:
                        if candidates[j].__len__() != 0:
                            initialState = candidates[j].pop()
                            edge = statesdict[initialState]
                            nlist = []
                            for k in edge:
                                if k[0] == j:
                                    nlist.append(("s"+str(i), k[1],k[2]))
                                else :
                                    nlist.append(k)
                            statesdict[initialState] = nlist
        self["statesdict"] = statesdict
        return statesdict
| {
"repo_name": "GOYUSO/FSpyChine",
"path": "src/FSM_class.py",
"copies": "1",
"size": "11776",
"license": "mit",
"hash": 4614024261350587000,
"line_mean": 30.2360742706,
"line_max": 112,
"alpha_frac": 0.4668817935,
"autogenerated": false,
"ratio": 4.116043341488989,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5082925134988989,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antonio Segura Cano'
import os
import re
import numpy as np
# We'll create a FSMachine class
# FSMachine class will generate all methods about the program
# A random FSMachine: FSMachine.random
# KISS2 headers include the following information:
#
# .i # number of inputs
# .o # number of outputs
# .p # number of products
# .s # number of states used
# .r # RESET state [optional]
class FSMachine:
    """ FSMachine class """
    # Legacy Python 2 generator: prints `n` random KISS2-style transition
    # tables to stdout instead of returning them.
    def __init__(self, n=10):
        """
        :param n: Number of Final State Machines that you want to create at the package (default: 10)
        :return: FSMachine package ready to work with it.
        """
        self.n = n

    def random(self, seed="seed", min=1, max=8, states=10):
        """
        :param seed: Introduce a seed to generate random FSMs (default: "seed")
        :param min: The minimum number of inputs or outputs in the FMS (included)
        :param max: The maximum number of inputs or outputs in the FMS (included)
        :param states:
        :return: A pack of random FSMs
        """
        # Seed numpy deterministically from the base-36 seed string.
        np.random.seed(int(seed, 36))
        npri = np.random.random_integers
        for fsm in range(self.n):
            numinput = npri(min, max)
            numoutput = npri(min, max)
            stateslist = ['s'+str(i) for i in range(states)]
            # One printed transition per state per input word.
            for state in stateslist:
                for premise in range(2**numinput):
                    input = fix_size(bin(premise)[2:], numinput)
                    o = npri(2**numoutput) - 1
                    output = fix_size(bin(o)[2:], numoutput)
                    nextstate = npri(stateslist.__len__()) - 1
                    print input + ' ' + state + ' ' + stateslist[nextstate] + ' ' + output
# Util functions
def kiss2png(filepath):
    # Convert a .kiss2 FSM description into result.png via GraphViz `dot`.
    # Validates each transition line against the .i/.o header widths and
    # checks every state has exactly 2**i outgoing transitions; format errors
    # go through log() and abort the render.
    infile = open(filepath, 'r')
    outfile = open("./temp.txt", 'a')
    outfile.write("digraph g{\n\t")
    metadata = {}
    nline = 1
    verifystates = {}
    resetstate = ""
    for line in infile:
        pattern = re.compile("^.[ioprs]")
        p = pattern.findall(line)
        chunksline = line.split()
        writemem = ''
        if p:
            # Header line (.i/.o/.p/.s/.r): record it; .r names the reset state.
            key = chunksline[0].replace(".", "")
            val = chunksline[1]
            metadata[key] = val
            if key == "r":
                resetstate = val
        else:
            lenc = chunksline.__len__()
            if lenc != 4:
                if lenc == 0:
                    continue
                log(filepath, nline)
                break
            else:
                if not (treatment_size(chunksline[0], metadata["i"]) and treatment_size(chunksline[3], metadata["o"])):
                    log(filepath, nline)
                    break
                else:
                    currentstate = chunksline[1]
                    if not resetstate:
                        # No explicit .r header: first source state is reset.
                        resetstate = currentstate
                    # if not verifystates.has_key(currentstate):
                    if currentstate not in verifystates:
                        verifystates[currentstate] = 1
                    else:
                        verifystates[currentstate] += 1
                    writemem += currentstate + '->' + chunksline[2] + \
                        ' [label="' + chunksline[0] + ' ' + chunksline[3] + '"];\n\t'
        outfile.write(writemem)
        nline += 1
    outfile.write("\r}")
    infile.close()
    outfile.close()
    # Completeness check: each state needs one transition per input word.
    ok = True
    for state in verifystates:
        mypow = 2**int(metadata["i"])
        if verifystates[state] != mypow:
            ok = False
            log(filepath, nline)
            break
    print resetstate
    if ok:
        os.system("dot temp.txt -o result.png -Tpng && rm temp.txt")
def treatment_size(s, l):
    """True when the length of `s` equals the numeric field width `l` (str or int)."""
    return len(s) == int(l)
def fix_size(s, l):
    """Left-pad string `s` with '0' characters up to length `l`.

    Strings already at (or beyond) length `l` are returned unchanged; the
    original recursive version never terminated when len(s) > l.
    """
    return s.rjust(l, "0")
def log(filepath, numline):
    # Report a malformed kiss2 line on stdout and append a timestamped entry
    # to ../logs/error.log via the shell (requires a POSIX `date` command and
    # a writable ../logs directory relative to the working directory).
    print "Format kiss2 wrong at line " + numline.__str__()
    os.system('(date "+DATE: %Y-%m-%d%nTIME: %H:%M" && echo "' +
              filepath + ' wrong at line '+numline.__str__() + '") >> ../logs/error.log')
def wild_state(s1, s2):
    # Print True when the two equal-length bit strings differ in exactly one
    # position, False for zero or two-plus differences.
    # NOTE(review): the result is only printed, never returned.
    n, i = 0, 0
    r = True
    for letter in s1:
        if letter != s2[i]:
            n += 1
        if 1 < n:
            r = False
            break
        i += 1
    if n == 0:
        r = False
    print r
def contains(l, n, *args):
    """Return l[n] when `n` is present in `l`, otherwise a default value.

    :param l: container supporting `in` and subscripting (typically a dict)
    :param n: key to look up
    :param args: optional single default; "" when omitted. The original
        crashed here: `enumerate(args)` is always truthy, so the "no default"
        branch never ran and `r.next()` raised StopIteration on empty args.
    """
    res = args[0] if args else ""
    if n in l:
        res = l[n]
    return res
class FSM:
    """FSM:
    s0=[{i:xx,o:xx,s:xx}]
    """
    # States are stored as: state name -> list of
    # (next_state, input_word, output_word) tuples.
    def __init__(self, states = False):
        """
        :param states: either a path to a .kiss2 file to parse, a pre-built
            states dict, or False for an empty machine
        :return: FSM object initialized
        """
        self.defined = False
        self.states = {}
        self.reset = ""
        if states:
            if type(states) is str:
                # Parse a .kiss2 file: header lines (.i/.o/.p/.s/.r) vs
                # 4-column transition lines.
                infile = open(states, 'r')
                pattern = re.compile("^.[ioprs]")
                for line in infile:
                    p = pattern.findall(line)
                    chunksline = line.split()
                    if not chunksline:
                        continue
                    if p:
                        key = chunksline[0].replace(".", "")
                        val = chunksline[1]
                        if key == "r":
                            self.reset = val
                    else:
                        astate = chunksline[1]
                        if astate not in self.states:
                            self.states[astate] = []
                        self.states[astate].append((chunksline[2],chunksline[0],chunksline[3]))
            else:
                self.states = states
            if not self.reset:
                # No .r header: default to an arbitrary first state (py2 API).
                self.reset = self.states.iterkeys().next()

    def build(self, function, **kwargs):
        # Placeholder: not implemented.
        pass

    def tokiss2(self):
        # Placeholder: not implemented.
        pass

    def toimage(self):
        if not self.defined:
            print "You must initialize a FSM "
        else:
            print "OK"

    def toimage2(self, filepath):
        # Render the .kiss2 file at `filepath` to result.png via GraphViz;
        # same validation logic as the module-level kiss2png().
        infile = open(filepath, 'r')
        outfile = open("./temp.txt", 'a')
        outfile.write("digraph g{\n\t")
        metadata = {}
        nline = 1
        verifystates = {}
        resetstate = ""
        for line in infile:
            pattern = re.compile("^.[ioprs]")
            p = pattern.findall(line)
            chunksline = line.split()
            writemem = ''
            if p:
                key = chunksline[0].replace(".", "")
                val = chunksline[1]
                metadata[key] = val
                if key == "r":
                    resetstate = val
            else:
                lenc = chunksline.__len__()
                if lenc != 4:
                    if lenc == 0:
                        continue
                    log(filepath, nline)
                    break
                else:
                    if not (treatment_size(chunksline[0], metadata["i"]) and treatment_size(chunksline[3], metadata["o"])):
                        log(filepath, nline)
                        break
                    else:
                        currentstate = chunksline[1]
                        if not resetstate:
                            resetstate = currentstate
                        # if not verifystates.has_key(currentstate):
                        if currentstate not in verifystates:
                            verifystates[currentstate] = 1
                        else:
                            verifystates[currentstate] += 1
                        writemem += currentstate + '->' + chunksline[2] + \
                            ' [label="' + chunksline[0] + ' ' + chunksline[3] + '"];\n\t'
            outfile.write(writemem)
            nline += 1
        outfile.write("\r}")
        infile.close()
        outfile.close()
        # Completeness check: each state needs one transition per input word.
        ok = True
        for state in verifystates:
            mypow = 2**int(metadata["i"])
            if verifystates[state] != mypow:
                ok = False
                log(filepath, nline)
                break
        print resetstate
        if ok:
            os.system("dot temp.txt -o result.png -Tpng && rm temp.txt")
def verify(data):
    # Validate a .kiss2 file (when `data` is a path): line widths must match
    # the .i/.o header fields and every state must have exactly 2**i outgoing
    # transitions. Returns True when valid; dict input is not yet supported.
    ok = True
    if type(data) == str:
        infile = open(data, 'r')
        nline = 1
        metadata = {}
        verifystates = {}
        for line in infile:
            pattern = re.compile("^.[ioprs]")
            p = pattern.findall(line)
            chunksline = line.split()
            if p:
                key = chunksline[0].replace(".", "")
                val = chunksline[1]
                metadata[key] = val
            else:
                lenc = chunksline.__len__()
                if lenc != 4:
                    if lenc == 0:
                        continue
                    log(data, nline)
                    break
                else:
                    if not (treatment_size(chunksline[0], metadata["i"]) and treatment_size(chunksline[3], metadata["o"])):
                        log(data, nline)
                        break
                    else:
                        currentstate = chunksline[1]
                        if currentstate not in verifystates:
                            verifystates[currentstate] = 1
                        else:
                            verifystates[currentstate] += 1
            nline += 1
        infile.close()
        for state in verifystates:
            mypow = 2**int(metadata["i"])
            if verifystates[state] != mypow:
                ok = False
                log(data, nline)
                break
    if type(data) == dict:
        print "Diccionario"
    return ok
# x = FSM("../res/testkiss2.kiss2")
# verify(x.states)
def obtainwild(str):
    """Count the '*' wildcards in the pattern; binary expansion of the
    wildcard positions is not implemented yet (returns None)."""
    counter = str.count("*")
    # TODO: enumerate the binary expansions from 0 through range(counter)
    pass
obtainwild("0*1*")
# i2 o1 s8 p32
# random -> the input range is drawn uniformly from [n, m]
# bell-curve (normal) values use mu and sigma:
# mu, sigma = 0, 0.1 # mean and standard deviation
# >>> s = np.random.normal(mu, sigma, 1000)
#
# fully specified machine
# underspecified machine (representation)
| {
"repo_name": "GOYUSO/FSpyChine",
"path": "src/FSM_functions.py",
"copies": "1",
"size": "10209",
"license": "mit",
"hash": -8004902952611181000,
"line_mean": 30.0303951368,
"line_max": 123,
"alpha_frac": 0.4594965227,
"autogenerated": false,
"ratio": 4.108249496981891,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5067746019681891,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Anton Melnikov'
from collections import Counter, OrderedDict
from enum import Enum
from itertools import chain
from pprint import pprint
class FeatureValue(Enum):
    """
    enum for values of phonological features
    """
    # yes/no: the feature is (not) present; both: '±' in the source data;
    # unspecified: the source marked the feature as 0 / not applicable.
    yes = 1
    no = 0
    both = 2
    unspecified = -1
class FeatureValueDict(OrderedDict):
    # Ordered mapping of feature name -> FeatureValue. A distinct subclass so
    # Phoneme.parse_features can recognise already-parsed inputs by isinstance.
    pass
class Phoneme:
def __init__(self, symbol, name, features, is_complete=True,
parent_phonemes: set=None, feature_counter: Counter=None,
parent_similarity=1.0):
"""
:param is_complete: indicates whether the object represents a complete phoneme
"""
self.value = self.parse_features(features)
self.symbol = symbol
self.name = name
self.is_complete = is_complete
if parent_phonemes:
self.parent_phonemes = parent_phonemes
if not parent_phonemes:
self.parent_phonemes = {symbol}
# the count of how similar the parent phonemes are
self.parent_similarity = parent_similarity
def __repr__(self):
return self.symbol
def __str__(self):
return self.symbol
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if other:
return self.value == other.value
else:
# the other must be None
return False
def __len__(self):
return len(self.value)
def __contains__(self, item):
return item in self.value
def __iter__(self):
return iter(self.value.items())
@classmethod
def from_symbol(cls, symbol: str, phonemes: dict):
"""
Initialise a Phoneme object from its IPA symbol, using a dictionary of IPA symbols and features
:param symbol:
:param phonemes:
:return:
"""
phoneme = phonemes[symbol]
name = phoneme['name']
features = cls.parse_features(phoneme['features'])
return cls(symbol, name, features)
@staticmethod
def parse_features(features_dict) -> FeatureValueDict:
if isinstance(features_dict, FeatureValueDict):
return features_dict
features = FeatureValueDict()
for feature, value in features_dict.items():
# values can be True, False, 0 or ±
if value is True:
feature_value = FeatureValue.yes
elif value is False:
feature_value = FeatureValue.no
elif value is 0:
feature_value = FeatureValue.unspecified
elif value == '±':
feature_value = FeatureValue.both
else:
raise ValueError('{} is not recognised'.format(value))
features[feature] = feature_value
return features
@property
def features(self):
return self.value
def get_positive_features(self):
for feature, value in self:
if value == FeatureValue.yes or value == FeatureValue.both:
yield feature
def similarity_ratio(self, other):
    """
    computes the similarity between this Phoneme object and another
    :param other: Phoneme
    :return: ratio in [0, 1]: matching-feature score divided by the
        number of this phoneme's own features
    """
    similarity_count = 0
    for feature, feature_value in self:
        # NOTE(review): raises KeyError if `other` lacks one of our
        # features — assumes both phonemes share the same feature set.
        other_feature = other.value[feature]
        if other_feature == feature_value:
            similarity_count += 1
        # add 0.5 if either of the features is ± and the other is + or -
        elif other_feature == FeatureValue.both or feature_value == FeatureValue.both:
            if (other_feature != FeatureValue.unspecified
                    and feature_value != FeatureValue.unspecified):
                similarity_count += 0.5
    similarity_ratio = similarity_count / len(self.features)
    return similarity_ratio
def partial_equals(self, other, threshold=0.7):
    """
    returns True if this Phoneme object's similarity to another Phoneme object
    is equal to or above the given threshold of similarity
    :param other: Phoneme
    :param threshold: similarity threshold
    :return: bool
    """
    # The comparison already yields the bool; no if/else needed.
    return self.similarity_ratio(other) >= threshold
def intersection(self, other):
    """
    Returns an 'intersection phoneme' between this Phoneme object and another
    :param other: Phoneme (may be None)
    :return: Phoneme, or None when `other` is falsy
    """
    if self == other:
        return self
    elif other:
        # Already derived from `other`: nothing new to intersect.
        if other.symbol in self.parent_phonemes:
            return self
        # Keep only the (feature, value) pairs the two phonemes share.
        intersection = FeatureValueDict(set(self).intersection(set(other)))
        # create new parents
        new_parents = set(chain(self.parent_phonemes, other.parent_phonemes))
        new_symbol = '/'.join(new_parents)
        combined_similarity = self.similarity_ratio(other)
        partial_phoneme = Phoneme(new_symbol, 'partial phoneme',
                                  intersection, is_complete=False,
                                  parent_phonemes=new_parents,
                                  parent_similarity=combined_similarity)
        return partial_phoneme
    else:
        # NOTE(review): a feature-less Phoneme is falsy (see __len__) and
        # lands here too, not only None — confirm that is intended.
        return None
def pick_closest(self, other_phonemes):
    """
    Picks the closest Phoneme object (using the similarity ratio) from an iterable of Phoneme objects
    :param other_phonemes: iterable of Phonemes
    :return: Phoneme
    """
    # The bound method itself serves as the key; no lambda wrapper needed.
    return max(other_phonemes, key=self.similarity_ratio)
"repo_name": "notnami/phonemes",
"path": "phonemelib/phoneme.py",
"copies": "1",
"size": "5779",
"license": "mit",
"hash": -6695121573522030000,
"line_mean": 28.0301507538,
"line_max": 105,
"alpha_frac": 0.578601108,
"autogenerated": false,
"ratio": 4.715102040816326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5793703148816326,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Anton Purin'
"""
AnLinq - Linq analog for Python
Contact: purin.anton@gmail.com
The MIT License (MIT)
Copyright (c) 2015 Anton Purin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import sys
import functools
class AnLinq(object):
    """Allows to apply AnLinq-like methods to wrapped iterable"""

    class AnLinqException(Exception):
        """
        Special exception to be thrown by AnLinq
        """
        pass

    def __init__(self, iterable):
        """
        Instantiates AnLinq wrapper
        :param iterable: iterable to wrap (another AnLinq is unwrapped)
        :raises AnLinqException: if iterable is None
        """
        if iterable is None:
            raise AnLinq.AnLinqException("iterable is None")
        if iterable.__class__ is AnLinq:
            self.iterable = iterable.iterable
        else:
            self.iterable = iterable

    def __repr__(self):
        return repr(self.to_list())

    def __iter__(self):
        """
        Allows to iterate AnLinq object
        """
        return iter(self.iterable)

    def __getitem__(self, index):
        """
        Defines operator[]. Linear scan from the start (O(n));
        negative indexes are not supported.
        :param index: numeric index of item in iterable
        :return: item
        :rtype: object
        :raises AnLinqException: if index is out of range
        """
        count = 0
        for item in self.iterable:
            if count == index:
                return item
            count += 1
        raise AnLinq.AnLinqException("Index " + repr(index) + " is out of range (" + repr(count) + ")")

    def __len__(self):
        """
        Provides len(AnLinq) function
        :return: number of items in iterable
        :rtype: int
        """
        return self.count()

    def count(self):
        """
        Counts underlying items
        :return: number of items in iterable
        :rtype: int
        """
        if hasattr(self.iterable, '__len__'):
            return len(self.iterable)
        # Generic iterables must be consumed to be counted.
        return sum(1 for _ in self.iterable)

    def __eq__(self, other):
        """
        Equality operator
        :return: True if items are equal
        :rtype: bool
        """
        return self.iterable == other.iterable if isinstance(other, self.__class__) else self.to_list() == other

    def __ne__(self, other):
        """
        Negative equality operator
        :return: True if items are not equal
        :rtype: bool
        """
        return not self.__eq__(other)

    def any(self, predicate=None):
        """
        Returns true if there any item which matches given predicate.
        If no predicate given returns True if there is any item at all.
        :param predicate: Function which takes item as argument and returns bool
        :return: True, if there any item matching predicate
        :rtype: bool
        """
        for i in self.iterable:
            if predicate is None or predicate(i):
                return True
        return False

    def all(self, predicate):
        """
        Returns true if all items match given predicate.
        :param predicate: Function which takes item as argument and returns bool
        :return: Boolean
        :rtype: bool
        """
        for i in self.iterable:
            if not predicate(i):
                return False
        return True

    def first(self, predicate=None):
        """
        Returns first item which matches predicate or first item if no predicate given.
        Raises exception, if no matching items found.
        :param predicate: Function which takes item as argument and returns bool
        :return: item
        :rtype: object
        :raises AnLinqException: when nothing matches
        """
        for i in self.iterable:
            if predicate is None or predicate(i):
                return i
        raise AnLinq.AnLinqException('No matching items!')

    def first_or_none(self, predicate=None):
        """
        Returns first item which matches predicate or first item if no predicate given.
        Returns None, if no matching items found.
        :param predicate: Function which takes item as argument and returns bool
        :return: item
        :rtype: object
        """
        try:
            return self.first(predicate)
        except AnLinq.AnLinqException:
            return None

    def last(self, predicate=None):
        """
        Returns last item which matches predicate or last item if no predicate given.
        Raises exception, if no matching items found.
        :param predicate: Function which takes item as argument and returns bool
        :return: item
        :rtype: object
        :raises AnLinqException: when nothing matches
        """
        last_item = None
        last_item_set = False
        for i in self.iterable:
            if predicate is None or predicate(i):
                last_item = i
                last_item_set = True
        if not last_item_set:
            raise AnLinq.AnLinqException('No matching items!')
        return last_item

    def last_or_none(self, predicate=None):
        """
        Returns last item which matches predicate or last item if no predicate given.
        Returns None, if no matching items found.
        :param predicate: Function which takes item as argument and returns bool
        :return: item
        :rtype: object
        """
        try:
            return self.last(predicate)
        except AnLinq.AnLinqException:
            return None

    def to_list(self):
        """
        Converts LinqIterable to list
        :return: list
        :rtype: list
        """
        return list(self.iterable)

    def to_dictionary(self, key_selector=None, value_selector=None, unique=True):
        """
        Converts LinqIterable to dictionary
        :param key_selector: function which takes item and returns key for it
        :param value_selector: function which takes item and returns value for it
        :param unique: boolean, if True that will throw exception if keys are not unique
        :return: dict
        :rtype: dict
        """
        result = {}
        keys = set() if unique else None
        for i in self.iterable:
            key = key_selector(i) if key_selector is not None else i
            value = value_selector(i) if value_selector is not None else i
            if unique:
                if key in keys:
                    raise AnLinq.AnLinqException("Key '" + repr(key) + "' is used more than once.")
                keys.add(key)
            result[key] = value
        return result

    def where(self, predicate):
        """
        Returns items which matching predicate function
        :param predicate: Function which takes item as argument and returns bool
        :return: results wrapped with AnLinq
        :rtype: AnLinq
        """
        return AnLinq([i for i in self.iterable if predicate(i)])

    def distinct(self, key_selector=None):
        """
        Filters distinct values from enumerable. First occurrence wins;
        original order is preserved.
        :param key_selector: function which takes item and returns key for it
        :return: results wrapped with AnLinq
        :rtype: AnLinq
        """
        key_selector = key_selector if key_selector is not None else lambda item: item
        # Plain loop instead of the original comprehension that relied on
        # set.add() side effects and called key_selector twice per item.
        seen = set()
        result = []
        for i in self.iterable:
            key = key_selector(i)
            if key not in seen:
                seen.add(key)
                result.append(i)
        return AnLinq(result)

    def group_by(self, key_selector=None, value_selector=None):
        """
        Groups given items by keys.
        :param key_selector: function which takes item and returns key for it
        :param value_selector: function which takes item and returns value for it
        :return: Dictionary, where value is AnLinq of the items for given key
        :rtype: dict
        """
        key_selector = key_selector if key_selector is not None else lambda item: item
        value_selector = value_selector if value_selector is not None else lambda item: item
        result = {}
        for i in self.iterable:
            key = key_selector(i)
            # idiomatic `in` instead of calling __contains__() directly
            if key in result:
                result[key].append(value_selector(i))
            else:
                result[key] = [value_selector(i)]
        for key in result:
            result[key] = AnLinq(result[key])
        return result

    def order_by(self, comparer=None, descending=False):
        """
        Orders items.
        :param comparer: function which takes two items, compares them and
            returns a negative/zero/positive int
        :param descending: if True, sort in reverse order
        :return: results wrapped with AnLinq
        :rtype: AnLinq
        """
        if sys.version.startswith('2'):
            # Python 2 sorted() accepted a cmp function positionally.
            return AnLinq(sorted(self.iterable, comparer, None, descending))
        else:
            key = functools.cmp_to_key(comparer) if comparer else None
            return AnLinq(sorted(self.iterable, key=key, reverse=descending))

    def take(self, number):
        """
        Takes only given number of items, or all available items if their count is less than number
        :param number: number of items to get
        :return: results wrapped with AnLinq (lazily evaluated)
        :rtype: AnLinq
        """
        def internal_take(iterable, number_arg):
            for position, i in enumerate(iterable):
                if position >= number_arg:
                    break
                yield i
        return AnLinq(internal_take(self.iterable, number))

    def skip(self, number):
        """
        Skips given number of items in enumerable
        :param number: number of items to skip
        :return: results wrapped with AnLinq (lazily evaluated)
        :rtype: AnLinq
        """
        def internal_skip(iterable, number_arg):
            for position, i in enumerate(iterable):
                if position < number_arg:
                    continue
                yield i
        return AnLinq(internal_skip(self.iterable, number))

    def select(self, selector):
        """
        Converts items in list with given function
        :param selector: Function which takes item and returns other item
        :return: results wrapped with AnLinq
        :rtype: AnLinq
        """
        return AnLinq([selector(i) for i in self.iterable])

    def map(self, selector):
        """
        Alias for select(): converts items in list with given function
        :param selector: Function which takes item and returns other item
        :return: results wrapped with AnLinq
        :rtype: AnLinq
        """
        return self.select(selector)

    def select_many(self, selector):
        """
        Projects each item to an iterable and flattens the result into a
        single sequence.
        :param selector: Function which takes item and returns iterable
        :return: results wrapped with AnLinq
        :rtype: AnLinq
        """
        # Bug fix: the original nested comprehension iterated over the list
        # of selector results, so nothing was actually flattened.
        return AnLinq([item for sub in self.iterable for item in selector(sub)])

    def aggregate(self, func, seed=None):
        """
        Reduces list to a single variable
        :param func: function which takes prev value, this value and index to aggregate one step
        :param seed: initial value, will be used as prev on first iteration
        :return: reduced value
        """
        for index, i in enumerate(self.iterable):
            seed = func(seed, i, index)
        return seed

    def reduce(self, func, seed=None):
        """
        Alias for aggregate(): reduces list to a single variable
        :param func: function which takes prev value, this value and index to aggregate one step
        :param seed: initial value, will be used as prev on first iteration
        :return: reduced value
        """
        return self.aggregate(func, seed)

    def foreach(self, func):
        """
        Allows to perform some action for each object in iterable, but not allows to redefine items.
        Iteration stops early when func(item) == True (note: equality, not
        truthiness — e.g. a returned 1 also stops; kept for compatibility).
        :param func: Function which takes item as argument
        :return: self
        :rtype: AnLinq
        """
        for i in self.iterable:
            if func(i) == True:
                break
        return self

    def concat(self, iterable):
        """
        Concats two iterables
        :param iterable: Any iterable
        :return: results wrapped with AnLinq (lazily evaluated)
        :rtype: AnLinq
        """
        return AnLinq(itertools.chain(self.iterable, iterable))

    def concat_item(self, item):
        """
        Concats iterable with single item
        :param item: Any item
        :return: results wrapped with AnLinq (lazily evaluated)
        :rtype: AnLinq
        """
        return AnLinq(itertools.chain(self.iterable, [item]))

    def except_for(self, iterable):
        """
        Filters out items present in the given iterable
        :param iterable: Any iterable
        :return: results wrapped with AnLinq
        :rtype: AnLinq
        """
        return AnLinq([i for i in self.iterable if i not in iterable])

    def intersect(self, iterable):
        """
        Intersection between two iterables (keeps this wrapper's order
        and duplicates)
        :param iterable: Any iterable
        :return: results wrapped with AnLinq
        :rtype: AnLinq
        """
        return AnLinq([i for i in self.iterable if i in iterable])
"repo_name": "anpur/anlinq",
"path": "anlinq/__init__.py",
"copies": "1",
"size": "13908",
"license": "mit",
"hash": 7610082816602367000,
"line_mean": 32.1957040573,
"line_max": 114,
"alpha_frac": 0.5944779983,
"autogenerated": false,
"ratio": 4.602250165453342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5696728163753343,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Anton'
import os
import jinja2
import webapp2
import simplejson as json
from utils import get_current_user
from model.model import User, Bookmark
from google.appengine.api import users
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')))
class MainPage(webapp2.RequestHandler):
    def get(self, dir=None):
        """Render the main page for a signed-in user; bounce anonymous visitors to /login."""
        user = get_current_user()
        if user is None:
            self.redirect('/login')
            return
        template = jinja_environment.get_template('main.html')
        params = {
            "username": user.name,
            "logout": users.create_logout_url('/'),
            'header': {'stream': True},
            'dir': dir,
        }
        self.response.out.write(template.render(params))
class PublicPage(webapp2.RequestHandler):
    def get(self, hurl):
        """Render the public read-only view of one bookmark, looked up by its hash-url."""
        template = jinja_environment.get_template('public.html')
        bookmark = Bookmark.all().filter('hurl =', hurl).get()
        # NOTE(review): if no bookmark matches `hurl`, get() returns None and
        # the attribute accesses below raise — confirm routes guarantee a hit.
        params = {'title': bookmark.title, 'url': bookmark.url, 'description':bookmark.description }
        self.response.out.write(template.render(params))
class LoginPage(webapp2.RequestHandler):
    # NOTE(review): method name has a typo ('chack' -> 'check'); kept because
    # get()/post() call it by this name.
    def chack_login(self):
        """Render the login page with flags describing the visitor's state."""
        params = {}
        google_user = users.get_current_user()
        if google_user:
            params['google_user'] = True
        else:
            params['google_user'] = False
        params['login_url'] = users.create_login_url("/login")
        user = get_current_user(google_user)
        if user:
            params['user'] = True
        else:
            params['user'] = False
        #return self.redirect('/')
        template = jinja_environment.get_template('login.html')
        self.response.out.write(template.render(params))

    def get(self):
        """Show the login page."""
        self.chack_login()

    def post(self):
        """Create an app User for the logged-in Google account, then re-render."""
        google_user = users.get_current_user()
        name = self.request.get('name')
        user = User(name=name, email=google_user.email(), user=google_user,key_name=google_user.email())
        user.put()
        # NOTE(review): `Settings` is not imported in this module — this line
        # raises NameError at runtime. Needs an import or removal; left
        # untouched in this documentation-only pass.
        settings = Settings(user=user, key_name=google_user.email())
        settings.put()
        self.chack_login()
| {
"repo_name": "sloot14/flexifod",
"path": "view/pages.py",
"copies": "1",
"size": "2193",
"license": "mit",
"hash": 2207551038548645000,
"line_mean": 35.1694915254,
"line_max": 104,
"alpha_frac": 0.6155950752,
"autogenerated": false,
"ratio": 3.8205574912891986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9899641184962101,
"avg_score": 0.007302276305419466,
"num_lines": 59
} |
__author__ = 'Anton'
import re
import md5
import webapp2
from time import time
import simplejson as json
from utils import get_current_user
from model.model import User, Bookmark
from google.appengine.api import users
from datetime import datetime, timedelta
reg = re.compile(r'\b(((\S+)?)(@|mailto\:|(news|(ht|f)tp(s?))\://)\S+)\b')
inf = 1e10000
class Feed(webapp2.RequestHandler):
    def get(self, last_feed=0):
        """Return up to 50 of the current user's bookmarks as JSON, newest
        change first, strictly older than `last_feed` (epoch millis;
        0 means "no cursor", mapped to +infinity via module-level `inf`)."""
        last_feed = int(last_feed)
        if last_feed == 0:
            last_feed = inf
        self.response.headers['Content-Type'] = 'application/json'
        user = get_current_user()
        if user:
            q = Bookmark.all().filter('user =', user) \
                .filter('changedate <', last_feed) \
                .order('-changedate').run(limit=50)
            self.response.out.write(json.dumps([i.to_dict() for i in q]))
        else:
            self.response.out.write(json.dumps({'error': True, 'error_type': 'Not authorized'}))
class BookmarkHandler(webapp2.RequestHandler):
    """REST-style handler for a single bookmark (DELETE / PUT)."""

    def delete(self, id):
        """Delete bookmark `id` if it belongs to the current user; JSON response."""
        self.response.headers['Content-Type'] = 'application/json'
        user = get_current_user()
        if user:
            # NOTE(review): `id` arrives from the URL as a string; get_by_id
            # typically expects an int — confirm the route converts it
            # (the put() handler below re-reads and int()s it explicitly).
            bookmark = Bookmark.get_by_id(id)
            if bookmark:
                if bookmark.user == user:
                    bookmark.delete()
                    self.response.out.write(json.dumps({'status':'success', 'item':bookmark.to_dict()}))
                    return
                else:
                    self.response.out.write(json.dumps({'status':'error', 'error_type':'Not allowed'}))
            else:
                self.response.out.write(json.dumps({'status':'error', 'error_type':'Bad id'}))
        else:
            self.response.out.write(json.dumps({'status':'error', 'error_type':'Not authorized'}))

    def put(self, id):
        """Update an existing bookmark from request parameters; JSON response."""
        self.response.headers['Content-Type'] = 'application/json'
        user = get_current_user()
        if user:
            url = self.request.get('url')
            title = self.request.get('title')
            description = self.request.get('description')
            # NOTE(review): the `id` path argument is immediately shadowed by
            # the 'id' request parameter — confirm that is intentional.
            id = int(self.request.get('id'))
            # Module-level URL regex doubles as link validation.
            domain = reg.search(url)
            if domain:
                domain = domain.group()
                bookmark = Bookmark.get_by_id(id)
                if bookmark:
                    if bookmark.user.email == user.email:
                        bookmark.url=url
                        bookmark.title=title
                        bookmark.domain=domain
                        bookmark.description=unicode(description)
                        # changedate is epoch milliseconds
                        bookmark.changedate=int(time()*1000)
                        bookmark.put()
                        self.response.out.write(json.dumps({'status':'success', 'b':description, 'item':bookmark.to_dict()}))
                    else:
                        self.response.out.write(json.dumps({'status':'error', 'error_type':'Not allowed', "u1":bookmark.user.email, "u2":user.email}))
                else:
                    self.response.out.write(json.dumps({'status':'error', 'error_type':'Bad id'}))
            else:
                self.response.out.write(json.dumps({'status':'error', 'error_type':'Bad link'}))
        else:
            self.response.out.write(json.dumps({'status':'error', 'error_type':'Not authorized'}))
class BookmarkCreate(webapp2.RequestHandler):
    """Create a new bookmark for the current user from POSTed form fields."""

    def post(self):
        self.response.headers['Content-Type'] = 'application/json'
        user = get_current_user()
        if user:
            url = self.request.get('url')
            title = self.request.get('title')
            description = self.request.get('description')
            # Module-level URL regex doubles as link validation.
            domain = reg.search(url)
            if domain:
                domain = domain.group()
                # adddate/changedate are epoch milliseconds; hurl is a public
                # hash id derived from the current timestamp.
                bookmark = Bookmark(url=url, title=title, description=description, user=user, domain=domain, \
                                    adddate=int(time()*1000), changedate=int(time()*1000),
                                    hurl=md5.new(str(int(time()*1000))).hexdigest())
                bookmark.put()
                self.response.out.write(json.dumps({'status':'success', 'item':bookmark.to_dict()}))
                return
            else:
                self.response.out.write(json.dumps({'status':'error', 'error_type':'Bad link'}))
        else:
            self.response.out.write(json.dumps({'status':'error', 'error_type':'Not authorized'}))
class GetUserInfo(webapp2.RequestHandler):
    def get(self):
        """Return the current user's profile as JSON, or an error payload."""
        self.response.headers['Content-Type'] = 'application/json'
        user = get_current_user()
        payload = user.to_dict() if user \
            else {'status':'error', 'error_type':'Not authorized'}
        self.response.out.write(json.dumps(payload))
| {
"repo_name": "sloot14/flexifod",
"path": "view/api.py",
"copies": "1",
"size": "4460",
"license": "mit",
"hash": 8186933402622245000,
"line_mean": 38.1801801802,
"line_max": 138,
"alpha_frac": 0.6022421525,
"autogenerated": false,
"ratio": 3.6647493837304848,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9649412830678881,
"avg_score": 0.023515741110320815,
"num_lines": 111
} |
__author__ = 'Antony Cherepanov'
from datetime import datetime
import random
import tweepy
import dbhandler
import generator
class RandBot(object):
    """Twitter bot: replies to mentions and engages one random search hit
    with a per-user random number persisted through DBHandler."""

    def __init__(self):
        # The DB supplies both stored user data and the Twitter credentials.
        self.db = dbhandler.DBHandler()
        self.auth = tweepy.OAuthHandler(*(self.db.get_consumer_data()))
        self.auth.set_access_token(*(self.db.get_access_token_data()))
        self.api = tweepy.API(self.auth)

    def run(self):
        """One bot cycle: answer new mentions, then engage one search result."""
        self.__process_last_mentions()
        self.__process_search()

    def __process_last_mentions(self):
        """Reply to mentions newer than the last processed message id."""
        print("Processing mentions")
        mentions = list()
        last_msg_id = self.db.get_last_msg_id()
        if last_msg_id is None:
            # First run: no cursor stored yet, just take the latest mentions.
            mentions = self.api.mentions_timeline(count=10)
        else:
            mentions = self.api.mentions_timeline(since_id=last_msg_id,
                                                  count=10)
        # Process oldest first so the stored cursor advances monotonically.
        mentions.reverse()
        for tweet in mentions:
            user_data = self.db.get_user_data(tweet.author.id_str)
            if user_data is None:
                self.__process_new_user(tweet)
            else:
                # Known user: repeat their previously assigned number.
                msg = "@{0} your random number is {1}".format(
                    user_data['name'], user_data['number'])
                print("Replying to user: {0}".format(msg))
                self.__send_tweet(
                    self.__create_tweet_struct(tweet.id_str, msg))
            # Persist progress per tweet so a crash does not replay replies.
            self.db.set_last_msg_id(tweet.id_str)

    def __create_tweet_struct(self, reply_id, text):
        # Minimal message structure consumed by __send_tweet.
        return {'id': reply_id, 'tweet': text}

    def __process_new_user(self, tweet, mention=True):
        """Generate a number for a new user, tweet it, and store the user.

        :param mention: True when replying to an @-mention (greeting text),
            False for users discovered via search.
        """
        if tweet is None:
            print("Invalid tweet - it is empty!")
            return
        gen = generator.Generator()
        number = gen.generate(tweet)
        if number is None:
            # The generator could not produce a number; skip silently.
            return
        user_name = tweet.author.screen_name
        msg = str()
        if mention is True:
            msg = "@{0} hi! I'm a randbot and I have a random number " \
                  "for you: {1}".format(user_name, number)
        else:
            msg = "Random number for {0}: {1}".format(user_name, number)
        print("Adding new user: {0}".format(msg))
        self.__send_tweet(self.__create_tweet_struct(tweet.id_str, msg))
        user_data = {'user_id': tweet.author.id_str, 'name': user_name,
                     'number': number}
        self.db.add_user(user_data)

    def __process_search(self):
        """Pick one random unseen author tweeting about 'random' and engage."""
        print("Processing search")
        keyword = 'random'
        query = '{0} OR #{0}'.format(keyword)
        results = self.api.search(q=query, result_type='mixed', count=100)
        filtered_results = list()
        authors = list()
        for tweet in results:
            # Keep only tweets that literally contain the keyword, from
            # authors not yet seen in this batch nor stored in the DB.
            if keyword in tweet.text and \
                    tweet.author.id_str not in authors and \
                    self.db.get_user_data(tweet.author.id_str) is None:
                filtered_results.append(tweet)
                authors.append(tweet.author.id_str)
        if len(filtered_results) == 0:
            print("There are no new users in search result")
            return
        index = random.randint(0, len(filtered_results) - 1)
        self.__process_new_user(filtered_results[index], mention=False)

    def __send_tweet(self, msg):
        # Threads the reply onto the originating tweet id.
        return self.api.update_status(status=msg['tweet'],
                                      in_reply_to_status_id=msg['id'])
if __name__ == '__main__':
    # Entry point: run a single bot cycle and log wall-clock boundaries.
    print("Start RandBot at " + str(datetime.today()))
    bot = RandBot()
    bot.run()
    print("Done")
    print()
| {
"repo_name": "iamantony/randbot",
"path": "src/randbot.py",
"copies": "1",
"size": "3545",
"license": "mit",
"hash": 5402749398009376000,
"line_mean": 32.1308411215,
"line_max": 74,
"alpha_frac": 0.5466854725,
"autogenerated": false,
"ratio": 3.767268862911796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9812223632746513,
"avg_score": 0.0003461405330564209,
"num_lines": 107
} |
__author__ = 'Antony Cherepanov'
from exceptions import Exception
from math import fabs, pow, sqrt
class PointException(Exception):
    """Raised by Point for invalid coordinates or arguments."""
    pass
class Point(object):
    """
    2D Point object
    """
    def __init__(self, t_x=0, t_y=0):
        """Create a point at (t_x, t_y); defaults to the origin."""
        self.__x = t_x
        self.__y = t_y

    def __str__(self):
        return "Point(" + str(self.__x) + ", " + str(self.__y) + ")"

    def __checkCoordType(self, t_coord):
        """Return True if t_coord is an acceptable numeric coordinate type."""
        # `long` only exists on Python 2; fall back gracefully on Python 3,
        # where int is unbounded and covers the old long range.
        try:
            numeric_types = (int, float, long)
        except NameError:
            numeric_types = (int, float)
        return isinstance(t_coord, numeric_types)

    def SetCoords(self, t_x, t_y):
        """Set both coordinates.

        Exception: PointException for non-numeric values
        """
        if not self.__checkCoordType(t_x) or not self.__checkCoordType(t_y):
            raise PointException("Invalid coordinate value type")
        self.__x = t_x
        self.__y = t_y

    def GetX(self):
        """Return the X coordinate."""
        return self.__x

    def GetY(self):
        """Return the Y coordinate."""
        return self.__y

    @staticmethod
    def distance(t_firstPoint, t_secondPoint):
        """
        Calculate euclidean distance between two points
        @input:
        - t_firstPoint - valid Point object
        - t_secondPoint - valid Point object
        @output:
        - value - distance between points
        Exception: PointException
        """
        if not isinstance(t_firstPoint, Point) or\
                not isinstance(t_secondPoint, Point):
            raise PointException("Invalid point objects")
        # Fixed typo: the squared X distance used to be stored as `istXSq`
        # and referenced under that misspelling.
        dist_x_sq = pow(fabs(t_firstPoint.GetX() - t_secondPoint.GetX()), 2)
        dist_y_sq = pow(fabs(t_firstPoint.GetY() - t_secondPoint.GetY()), 2)
        return sqrt(dist_x_sq + dist_y_sq)
| {
"repo_name": "iamantony/PythonNotes",
"path": "src/objects/point.py",
"copies": "1",
"size": "1597",
"license": "mit",
"hash": -518499129016724300,
"line_mean": 22.8358208955,
"line_max": 76,
"alpha_frac": 0.5597996243,
"autogenerated": false,
"ratio": 3.6797235023041477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.965660438696899,
"avg_score": 0.01658374792703151,
"num_lines": 67
} |
__author__ = 'Antony Cherepanov'
import argparse
import os
import multiprocessing
from PIL import Image
def main():
    """Entry point: parse CLI arguments, validate them, run the conversion."""
    folder, save_folder = parse_arguments()
    if check_arguments(folder, save_folder):
        start(folder, save_folder)
    else:
        print("Invalid arguments. Try again!")
def parse_arguments():
    """ Parse command-line arguments.
    :return [tuple] (images_folder, save_folder)
    """
    cli = argparse.ArgumentParser(
        description="Multi-thread python app for transformation of color "
                    "images to grayscale images.")
    cli.add_argument("folder",
                     help="absolute path to the folder with images to "
                          "transform")
    cli.add_argument("-s", "--save_to",
                     help="path to the folder where greyscale images "
                          "should be saved",
                     default="")
    parsed = cli.parse_args()
    return parsed.folder, parsed.save_to
def check_arguments(t_folder, t_save_folder):
    """ Validate command-line arguments; creates the save folder if missing.
    :param t_folder: [string] - absolute path to the folder with images to
    transform
    :param t_save_folder: [string] - absolute path to folder for greyscale
    images
    :return [bool] True if arguments are OK.
    """
    if not check_existing_folder(t_folder):
        print("Error: Invalid path to folder with images - " + t_folder)
        return False
    if len(t_save_folder) > 0:
        if not check_folder_path(t_save_folder):
            print("Error: Invalid path to folder for greyscale images - " +
                  t_save_folder)
            return False
        if not os.path.exists(t_save_folder):
            os.makedirs(t_save_folder)
    return True
def check_existing_folder(t_path):
    """ Check that t_path is an absolute path to an existing directory.
    :param t_path: [string] - absolute path to presumably existing folder
    :return: [bool] True if folder exist, False if it's not
    """
    # A single boolean chain replaces the negated if/return pair.
    return (os.path.isabs(t_path)
            and os.path.exists(t_path)
            and os.path.isdir(t_path))
def check_folder_path(t_path):
    """ Check if path could be a valid folder path (i.e. it is absolute).
    :param t_path: [string] - absolute path to some folder
    :return: [bool] True if path could be path for folder.
    """
    # `is True` plus if/else was redundant: isabs already returns a bool.
    return os.path.isabs(t_path)
def start(t_folder, t_save_folder):
    """ Start transformation process: split the image list into one chunk
    per CPU core, run a worker process per chunk and wait for all of them.
    :param t_folder: [string] - absolute path to the folder with images to
    transform
    :param t_save_folder: [string] - absolute path to folder for greyscale
    images
    """
    images = get_images_paths(t_folder)
    cores_num = multiprocessing.cpu_count()
    # list_split yields exactly cores_num chunks (the last takes the rest).
    img_chunks = list_split(images, cores_num)
    jobs = list()
    for i in range(cores_num):
        thread = multiprocessing.Process(target=greyscale,
                                         args=(next(img_chunks), t_save_folder))
        jobs.append(thread)
        thread.start()
    # Block until every worker has finished.
    for thread in jobs:
        thread.join()
def get_images_paths(t_folder):
    """ Collect absolute paths of first-level image files in a folder.
    :param t_folder: [string] - absolute path to the folder
    :return: [list] with the absolute paths of the images in folder
    """
    if not os.path.isdir(t_folder):
        return list()
    image_extensions = ("jpg", "jpeg", "bmp", "png", "gif", "tiff")
    candidates = (os.path.join(t_folder, entry)
                  for entry in os.listdir(t_folder))
    return [path for path in candidates
            if os.path.isfile(path)
            and get_extension(path) in image_extensions]
def get_extension(t_path):
    """ Get extension of the file: the text after the last dot, lower-cased
    (the whole name when there is no dot).
    :param t_path: [string] - path or name of the file
    :return: [string] with extension of the file
    """
    return t_path.rsplit('.', 1)[-1].lower()
def list_split(t_list, t_size):
    """ Generator that splits a list into exactly t_size chunks; the last
    chunk absorbs any remainder.
    :param t_list: [list] - list of elements
    :param t_size: [int] - number of chunks
    :return generator of lists (chunks)
    """
    chunk = len(t_list) // t_size
    for idx in range(t_size - 1):
        yield t_list[idx * chunk:(idx + 1) * chunk]
    yield t_list[(t_size - 1) * chunk:]
def greyscale(t_images, t_save_folder):
    """ Transform color images to greyscale images.
    :param t_images: [list] - list of paths to the images
    :param t_save_folder: [string] - absolute path to folder for greyscale
    images; when empty, each result is written next to its source image
    :return [list] of paths to created greyscale images
    """
    grey_images = list()
    for img_path in t_images:
        print("Transforming " + img_path)
        path, name, extension = parse_image_path(img_path)
        if 0 < len(t_save_folder):
            path = t_save_folder
        filename = "{path}{sep}{name}.{ext}".format(path=path, name=name,
                                                    sep=str(os.sep), ext=extension)
        # Context manager guarantees the source file handle is released even
        # if convert()/save() raises; the original only closed on success.
        with Image.open(img_path) as img:
            img.convert("L").save(filename)
        grey_images.append(filename)
    return grey_images
def parse_image_path(t_img_path):
    """ Split an image path into its directory, base name and extension.
    :param t_img_path: [string] - path to image
    :return: [tuple] (directory, name-without-extension, extension)
    """
    # rpartition splits on the LAST separator/dot, matching the original
    # split/join round-trips (including no-separator and no-dot inputs).
    directory, _, filename = t_img_path.rpartition(os.sep)
    name, _, extension = filename.rpartition('.')
    return directory, name, extension
if __name__ == '__main__':
main() | {
"repo_name": "iamantony/images2grey",
"path": "images2grey.py",
"copies": "1",
"size": "6153",
"license": "mit",
"hash": -8434002787829895000,
"line_mean": 28.5865384615,
"line_max": 80,
"alpha_frac": 0.6070209654,
"autogenerated": false,
"ratio": 3.788793103448276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48958140688482754,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antony Cherepanov'
import argparse
import os
import multiprocessing
from PIL import Image
def parse_arguments():
    """ Parse command-line arguments for the slicer.
    :return tuple (folder, width, height, add, save_to)
    """
    cli = argparse.ArgumentParser(
        description="Multi-thread python app for slicing images")
    cli.add_argument("folder",
                     help="absolute path to the folder with images to slice")
    cli.add_argument("width", help="width of slice", type=int)
    cli.add_argument("height", help="height of slice", type=int)
    cli.add_argument("-add", action="store_true",
                     help="add extra space to the last slice")
    cli.add_argument("-s", "--save_to",
                     help="path to the folder where slices should be saved",
                     default="")
    parsed = cli.parse_args()
    return parsed.folder, parsed.width, parsed.height, parsed.add, parsed.save_to
def check_arguments(t_folder, t_width, t_height, t_save_folder):
    """ Validate slicer arguments; creates the save folder if missing.
    :param t_folder: string - absolute path to the folder with images to slice
    :param t_width: int width of the slice
    :param t_height: int height of the slice
    :param t_save_folder: string absolute path to folder for slices
    :return boolean value. True if arguments are OK.
    """
    if not check_existing_folder(t_folder):
        print("Error: Invalid path to folder with images - " + t_folder)
        return False
    if t_width <= 0 or t_height <= 0:
        print("Error: Invalid slice size")
        return False
    if len(t_save_folder) > 0:
        if not check_folder_path(t_save_folder):
            print("Error: Invalid path to folder for slices - " + t_save_folder)
            return False
        if not os.path.exists(t_save_folder):
            os.makedirs(t_save_folder)
    return True
def check_existing_folder(t_path):
    """ Check if folder really exist
    :param t_path: string with absolute path to presumably existing folder
    :return: boolean value. True if folder exist, False if it's not
    """
    # All three conditions must hold: absolute, present, and a directory.
    return (os.path.isabs(t_path)
            and os.path.exists(t_path)
            and os.path.isdir(t_path))
def check_folder_path(t_path):
    """ Check if path to folder is valid
    :param t_path: string with absolute path to some folder
    :return: boolean value. True if path could be path for folder.
    """
    # os.path.isabs already yields a bool; bool() keeps the contract explicit.
    return bool(os.path.isabs(t_path))
def start_slicing(t_folder, t_width, t_height, t_add_small_slice,
                  t_save_folder):
    """ Slice images
    :param t_folder: string with absolute path to the folder with images
    to slice
    :param t_width: int width of the slice
    :param t_height: int height of the slice
    :param t_add_small_slice: boolean value If True and last slice is too
    small, add it to the previous
    :param t_save_folder: string absolute path to folder for slices
    """
    images = get_images_paths(t_folder)
    # One worker process per CPU core; list_split yields exactly cores_num
    # chunks (chunks may be empty when there are fewer images than cores).
    cores_num = multiprocessing.cpu_count()
    img_chunks = list_split(images, cores_num)
    jobs = list()
    for i in range(cores_num):
        thread = multiprocessing.Process(target=slice_images,
                                         args=(next(img_chunks),
                                               t_width,
                                               t_height,
                                               t_add_small_slice,
                                               t_save_folder))
        jobs.append(thread)
        thread.start()
    # Block until every worker has finished its chunk.
    for thread in jobs:
        thread.join()
def get_images_paths(t_folder):
    """ Check if folder contains images (on the first level) and return
    their paths
    :param t_folder: string with the absolute path to the folder
    :return: list with the absolute paths of the images in folder
    """
    if not os.path.isdir(t_folder):
        return list()
    image_extensions = ("jpg", "jpeg", "bmp", "png", "gif", "tiff")
    # Keep only regular files whose extension looks like an image format.
    candidates = (os.path.join(t_folder, entry)
                  for entry in os.listdir(t_folder))
    return [file_path for file_path in candidates
            if os.path.isfile(file_path)
            and get_extension(file_path) in image_extensions]
def get_extension(t_path):
    """ Get extension of the file
    :param t_path: path or name of the file
    :return: string with lower-cased text after the final '.' (the whole
    name, lower-cased, when there is no dot)
    """
    # rsplit with maxsplit=1 keeps only the last dot-separated part.
    return t_path.rsplit('.', 1)[-1].lower()
def list_split(t_list, t_size):
    """ Generator that split list of elements into n chunks
    :param t_list - list of elements
    :param t_size - size of chunk
    :return generator of lists of chunks
    """
    # Floor division: the last chunk absorbs any remainder.
    chunk_len = len(t_list) // t_size
    for idx in range(t_size - 1):
        offset = idx * chunk_len
        yield t_list[offset:offset + chunk_len]
    yield t_list[t_size * chunk_len - chunk_len:]
def slice_images(t_images, t_width, t_height, t_add_small_slice, t_save_folder):
    """ Slicing images
    :param t_images: list of path to the images
    :param t_width: int width of the slice
    :param t_height: int height of the slice
    :param t_add_small_slice: boolean value If True and last slice is too
    small, add it to the previous
    :param t_save_folder: string absolute path to folder for slices
    :return list: created slices paths
    """
    slices_paths = list()
    for img_path in t_images:
        print("Slicing " + img_path)
        img = Image.open(img_path)
        img_width, img_height = img.size
        # Size of of image should bigger than a size of slice
        min_number_of_slices = 1
        # But if we can add extra space to the last slice, then size of image
        # should be bigger than two slices.
        if t_add_small_slice is True:
            min_number_of_slices = 2
        if (img_width // t_width < min_number_of_slices) or\
                (img_height // t_height < min_number_of_slices):
            print("Skip image " + img_path + " because it's too small")
            img.close()
            continue
        path, name, extension = parse_image_path(img_path)
        # When a save folder is given, slices go there instead of the source dir.
        if 0 < len(t_save_folder):
            path = t_save_folder
        column = 0
        row = 0
        # Walk the image top-to-bottom, left-to-right, one slice at a time.
        for hgt in range(0, img_height, t_height):
            hgt_end = hgt + t_height
            if img_height < hgt_end:
                break
            # Stretch the last row to the image border so no strip is lost.
            if t_add_small_slice is True and\
                    img_height < hgt_end + t_height:
                hgt_end = img_height
            for wdt in range(0, img_width, t_width):
                wdt_end = wdt + t_width
                if img_width < wdt_end:
                    break
                # Same stretching for the last column.
                if t_add_small_slice is True and\
                        img_width < wdt_end + t_width:
                    wdt_end = img_width
                # PIL crop box is (left, upper, right, lower).
                area = (wdt, hgt, wdt_end, hgt_end)
                img_slice = img.crop(area)
                filename = "{path}{sep}{name}_{row:02d}_{col:02d}.{ext}".format(
                    path=path, sep=str(os.sep), name=name, col=column, row=row,
                    ext=extension)
                img_slice.save(filename)
                slices_paths.append(filename)
                column += 1
            column = 0
            row += 1
        img.close()
    return slices_paths
def parse_image_path(t_img_path):
    """ Parse path to image and return it's parts: path, image name, extension
    :param t_img_path: string with path to image
    :return: tuple of strings that hold path to image file, image name and
    image extension
    """
    # rpartition splits at the LAST separator / dot, matching the original
    # split-and-rejoin behaviour (empty head when the separator is absent).
    directory, _, file_name = t_img_path.rpartition(os.sep)
    base_name, _, extension = file_name.rpartition('.')
    return directory, base_name, extension
if __name__ == '__main__':
    # Parse CLI arguments, validate them (index 3, the -add flag, needs no
    # validation), then fan the slicing work out across CPU cores.
    arguments = parse_arguments()
    is_ok = check_arguments(arguments[0],
                            arguments[1],
                            arguments[2],
                            arguments[4])
    if is_ok is True:
        start_slicing(arguments[0],
                      arguments[1],
                      arguments[2],
                      arguments[3],
                      arguments[4])
    else:
print("Invalid arguments. Try again!") | {
"repo_name": "iamantony/images_slicer",
"path": "images_slicer.py",
"copies": "1",
"size": "8689",
"license": "mit",
"hash": 2203926127672149500,
"line_mean": 31.5468164794,
"line_max": 80,
"alpha_frac": 0.5759005639,
"autogenerated": false,
"ratio": 3.9122017109410177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9985400166635184,
"avg_score": 0.0005404216411666007,
"num_lines": 267
} |
__author__ = 'Antony Cherepanov'
import sqlite3
import os
class DBHandler(object):
    """SQLite-backed store for the bot's twitter credentials and state.

    Creates the database next to this module on first use and prompts the
    operator interactively (via input()) for missing credentials.
    """
    def __init__(self):
        # Database file lives in the same directory as this module.
        self.db_name = 'bot.db'
        self.db_path = os.path.dirname(os.path.abspath(__file__)) + \
            os.sep + self.db_name
        self.__check_db()
        self.connection = sqlite3.connect(self.db_path)
        # May block on interactive input() when credentials are missing.
        self.__check_consumer_data()
        self.__check_token_data()
    def __check_db(self):
        # Create the database file with its schema if it is not there yet.
        if os.path.exists(self.db_path):
            return
        print("Database do not exist")
        self.__init_db()
    def __init_db(self):
        # One-time schema creation; uses a throwaway connection because
        # self.connection is not open yet at this point.
        print("Database initialisation")
        connection = sqlite3.connect(self.db_path)
        cursor = connection.cursor()
        cursor.execute('''CREATE TABLE consumer (
            id INTEGER PRIMARY KEY NOT NULL,
            key TEXT NOT NULL UNIQUE,
            secret TEXT NOT NULL UNIQUE)''')
        cursor.execute('''CREATE TABLE token (
            id INTEGER PRIMARY KEY NOT NULL,
            key TEXT NOT NULL UNIQUE,
            secret TEXT NOT NULL UNIQUE)''')
        cursor.execute('''CREATE TABLE users (
            id INTEGER PRIMARY KEY NOT NULL,
            user_id TEXT NOT NULL UNIQUE,
            name TEXT NOT NULL UNIQUE,
            number TEXT NOT NULL UNIQUE)''')
        cursor.execute('''CREATE TABLE last_msg (
            id INTEGER PRIMARY KEY NOT NULL,
            msg_id TEXT NOT NULL UNIQUE)''')
        connection.commit()
        connection.close()
    def __check_consumer_data(self):
        # Prompt for and persist consumer key/secret when the table is empty.
        cursor = self.connection.cursor()
        cursor.execute('SELECT * FROM consumer LIMIT 1')
        if cursor.fetchone() is None:
            key, secret = self.__request_consumer_data()
            cursor.execute('INSERT INTO consumer VALUES (null, ?, ?)',
                           (key, secret))
            self.connection.commit()
    def __request_consumer_data(self):
        # Interactive prompt; returns (key, secret) strings.
        print('Please enter consumer data of twitter application.')
        key = input('Consumer key: ')
        secret = input('Consumer secret: ')
        return key, secret
    def get_consumer_data(self):
        """Return the stored (key, secret) consumer tuple, or None."""
        cursor = self.connection.cursor()
        cursor.execute('SELECT key, secret FROM consumer LIMIT 1')
        return cursor.fetchone()
    def __check_token_data(self):
        # Prompt for and persist the access token when the table is empty.
        cursor = self.connection.cursor()
        cursor.execute('SELECT * FROM token LIMIT 1')
        if cursor.fetchone() is None:
            key, secret = self.__request_token_data()
            cursor.execute('INSERT INTO token VALUES (null, ?, ?)',
                           (key, secret))
            self.connection.commit()
    def __request_token_data(self):
        # Interactive prompt; returns (key, secret) strings.
        print('Please enter access token data of twitter application.')
        key = input('Access Token: ')
        secret = input('Access Token secret: ')
        return key, secret
    def get_access_token_data(self):
        """Return the stored (key, secret) access-token tuple, or None."""
        cursor = self.connection.cursor()
        cursor.execute('SELECT key, secret FROM token LIMIT 1')
        return cursor.fetchone()
    def get_last_msg_id(self):
        """Return the id of the last processed message, or None."""
        cursor = self.connection.cursor()
        cursor.execute('SELECT msg_id FROM last_msg LIMIT 1')
        data = cursor.fetchone()
        if data is None:
            return None
        return data[0]
    def set_last_msg_id(self, new_id):
        """Insert or update the single last-message-id row."""
        if new_id is None or len(new_id) == 0:
            print("Invalid ID of last message:", new_id)
            return
        cursor = self.connection.cursor()
        msg_id = self.get_last_msg_id()
        if msg_id is None:
            cursor.execute('INSERT INTO last_msg VALUES (null, ?)', (new_id,))
        else:
            cursor.execute('UPDATE last_msg SET msg_id=? WHERE msg_id=?',
                           (new_id, msg_id))
        self.connection.commit()
    def get_user_data(self, user_id):
        """Return {'user_id', 'name', 'number'} for user_id, or None."""
        if user_id is None or len(user_id) == 0:
            print("Invalid user ID:", user_id)
            return None
        cursor = self.connection.cursor()
        cursor.execute('SELECT name, number FROM users WHERE user_id=?',
                       (user_id,))
        data = cursor.fetchone()
        if data is None:
            return None
        user_data = {'user_id': user_id, 'name': data[0], 'number': data[1]}
        return user_data
    def add_user(self, user_data):
        """Insert a new user row; no-op (with a message) when it exists."""
        if self.get_user_data(user_data['user_id']) is not None:
            print("User with ID {0} already exist in database".format(
                user_data['user_id']))
            return
        cursor = self.connection.cursor()
        values = (user_data['user_id'], user_data['name'], user_data['number'])
        cursor.execute("INSERT INTO users VALUES (null, ?, ?, ?)", values)
        self.connection.commit()
| {
"repo_name": "iamantony/randbot",
"path": "src/dbhandler.py",
"copies": "1",
"size": "4738",
"license": "mit",
"hash": 8273032128214237000,
"line_mean": 32.8428571429,
"line_max": 79,
"alpha_frac": 0.5635289152,
"autogenerated": false,
"ratio": 4.152497808939526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5216026724139526,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antony Cherepanov'
from matrix import Matrix, MatrixException
def Start():
    # Python 2 demo driver: multiplies two 4x4 matrices with the project's
    # Matrix class, both directly and via Strassen's scheme.
    first = Matrix(4, 4, [i for i in range(1, 17)])
    second = Matrix(4, 4, [i for i in range(17, 33)])
    print "First = ", first
    print "Second = ", second
    print "Multiplication: ", first * second
    print "Strassen multiplication = ", StrassenMultiplication(first, second)
def StrassenMultiplication(t_first, t_second):
    # Strassen 7-multiplication scheme on the four quadrants of a 4x4 pair.
    # Returns None for non-square / size-mismatched / non-power-of-2 inputs.
    # NOTE(review): for power-of-two sizes larger than 4 the code falls
    # through to the assembly step below with mA..resD undefined (NameError);
    # the recursive split step appears to be missing - confirm intent.
    # Work only with square Matrices of same even dimensions
    if not isinstance(t_first, Matrix) or not isinstance(t_second, Matrix) or \
            not t_first.IsSquare() or not t_second.IsSquare() or \
            t_first.rows() != t_second.rows():
        return None
    # n & (n - 1) == 0 exactly when n is a power of two.
    isPowerOf2 = t_first.rows() & (t_first.rows() - 1)
    if 0 != isPowerOf2:
        return None
    # Check if we work with smallest square matrices 4x4
    if 4 == t_first.rows():
        # Quadrants of the first matrix (A B / C D) ...
        mA = t_first.GetSlice([0, 0], [1, 1])
        mB = t_first.GetSlice([0, 2], [1, 3])
        mC = t_first.GetSlice([2, 0], [3, 1])
        mD = t_first.GetSlice([2, 2], [3, 3])
        # ... and of the second (E F / G H).
        mE = t_second.GetSlice([0, 0], [1, 1])
        mF = t_second.GetSlice([0, 2], [1, 3])
        mG = t_second.GetSlice([2, 0], [3, 1])
        mH = t_second.GetSlice([2, 2], [3, 3])
        # The seven Strassen products.
        p1 = mA * (mF - mH)
        p2 = (mA + mB) * mH
        p3 = (mC + mD) * mE
        p4 = mD * (mG - mE)
        p5 = (mA + mD) * (mE + mH)
        p6 = (mB - mD) * (mG + mH)
        p7 = (mA - mC) * (mE + mF)
        # Recombine the products into the result quadrants.
        resA = p5 + p4 - p2 + p6
        resB = p1 + p2
        resC = p3 + p4
        resD = p1 + p5 -p3 - p7
    result = Matrix(t_first.rows(), t_first.cols())
    result.SetSlice([0, 0], [1, 1], resA)
    result.SetSlice([0, 2], [1, 3], resB)
    result.SetSlice([2, 0], [3, 1], resC)
    result.SetSlice([2, 2], [3, 3], resD)
    return result
# Script-style entry point: runs at import time (Python 2 module).
Start()
| {
"repo_name": "iamantony/PythonNotes",
"path": "src/algorithms/matrix/strassen_multiplication.py",
"copies": "1",
"size": "1926",
"license": "mit",
"hash": 526298607596635500,
"line_mean": 30.1,
"line_max": 79,
"alpha_frac": 0.5036344756,
"autogenerated": false,
"ratio": 2.857566765578635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3861201241178635,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antony Cherepanov'
from Objects import Point, PointException
class SortBy(object):
    """Selector constants: which coordinate axis to sort points by."""
    # Plain ints so they can be compared with == in the sort routine.
    X_COORD = 0
    Y_COORD = 1
def Start():
    # Python 2 demo driver for the closest-points search.
    # NOTE(review): 'input' shadows the builtin of the same name.
    input = [Point(1, 1), Point(5, 7), Point(3, 0), Point(6, 2), Point(10, 10),
             Point(2, 1), Point(0, 10), Point(6, 5), Point(7, 2)]
    result = ClosestPoints(input)
    print "result = ", result
def ClosestPoints(t_input):
    """
    Get two closest points. Algorithm based on Merge Sort
    @input:
    - list of point objects
    @output:
    - list of two points

    NOTE(review): the implementation stops after sorting and printing the
    points by each axis - it never computes or returns the closest pair,
    so callers currently receive None. Confirm whether this is unfinished.
    """
    pointsSortedX = MergeSortForPoints(t_input, SortBy.X_COORD)
    for i in pointsSortedX:
        print i
    pointsSortedY = MergeSortForPoints(t_input, SortBy.Y_COORD)
    for i in pointsSortedY:
        print i
def MergeSortForPoints(t_points, t_sortByCoord):
    # Top-down merge sort of Point objects by one coordinate axis
    # (t_sortByCoord is SortBy.X_COORD or SortBy.Y_COORD).
    # NOTE(review): an empty input list falls through both branches and
    # returns None implicitly - callers must not pass [].
    length = len(t_points)
    if 2 < length:
        # Python 2 '/' is integer division for ints here.
        halfLength = length / 2
        leftHalf = t_points[:halfLength]
        rightHalf = t_points[halfLength:]
        sortedLeft = MergeSortForPoints(leftHalf, t_sortByCoord)
        sortedRight = MergeSortForPoints(rightHalf, t_sortByCoord)
        i = 0
        j = 0
        result = list()
        for k in range(length):
            # When one half is exhausted, append the tail of the other.
            if len(sortedLeft) <= i:
                result.extend(sortedRight[j:])
                break
            if len(sortedRight) <= j:
                result.extend(sortedLeft[i:])
                break
            # Choose by which coordinate we should sort
            if t_sortByCoord == SortBy.X_COORD:
                leftPointCoord = sortedLeft[i].GetX()
                rightPointCoord = sortedRight[j].GetX()
            else:
                leftPointCoord = sortedLeft[i].GetY()
                rightPointCoord = sortedRight[j].GetY()
            # Merge sorted halves
            if leftPointCoord < rightPointCoord:
                result.append(sortedLeft[i])
                i += 1
            else:
                result.append(sortedRight[j])
                j += 1
        return result
    else:
        if 1 == length:
            return t_points
        elif 2 == length:
            # Choose by which coordinate we should sort
            if t_sortByCoord == SortBy.X_COORD:
                first = t_points[0].GetX()
                second = t_points[1].GetX()
            else:
                first = t_points[0].GetY()
                second = t_points[1].GetY()
            if first < second:
                return t_points
            else:
                return [t_points[1], t_points[0]]
Start() | {
"repo_name": "iamantony/PythonNotes",
"path": "src/algorithms/search/closestpoints.py",
"copies": "1",
"size": "2620",
"license": "mit",
"hash": -6640703467111342000,
"line_mean": 26.5,
"line_max": 79,
"alpha_frac": 0.5145038168,
"autogenerated": false,
"ratio": 4.0184049079754605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017281105990783407,
"num_lines": 92
} |
__author__ = 'Antony Cherepanov'
from random import randint
import sys
# Unbalanced pivots can make sort() recurse O(n) deep; raise the default
# CPython limit (1000) so large inputs do not hit RecursionError.
sys.setrecursionlimit(10000)
class PivotPosition(object):
    """ List of names of rules to choose pivot position """
    # Explicit values (the original unpacked range(0, 4)).
    FIRST = 0
    LAST = 1
    MEDIAN = 2
    RANDOM = 3
def quick_sort(t_input, t_pivot_position=PivotPosition.RANDOM):
    """ Quick sort algorithm.
    No extra memory needed beyond one copy of the input.
    @input:
    - t_input - list of numbers
    - t_pivot_position - desired rule of choosing pivot's position
    @output:
    - list(1,2, ...) - new sorted list (the input list is left unchanged;
      the original docstring's "same input object" claim was wrong)
    """
    array = t_input[:]
    # Bug fix: sort() dereferences the pivot index, which raised IndexError
    # for an empty input. 0- and 1-element lists are already sorted.
    if 1 < len(array):
        sort(array, 0, len(array) - 1, t_pivot_position)
    return array
def sort(t_input, t_left, t_right, t_pivot_position):
    """ Real Quick Sort procedure

    In-place Hoare-style partition followed by recursion into both sides.
    @input:
    - t_input - input array of numbers
    - t_left - index of left boundary of inspecting array (included)
    - t_right - index of right boundary of inspecting array (included)
    - t_pivot_position - desired rule of choosing pivot's position
    """
    i = t_left
    j = t_right
    pivot_index = choose_pivot(t_left, t_right, t_pivot_position)
    pivot_value = t_input[pivot_index]
    while True:
        # Advance both cursors past elements already on the correct side.
        while t_input[i] < pivot_value:
            i += 1
        while pivot_value < t_input[j]:
            j -= 1
        if i <= j:
            # Swap the out-of-place pair, then move both cursors inward.
            if t_input[i] > t_input[j]:
                t_input[i], t_input[j] = t_input[j], t_input[i]
            i += 1
            j -= 1
        if i > j:
            break
    # Recurse into each non-trivial side of the partition.
    if i < t_right:
        sort(t_input, i, t_right, t_pivot_position)
    if t_left < j:
        sort(t_input, t_left, j, t_pivot_position)
def choose_pivot(t_left, t_right, t_pivot_position):
    """ Choose index of pivot element
    @input:
    - t_left - index of left boundary of inspecting array (included)
    - t_right - index of right boundary of inspecting array (included)
    - t_pivot_position - desired rule of choosing pivot's position
    @output:
    - int - chosen index of pivot element
    """
    # Degenerate range: nothing to choose from.
    if t_right <= t_left:
        return t_left
    # Lazy dispatch table: the selected rule runs only when picked, so
    # randint() is never called for non-RANDOM rules.
    selectors = {
        PivotPosition.FIRST: lambda: t_left,
        PivotPosition.LAST: lambda: t_right,
        PivotPosition.MEDIAN: lambda: (t_right + t_left) // 2,
        PivotPosition.RANDOM: lambda: randint(t_left, t_right),
    }
    selector = selectors.get(t_pivot_position)
    if selector is None:
        print("ChoosePivot(): Warning - invalid rule! Choose first element")
        return t_left
    return selector()
"repo_name": "iamantony/PythonNotes",
"path": "src/algorithms/sort/quicksort.py",
"copies": "1",
"size": "2567",
"license": "mit",
"hash": 6123720753447051000,
"line_mean": 27.5287356322,
"line_max": 76,
"alpha_frac": 0.5862874951,
"autogenerated": false,
"ratio": 3.5702364394993045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9653787207724647,
"avg_score": 0.0005473453749315818,
"num_lines": 87
} |
__author__ = 'Antony Cherepanov'
# Module-level names used by the scope demos below; the prints run on import.
glob_str = "i'm global"
print("Names at the top level of a file are global to code within that " +
      "single file only")
print("If we want use them in other modules, than we should import them")
print("This is global variable: " + glob_str)
glob_list = [1, 2, 3, 4]
def module_func():
    """Demonstrate local vs global scope; mutates glob_list in place."""
    print("\nNow we inside local function. All variables, that we create " +
          "inside this functions are local. We can't access them anywhere else")
    local_str = "this is local variable"
    print("Local variable: " + local_str)
    print("We can use here global variable: " + glob_str)
    # Mutating (not rebinding) a global object needs no 'global' statement.
    print("And we can change them: before - " + str(glob_list))
    glob_list.append(10)
    print("After: " + str(glob_list))
    print("But if we will assign something to global variable, than " +
          "local variable will be created and global variable will now change")
module_func()
def change_global():
    """Rebind a module-level name from inside a function via 'global'."""
    global glob_str
    print("\nchange_global()")
    print("We have global variable: ", glob_str)
    print("To change it's value from local scope of function we should use" +
          "global statement.")
    glob_str = "Change globally"
    print("Changed global variable (inside function): " + str(glob_str))
change_global()
print("Global variable (outside): " + str(glob_str))
def first_way_to_change_global():
    """Change another module's global: import it, then use 'global'."""
    print("\nfirst_way_to_change_global()")
    print("First way to change global variable of another module:")
    print("Import module, declare global variable, change it")
    # Project-local module; the import binds the name in THIS module.
    from functions.another_module import another_module_glob
    global another_module_glob
    print("Before: " + str(another_module_glob))
    another_module_glob += 1
    print("After: " + str(another_module_glob))
def second_way_to_change_global():
    """Change another module's global through sys.modules attribute access."""
    print("\nsecond_way_to_change_global()")
    print("We can find module via sys.module method and get access to " +
          "its' global var.")
    import sys
    # Attribute assignment on the module object changes the real global.
    an_mod = sys.modules['functions.another_module']
    print("Before: " + str(an_mod.another_module_glob))
    an_mod.another_module_glob += 1
    print("After: " + str(an_mod.another_module_glob))
def nested_functions():
    """Show LEGB name lookup from a nested function."""
    print("\nnested_functions()")
    print("Within nested function reference to the variable looks first " +
          "in the current local scope.")
    print("Than in the local scope of any enclosing functions from " +
          "inner to outer.")
    print("Then in the current global scope and finally in built-ins.")
    var = 99
    print("In local scope we declared variable: " + str(var))
    def func():
        # 'var' resolves in the enclosing function's scope.
        print("From nested function: " + str(var))
    func()
def closures():
    """Build closures with def and with lambda default-argument binding."""
    print("\nclosures()")
    print("Closure - function object that remember values in enclosing " +
          "scopes regardless of whether those scopes are still present " +
          "in memory.")
    def maker(n):
        # 'action' captures n from the enclosing call's scope.
        def action(x):
            return x ** n
        return action
    mk_object = maker(2)
    print("First closure return values powered to 2: " + str(mk_object(4)))
    another_maker = maker(5)
    print("Second closure return values powered to 5: " + str(another_maker(5)))
    def lambda_maker():
        x = 4
        # Pass x manually
        action = (lambda n, x=x: x ** n)
        return action
    print("We can create closures with lambdas.")
    lambda_mk = lambda_maker()
    print("Acts the same: " + str(lambda_mk(3)))
def nonlocal_examples():
    """Demonstrate rebinding enclosing-scope names with 'nonlocal'."""
    print("\nnonlocal_examples")
    print("We know that in nested function we can reference variables that " +
          "was declared in enclosing function.")
    print("We can use them, but can not change (static variables).")
    print("So if we want more control on variables, we " +
          "should use 'nonlocal' keyword")
    static_var = 64
    mutable_list = [1, 2]
    print("We declare two variables: static and mutable:")
    print(static_var)
    print(mutable_list)
    def nested_func():
        # Without this declaration, 'static_var += 10' below would raise
        # UnboundLocalError (assignment makes the name local).
        nonlocal static_var
        print("Inside nested function:")
        print("Static variable = " + str(static_var))
        print("We can use it here only because this: 'nonlocal static_var'")
        print("Mutable variable = " + str(mutable_list))
        print("Let's try to change static variable:")
        static_var += 10
        print(static_var)
        print("Let's try to change mutable variable:")
        # In-place mutation needs no nonlocal declaration.
        mutable_list[0] = "changed"
        print(mutable_list)
    nested_func()
    print("Remember:")
    print("- nonlocal variables must have been previously assigned " +
          "in enclosing function")
    print("- nonlocal restricts scope lookup to enclosing defs.")
    print("So you can't use nonlocal keyword with global variable")
def closures_with_nonlocal():
    """Closures with per-instance mutable state kept via 'nonlocal'."""
    print("\nclosures_with_nonlocal")
    print("With nonlocal it's easy to create closures that will have")
    print("it's own individual state that can't be changed outside")
    def maker(n):
        degree = n
        def action():
            # Each closure increments its own captured 'degree'.
            nonlocal degree
            print("Closure with degree " + str(degree))
            degree += 1
        return action
    print("We have two closures, that have different initial state " +
          "(value of degree). Each call they increment it by one.")
    first = maker(2)
    second = maker(10)
    first()
    second()
    first()
    second()
def function_attributes():
    """Closure state kept in function attributes (portable to Python 2)."""
    print("\nfunction_attributes")
    print("Another way to create closures with state retention - " +
          "function attributes")
    def maker(n):
        degree = n
        def action():
            # State lives on the function object itself, so it is also
            # readable/writable from outside (see second.degree below).
            print("Closure with degree " + str(action.degree))
            action.degree += 1
        action.degree = degree
        return action
    print("We have two closures, that have different initial state " +
          "(value of degree). Each call they increment it by one.")
    first = maker(63)
    second = maker(8)
    first()
    second()
    first()
    second()
    print("This method of state retention for closures is portable: " +
          "we can us it with 2.X and 3.X")
    print("Also we have access to closures attributes outside: " +
          str(second.degree))
# Demo driver: run the remaining examples, then print the summary.
first_way_to_change_global()
second_way_to_change_global()
nested_functions()
closures()
nonlocal_examples()
closures_with_nonlocal()
function_attributes()
print("\nUpshot\n")
print("global makes scope lookup begin in the enclosing module’s scope and")
print("allows names there to be assigned. Scope lookup continues on to the")
print("built-in scope if the name does not exist in the module, but ")
print("assignments to global names always create or change them in the")
print("module’s scope\n")
print("nonlocal restricts scope lookup to just enclosing defs, requires that")
print("the names already exist there, and allows them to be assigned.")
print("Scope lookup does not continue on to the global or built-in scopes\n") | {
"repo_name": "iamantony/PythonNotes",
"path": "src/functions/scopes.py",
"copies": "1",
"size": "7183",
"license": "mit",
"hash": -660773789471181000,
"line_mean": 28.9525862069,
"line_max": 80,
"alpha_frac": 0.6232065747,
"autogenerated": false,
"ratio": 4.03314606741573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.515635264211573,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antony Cherepanov'
import os
import sys
def different_ways_to_print():
    """Show print()'s sep, end and file keyword arguments."""
    print("\ndifferent_ways_to_print()")
    a, b, c = 42, "i'm a string", [99, 0.2]
    print("The standard way:")
    print(a, b, c)
    print("With special separator:")
    print(a, b, c, sep=" -- ")
    print("With special line ending:")
    print(a, b, c, end=" -it's-the-end-of-the-line-")
    print("Print to file:")
    # NOTE(review): the open() handles below are never closed explicitly;
    # the demo relies on refcounting to flush/close before os.remove.
    print(a, b, c, file=open("test.txt", "w"))
    print("From file: " + open("test.txt").read())
    os.remove("test.txt")
def print_object_info():
    """Show id() and hex() for inspecting object identity."""
    print("\nprint_object_info()")
    print("Sometimes we want to know with what object we are working.")
    print("For that purpose we can use function id().")
    # id() is an opaque per-object integer (the address in CPython).
    print("It return address of the object. Example: ")
    print(id("string"))
    print("To convert address to hex number use hex()")
    simple_list = [1, 2]
    print(str(simple_list) + " = " + hex(id(simple_list)))
def stdout_redirection():
    """Redirect print output to a file by swapping sys.stdout."""
    print("\nstdout_redirection()")
    print("After this line all print output will be redirected to file")
    # NOTE(review): if anything raises between here and the restore below,
    # sys.stdout stays pointing at the (possibly closed) file.
    temp = sys.stdout
    sys.stdout = open("log.txt", "a")
    print("This string will be saved to file!")
    sys.stdout.close()
    sys.stdout = temp
    print("Back! Let's see what in a file:")
    print(open("log.txt").read())
    os.remove("log.txt")
# Demo driver (runs at import time).
different_ways_to_print()
print_object_info()
stdout_redirection() | {
"repo_name": "iamantony/PythonNotes",
"path": "src/tools/printing.py",
"copies": "1",
"size": "1471",
"license": "mit",
"hash": 8462925440992717000,
"line_mean": 24.3035714286,
"line_max": 72,
"alpha_frac": 0.5914343984,
"autogenerated": false,
"ratio": 3.389400921658986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44808353200589857,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antony Cherepanov'
import pickle
import json
import os
def save_dict_to_file_via_pickle():
    """Round-trip a dict through pickle: dump to disk, reload, compare.

    Fix: the original leaked file handles (an unclosed ``open(...).read()``
    and dump/load handles closed manually); ``with`` guarantees every handle
    is closed even on error, so the final os.remove cannot fail on Windows.
    """
    print("\nsave_dict_to_file_via_pickle()")
    simple_dict = {"key1": 224, "kkl": "strong"}
    print("Our dict: " + str(simple_dict))
    print("Let's serialise it and save to file")
    with open("datafile.pkl", "wb") as test_file:
        pickle.dump(simple_dict, test_file)
    with open("datafile.pkl", "rb") as raw_file:
        print("Let's see what inside: " + str(raw_file.read()))
    print("And now recreate it from file!")
    with open("datafile.pkl", "rb") as reopened_test_file:
        recreated_dict = pickle.load(reopened_test_file)
    print("Recreated dict: " + str(recreated_dict))
    print("Are they the same: " + str(simple_dict == recreated_dict))
    os.remove("datafile.pkl")
def save_dict_as_json():
    """Round-trip a dict through JSON: dump to disk, reload, print.

    Fix: the original left three file handles unclosed (``json.dump`` target
    and two bare ``open(...)`` reads); ``with`` closes them deterministically
    so the trailing os.remove is always safe.
    """
    print("\nsave_dict_as_json()")
    simple_dict = {"key1": 224, "kkl": "strong"}
    print("Our dict: " + str(simple_dict))
    print("Let's serialise it and save to json file")
    with open("testjson.txt", "w") as json_file:
        json.dump(simple_dict, fp=json_file)
    with open("testjson.txt") as raw_file:
        print("Let's see what inside: " + raw_file.read())
    with open("testjson.txt") as json_file:
        recreated_dict = json.load(json_file)
    print("Recreated dict: " + str(recreated_dict))
    os.remove("testjson.txt")
# Demo driver (runs at import time).
save_dict_to_file_via_pickle()
save_dict_as_json() | {
"repo_name": "iamantony/PythonNotes",
"path": "src/tools/serialisation.py",
"copies": "1",
"size": "1387",
"license": "mit",
"hash": -5379272974747288000,
"line_mean": 26.3469387755,
"line_max": 77,
"alpha_frac": 0.613554434,
"autogenerated": false,
"ratio": 3.1666666666666665,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42802211006666663,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antony Cherepanov'
def get_list_of_items():
    """Print a dict's keys(), values() and items() view objects."""
    print("\nget_list_of_items():")
    some_dict = dict(test=1, some_list=[1, 2, 3], useful="This is a dict!")
    print("Dict: " + str(some_dict))
    print("Dict keys: " + str(some_dict.keys()))
    print("Dict values: " + str(some_dict.values()))
    print("Dict items: " + str(some_dict.items()))
def mutability_example():
    """Show that a dict stores a reference to a mutable list, not a copy."""
    print("\nmutability_example():")
    some_list = [1, 4, 9, 99]
    some_dict = dict([("test", 1), ("list", some_list), ("useful","This is a dict!" )])
    print("External list: " + str(some_list))
    print("Dict with list as value: " + str(some_dict))
    # Mutating the shared list is visible through the dict as well.
    some_list.append(9567)
    print("List changed outside: " + str(some_list))
    print("Dict also changed: " + str(some_dict))
def sort_dict():
    """Print a dict's entries in ascending key order."""
    print("\nsort_dict():")
    some_dict = {'a': 1, 'c': 56, 'z': 2}
    print("Dict: " + str(some_dict))
    # Sorting the items by key yields the same order as sorting the keys.
    for key, value in sorted(some_dict.items()):
        print(str(key) + ": " + str(value))
# Demo driver (runs at import time).
get_list_of_items()
mutability_example()
sort_dict() | {
"repo_name": "iamantony/PythonNotes",
"path": "src/types/dict.py",
"copies": "1",
"size": "1079",
"license": "mit",
"hash": 5577652131674973000,
"line_mean": 28.8857142857,
"line_max": 87,
"alpha_frac": 0.5634847081,
"autogenerated": false,
"ratio": 3.0055710306406684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9051253316443864,
"avg_score": 0.003560484459360864,
"num_lines": 35
} |
__author__ = 'Antony Cherepanov'
def gnome_sort(t_input):
    """ Gnome Sort Algorithm
    Simple and slow algorithm
    http://en.wikipedia.org/wiki/Gnome_sort
    Best case performance: O(n)
    Worst case performance: O(n^2)
    Worst Case Auxiliary Space Complexity: O(1)
    (Fix: the original docstring had best and worst cases swapped.)
    :param t_input: [list] of numbers
    :return: [list] - new sorted list of numbers (input left untouched)
    """
    array = t_input[:]
    current_pos = 0
    saved_pos = 0
    while current_pos < len(t_input) - 1:
        if array[current_pos] <= array[current_pos + 1]:
            # check if we can jump further to the previously saved position
            # (fix: was 'saved_pos is 0' / 'is not 0', which identity-compares
            # an int literal and only worked via CPython small-int interning)
            if saved_pos != 0:
                current_pos = saved_pos
                saved_pos = 0
            current_pos += 1
        else:
            # swap elements
            array[current_pos], array[current_pos + 1] = \
                array[current_pos + 1], array[current_pos]
            # check position
            if 0 < current_pos:
                # if we are not at the start, then save this position
                # and step back
                if saved_pos == 0:
                    saved_pos = current_pos
                current_pos -= 1
            else:
                # if we are at the beginning - go to the next position
                current_pos += 1
    return array
| {
"repo_name": "iamantony/PythonNotes",
"path": "src/algorithms/sort/gnomesort.py",
"copies": "1",
"size": "1355",
"license": "mit",
"hash": -5063976868646683000,
"line_mean": 30.2619047619,
"line_max": 75,
"alpha_frac": 0.5129151292,
"autogenerated": false,
"ratio": 4.1692307692307695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 42
} |
__author__ = 'Antony Cherepanov'
def immutable_set():
    """Show that set ELEMENTS must be hashable.

    Fix: the original claimed sets are immutable, but they are mutable -
    ``simple_set.add([0])`` fails with TypeError only because lists are
    unhashable. The messages now state the real reason, and the handler
    catches the specific TypeError instead of swallowing everything.
    """
    print("\nimmutable_set():")
    simple_list = [1, 2, 1, 5, 4, 7, 7]
    simple_set = set(simple_list)
    print("List :" + str(simple_list))
    print("Set that based on this list :" + str(simple_set))
    print("Check that set elements must be hashable:")
    try:
        simple_set.add([0])
        print("Will not print")
    except TypeError:
        # Lists are unhashable; the set itself is perfectly mutable.
        print("Unhashable element!")
def set_operations():
    """Demonstrate the four basic binary set operations."""
    print("\nset_operations():")
    first = {1, 4, 7, 9}
    print("First set: " + str(first))
    second = {1, 3, 4, 10}
    print("Second set: " + str(second))
    # Compute each result up front, then print in the original order.
    difference = first - second
    union = first | second
    intersection = first & second
    symmetric = first ^ second
    print("Difference: " + str(difference))
    print("Union: " + str(union))
    print("Intersection: " + str(intersection))
    print("Symmetric Difference (XOR): " + str(symmetric))
def list_in_set():
    """Show that sets reject mutable (unhashable) elements like lists,
    while frozensets are hashable and can be nested inside a set."""
    print("\nlist_in_set():")
    simple = set([1, 4, 7, 9])
    print("Set could be created on base of list: " + str(simple))
    print("But we can't add list to set because lists are mutable.")
    try:
        # Raises TypeError: lists are unhashable.
        simple.add([10, 9])
        print("Will not print")
    except Exception:
        print("Failed to add list to set")
    print("Workaround: we can add frozensets to set because they are immutable!")
    simple.add(frozenset([6, 7]))
    print("Set with frozenset inside: " + str(simple))
# Demo driver (runs at import time).
immutable_set()
set_operations()
list_in_set() | {
"repo_name": "iamantony/PythonNotes",
"path": "src/types/sets.py",
"copies": "1",
"size": "1443",
"license": "mit",
"hash": -4599833167454836700,
"line_mean": 25.7884615385,
"line_max": 81,
"alpha_frac": 0.5717255717,
"autogenerated": false,
"ratio": 3.510948905109489,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4582674476809489,
"avg_score": null,
"num_lines": null
} |
__author__ = "Antony Cherepanov"
def merge_sort(t_input):
    """Return a sorted list built from t_input using top-down merge sort.

    Fix: the original handled only lengths 1 and 2 in the base case, so an
    empty list fell through every branch and returned None.
    :param t_input: list of mutually comparable items
    :return: new sorted list (for len < 2 the input object itself, as before)
    """
    length = len(t_input)
    if length < 2:
        # Already sorted; also covers the previously-crashing empty list.
        return t_input
    if length == 2:
        first, second = t_input
        return [first, second] if first < second else [second, first]
    half_length = length // 2
    sorted_left = merge_sort(t_input[:half_length])
    sorted_right = merge_sort(t_input[half_length:])
    # Merge the two sorted halves front-to-front.
    i = 0
    j = 0
    result = list()
    for _ in range(length):
        if len(sorted_left) <= i:
            result.extend(sorted_right[j:])
            break
        if len(sorted_right) <= j:
            result.extend(sorted_left[i:])
            break
        if sorted_left[i] < sorted_right[j]:
            result.append(sorted_left[i])
            i += 1
        else:
            result.append(sorted_right[j])
            j += 1
    return result
"repo_name": "iamantony/PythonNotes",
"path": "src/algorithms/sort/mergesort.py",
"copies": "1",
"size": "1163",
"license": "mit",
"hash": 7740609142568472000,
"line_mean": 25.7380952381,
"line_max": 48,
"alpha_frac": 0.4393809114,
"autogenerated": false,
"ratio": 4.1535714285714285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5092952339971428,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antony Cherepanov'
def nesting():
    """ List can hold another list! """
    matrix = [
        [1, 2, 3],  # first row
        [4, 5, 6],  # second row
        [7, 8, 9],  # third row
    ]
    print("Our matrix as list of lists: " + str(matrix))
    print("Second element of third row: " + str(matrix[2][1]))
def extending():
    """ Several ways to extend list """
    values = [1, 2]
    print("At the start: " + str(values))
    values[:0] = [-1, 0]
    print("Prepend values [:0]: " + str(values))
    values[len(values):] = [3, 4]
    print("Append values [len(some_list):]: " + str(values))
    values.extend([5, 6])
    print("Explicit extending: " + str(values))
def sorting():
    """Demonstrate in-place sort(), sorted(), key= and reverse= options."""
    some_list = [2, 5, 1, -1, -10, 9]
    print("Our list: " + str(some_list))
    some_list.sort()
    # Fix: "Standart" -> "Standard" (typo in the demo output, twice).
    print("Standard sorting: " + str(some_list))
    print("Via standard function: " + str(sorted(some_list)))
    some_list.sort(key=abs)
    print("Sorting by absolute values: " + str(some_list))
    some_list.sort(reverse=True)
    print("Sorting in reverse order: " + str(some_list))
# Run the list demonstrations defined above (executes on import).
nesting()
extending()
sorting()
"repo_name": "iamantony/PythonNotes",
"path": "src/types/lists.py",
"copies": "1",
"size": "1285",
"license": "mit",
"hash": 3814658780672597500,
"line_mean": 21.8333333333,
"line_max": 63,
"alpha_frac": 0.5556420233,
"autogenerated": false,
"ratio": 3.165024630541872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42206666538418713,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antony Cherepanov'
def passing_arguments():
    """Show how immutable vs. mutable arguments behave when passed."""
    print("\npassing_arguments()")
    print("Immutable arguments are effectively passed 'by value'")
    print("So inside a function we work with copy of variable and")
    print("any changed made inside the function will not affect")
    print("original variable.")
    sample_text = "hey there"
    print("Example: string = " + sample_text)
    print("Object: " + str(id(sample_text)))

    def mutate_string(arg_str):
        # Rebinding the parameter has no effect on the caller's variable.
        arg_str = "jjj"
        print("Inside function we changed string: " + arg_str)
        print("Object: " + str(id(arg_str)))

    mutate_string(sample_text)
    print("But outside of the function our string still have the same value: " +
          sample_text)
    print("____")
    print("Mutable arguments are effectively passed 'by pointer'")
    print("Such variables can be changed inside functions")
    sample_items = [1, 2, 3]
    print("Example: list = " + str(sample_items))
    print("Object: " + str(id(sample_items)))

    def mutate_list(arg_list):
        # In-place mutation is visible to the caller: same object.
        arg_list[0] = "wow"
        print("Inside function we changed list: " + str(arg_list))
        print("Object: " + str(id(arg_list)))

    mutate_list(sample_items)
    print("Now we outside and we have changed modified list: " +
          str(sample_items))
def positional_arguments():
    """Values are matched to parameter names left-to-right by position."""
    print("\npositional_arguments()")
    print("It's a normal case. Passed argument values match to argument names")
    print("in a function header by position, from left to right")

    def show(a, b, c):
        print(a, b, c)

    show(1, 2, 3)
def keyword_arguments():
    """Values are matched to parameters by name; call-site order is free."""
    print("\nkeyword_arguments()")
    print("Keyword arguments allow us to match values by name of argument.")
    print("In this case position of values doesn't matter")

    def show(a, b, c):
        print(a, b, c)

    show(c=3, a=1, b=2)
def default_arguments():
    """Demonstrate default argument values and the mutable-default pitfall."""
    print("\ndefault_arguments()")
    print("We can declare default values for function arguments. In this case")
    print("if we don't pass a value to argument, " +
          "it's default value will be used")

    def func(a=1, b=2, c=3):
        print(a, b, c)

    print("Call function without argument values")
    func()
    print("Call function with several argument values")
    func(c=10)
    print("Call function with all values")
    # Fix: the original printed "0 0 0" directly instead of actually calling
    # the function; the visible output is unchanged.
    func(0, 0, 0)
    print()
    print("Beware mutable defaults: if you code a default to be a mutable")
    print("object (list for example), the same single mutable object will be")
    print("reused every time the function is later called!")

    def func_with_mutable(val=list()):
        # Deliberate anti-pattern kept for demonstration: the default list
        # is created once and shared across calls.
        val.append(1)
        print(val)

    func_with_mutable()
    func_with_mutable()
    func_with_mutable()
def arbitrary_arguments():
    """Collect extra positional args with * and extra keyword args with **."""
    print("\narbitrary_arguments()")
    print("Functions can use special arguments preceded with")
    print("one or two * characters to collect an arbitrary number")
    print("of possibly extra arguments")
    print("All positional arguments will be added to argument with one *")
    print("All keyword arguments will be added to argument with two *")

    def collect(*pargs, **kargs):
        print(pargs, kargs)

    collect()
    collect(1, 2)
    collect(my_key=False)
    collect(1, 2, 3, a='this is a string', b=True)
def combination_of_arguments():
    """Describe the required ordering when mixing argument-passing modes."""
    print("\ncombination_of_arguments()")
    print("If you choose to use and combine the special argument-matching")
    print("modes, Python will ask you to follow these ordering rules among")
    print("the modes’ optional components:")
    print("In a function header:")
    print("any normal arguments (name); followed by any default arguments")
    print("(name=value); followed by the *name form; followed by any name")
    print("or name=value keyword-only arguments; followed by the **name form")
    print()
    print("In a function call:")
    print("any positional arguments (value); followed by a combination")
    print("of any keyword arguments (name=value) and the *iterable form;")
    print("followed by the **dict form")
    print()
    # `keyword` appears after *pargs, so it is keyword-only; extra
    # positionals land in pargs and extra keywords in kargs.
    def func(a, b, c=42, *pargs, keyword='test', **kargs):
        print(a, b, c, pargs, keyword, kargs)
    func(1, 2, 41, 1, 1, 1, keyword='call_func', my_arbitrary_key="this")
def unpacking_arguments():
    """Unpack iterables with * and mappings with ** at the call site."""
    print("\nunpacking_arguments()")
    print("We can use the * syntax when we call a function. In this context")
    print("it will unpacks a collection of arguments")

    def show(a, b, c, d):
        print(a, b, c, d)

    positional = [1, 2, 3, 4]
    print("We can unpack list: " + str(positional))
    show(*positional)
    named = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    print("We can unpack dict: " + str(named))
    show(**named)
    print("We can combine objects unpacking:")
    show(*(1, 2), **{'d': 4, 'c': 3})
    show(1, *(2, 3), d=4)
    show(1, *(2,), c=3, **{'d': 4})
# Run all argument-passing demonstrations in order (executes on import).
passing_arguments()
positional_arguments()
keyword_arguments()
default_arguments()
arbitrary_arguments()
combination_of_arguments()
unpacking_arguments()
"repo_name": "iamantony/PythonNotes",
"path": "src/functions/arguments.py",
"copies": "1",
"size": "5218",
"license": "mit",
"hash": -4258642881229772000,
"line_mean": 28.3372093023,
"line_max": 80,
"alpha_frac": 0.6150306748,
"autogenerated": false,
"ratio": 3.7906976744186047,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4905728349218605,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antony Cherepanov'
def range_iteration():
    """Demonstrate that range() is iterable but not itself an iterator."""
    print("\nrange_iteration")
    rng = range(10)
    print("When we call range() functions, we get object tht can iterate: " +
          str(rng))
    print("But it's not an iterator! Let's try:")
    try:
        next(rng)
        print("Wow, iterating!")
    except TypeError:
        print("Nope, not an iterator")
    itr = iter(rng)
    print("To get iterator we should call iter( range_object ): " + str(itr))
    print("Call next( iterator ) to get next value: " + str(next(itr)))
    print("Again: " + str(next(itr)))
    second_itr = iter(rng)
    print("We can create another range iterator: " + str(second_itr))
    print("And it will not depend from previously created iterators: " +
          str(next(second_itr)))
    # Fix: the original printed `itr.__next__` (the bound method object)
    # instead of calling it, so no value was actually fetched.
    print("Another way to get next value - call built-in method: " +
          str(itr.__next__()))
def single_pass_iterators():
    """Show that iter() on a map object returns the same shared iterator."""
    print("\nsingle_pass_iterators")
    print("Unlike range(), functions like map(), zip() and so on don't " +
          "support multiple active iterators")
    mapped = map(abs, (-1, 0, 1))
    first_it = iter(mapped)
    second_it = iter(mapped)
    print("We created two map iterators: " + str(first_it) + " and " + str(second_it))
    print("Call first iterator: " + str(next(first_it)))
    print("Call second iterator: " + str(next(second_it)))
    print("Call first iterator: " + str(next(first_it)))
    try:
        print("Call second iterator: " + str(next(second_it)))
    except StopIteration:
        print("End of iteration")
# Run the iteration demonstrations (executes on import).
range_iteration()
single_pass_iterators()
"repo_name": "iamantony/PythonNotes",
"path": "src/operations/iteration.py",
"copies": "1",
"size": "1628",
"license": "mit",
"hash": 943196206723900900,
"line_mean": 29.9607843137,
"line_max": 79,
"alpha_frac": 0.5847665848,
"autogenerated": false,
"ratio": 3.4638297872340424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9539173838718656,
"avg_score": 0.00188450666307713,
"num_lines": 51
} |
__author__ = 'Antony Cherepanov'
def repeat_string():
    """ Repeat string several times simply multiplying it by some number """
    base = "And Again "
    repeated = base * 5
    print("Original string :", base)
    print("Repeated string :", repeated)
def immutability():
    """ Strings are immutable objects. You can't change them. """
    string = "example"
    print("Our string: " + string + "; id = " + str(id(string)))
    try:
        string[0] = 'f'
        print("You will not see this because of exception!")
    except TypeError:
        # Narrowed from `except Exception`: item assignment on str raises
        # TypeError specifically; a blanket handler could hide other bugs.
        print("Failed to change string object!")
    string = "new " + string
    print("We can change string only by creating new object: " + string + "; id = " + str(id(string)))
def transform_to_list():
    """ Transform string to list of it's characters """
    word = "example"
    chars = list(word)
    print("Transform string \"" + word + "\" to list: " + str(chars))
def string_formatting():
    """Three equivalent ways to interpolate values into a string."""
    left = "Example"
    right = "string formatting"
    print("%s of %s 1" % (left, right))
    print("{0} of {1} 2".format(left, right))
    print("{} of {} 3".format(left, right))
def string_formatting_with_dict():
    """Name-driven formatting: %-style with a dict, str.format, Template."""
    data = dict(name="Bob", action="working")
    print("1: %(name)s is %(action)s" % data)
    print("2: {name} is {action}".format(name="Ann", action="cooking"))
    from string import Template
    temp = Template("3: $name is $action")
    print(temp.substitute(name="Jane", action="reading"))
# Run all string demonstrations (executes on import).
repeat_string()
immutability()
transform_to_list()
string_formatting()
string_formatting_with_dict()
"repo_name": "iamantony/PythonNotes",
"path": "src/types/strings.py",
"copies": "1",
"size": "1829",
"license": "mit",
"hash": -1436668460843254300,
"line_mean": 28.0163934426,
"line_max": 102,
"alpha_frac": 0.6145434664,
"autogenerated": false,
"ratio": 3.709939148073022,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.981988045578391,
"avg_score": 0.0009204317378222723,
"num_lines": 61
} |
__author__ = 'Antony Cherepanov'
def Start():
    """Demo driver: count inversions in a sample array (Python 2 syntax)."""
    # NOTE(review): `input` shadows the builtin; harmless here, but rename
    # if this function ever grows.
    input = [4, 2, 8, 5, 3, 1, 6, 7]
    # input = [6, 5, 4, 3, 2, 1]
    # input = [1, 3, 5, 2, 4, 6]
    print "input =", input
    sortedInput, inversionsNum = InversionsCounter(input)
    print "number of inversions =", inversionsNum
# The main idea: use Merge Sort algorithm
def InversionsCounter(t_input):
    """Return (sorted_copy, inversion_count) for the sequence *t_input*.

    An inversion is a pair i < j with t_input[i] > t_input[j] (ties are
    counted as inversions, matching the original merge comparison).
    Counting happens during the merge, so the whole run is O(n log n).
    """
    length = len(t_input)
    if length <= 1:
        # Fix: the original returned None for an empty input because no
        # branch covered length == 0; a 0/1-element array has 0 inversions.
        return t_input, 0
    if length == 2:
        first, second = t_input
        if second < first:
            # first > second: exactly one inversion
            return [second, first], 1
        return [first, second], 0
    # Fix: use floor division so the split index is an int on both
    # Python 2 and Python 3 (the original `length / 2` breaks on py3).
    halfLength = length // 2
    sortedLeft, leftInvNum = InversionsCounter(t_input[:halfLength])
    sortedRight, rightInvNum = InversionsCounter(t_input[halfLength:])
    i = j = 0
    maxI = len(sortedLeft)
    maxJ = len(sortedRight)
    sortedArray = []
    inversionsNum = leftInvNum + rightInvNum
    for _ in range(length):
        if maxI <= i:
            sortedArray.extend(sortedRight[j:])
            break
        if maxJ <= j:
            sortedArray.extend(sortedLeft[i:])
            break
        if sortedLeft[i] < sortedRight[j]:
            # In order: no inversion contributed.
            sortedArray.append(sortedLeft[i])
            i += 1
        else:
            sortedArray.append(sortedRight[j])
            j += 1
            # Every element still pending in the (sorted) left half is
            # greater than the right element just taken, so each of those
            # pairs is an inversion.
            inversionsNum += maxI - i
    return sortedArray, inversionsNum
Start()
| {
"repo_name": "iamantony/PythonNotes",
"path": "src/algorithms/search/inversionscount.py",
"copies": "1",
"size": "2269",
"license": "mit",
"hash": -2276744506828214500,
"line_mean": 30.4142857143,
"line_max": 77,
"alpha_frac": 0.5130013222,
"autogenerated": false,
"ratio": 4.289224952741021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5302226274941021,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antony Cherepanov'
def ways_to_create():
    """Several literal forms that all build tuples."""
    print("\nways_to_create()")
    # On the end - comma!
    single = "first value",
    print("1: ", str(single))
    # Also we can create tuple via tuple()
    pair = ("second value", 2)
    print("2: ", str(pair))
    inner = (1, (2, 3))
    print("3 nested: ", str(inner))
def immutability():
    """Tuples can't be modified in place, but may contain mutable objects."""
    print("\nimmutability()")
    simple_tuple = 1, 2, 3
    print("tuple = ", str(simple_tuple))
    try:
        simple_tuple[0] = 10
        print("This will not be printed")
    except TypeError:
        # Fix: replaced a bare `except:` (which also swallows SystemExit and
        # KeyboardInterrupt) with the specific exception raised by item
        # assignment on a tuple.
        print("We can't change value in tuple. But we can recreate them!")
    simple_list = [1, 4, 99]
    second_tuple = simple_tuple, simple_list
    print("Modified tuple: " + str(second_tuple))
    # The tuple itself is unchanged; the list it references is mutable.
    simple_list[0] = 191
    print("After list modification: " + str(second_tuple))
def assignment():
    """Unpack a string's characters via tuple assignment."""
    print("\nassignment()")
    print("Assign string to variables in tuple:")
    a, b, c = "ABC"
    print(a, b)
def extended_sequence_unpacking():
    """Grab the first and last elements with starred assignment."""
    print("\nextended_sequence_unpacking()")
    sequence = [1, 2, 3, 4, 5]
    print("We have some object (string, list, tuple, set): " + str(sequence))
    print("Suppose, we want only first and the last element of this object.")
    print("And we don't know the length of it.")
    print("We can use Extended Sequence unpacking:")
    head, *body, tail = sequence
    print("First = " + str(head))
    print("Last = " + str(tail))
    print("Middle = " + str(body))
# Run the tuple demonstrations (executes on import).
ways_to_create()
immutability()
assignment()
extended_sequence_unpacking()
"repo_name": "iamantony/PythonNotes",
"path": "src/types/tuples.py",
"copies": "1",
"size": "1639",
"license": "mit",
"hash": -3393803989563968500,
"line_mean": 23.640625,
"line_max": 80,
"alpha_frac": 0.5820622331,
"autogenerated": false,
"ratio": 3.5399568034557234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46220190365557234,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antony Cherepanov'
def while_with_else():
    """A while-loop's else clause runs only when the loop wasn't broken."""
    print("\nwhile_with_else")
    print("If inside while() loop 'break' was not called, code under the 'else' will be executed")
    print("\nLoop without break:")
    counter = 2
    while counter < 5:
        print("Inside loop")
        counter += 1
    else:
        print("Code inside else")
    print("End of loop")
    print("\nLoop with break:")
    counter = 2
    while counter < 5:
        print("Inside loop")
        if counter == 3:
            print("Break!")
            break
        counter += 1
    else:
        print("Code inside else")
    print("End of loop")
def for_with_else():
    """A for-loop's else clause runs when no break occurred."""
    print("\nfor_with_else")
    print("If inside for() loop 'break' was not called, code under the 'else' will be executed")
    print("\nLoop without break")
    for _ in range(3):
        print("Inside loop")
    else:
        print("This is else")
    print("End of loop")
def iterate_through_object():
    """Iterate lists, dicts and lists of tuples with for-loops."""
    print("\niterate_through_object")
    print("\nIterate simple list:")
    numbers = [1, 2, 3, 4]
    for element in numbers:
        print("List element: " + str(element))
    print("\nIterate dict:")
    mapping = {1: "1", 2: "2", 3: "3"}
    for key in mapping:
        print("Key: " + str(key) + "; Value: " + mapping[key])
    for key, value in mapping.items():
        print(str(key) + " => " + value)
    print("\nIterate list of tuples:")
    triples = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]
    for x, y, z in triples:
        print(x, y, z)
    print("\n With extended sequence assignment")
    for *init, last in triples:
        print(init, last)
def parallel_traversals():
    """Iterate two sequences in lockstep with zip()."""
    print("\nparallel_traversals")
    first = [1, 2, 3, 4]
    second = [5, 6, 7, 8]
    # Fix: "We can you zip" -> "We can use zip" (typo in the demo output).
    msg = "What if we want to iterate two objects (in our case lists) " + \
          "of the same number of elements at the same time? We can use zip:"
    print(msg)
    for (x, y) in zip(first, second):
        print(x, y)
    print("\nBut what if objects have different number of elements?")
    print("zip() function will truncate result sequence to the length " +
          "of the shortest")
    first.append(42)
    print("First list now have one more element: " + str(first))
    # zip stops at the shorter input, so 42 is never paired.
    for (x, y) in zip(first, second):
        print(x, y)
# Run the loop demonstrations (executes on import).
while_with_else()
for_with_else()
iterate_through_object()
parallel_traversals()
"repo_name": "iamantony/PythonNotes",
"path": "src/operations/loops.py",
"copies": "1",
"size": "2463",
"license": "mit",
"hash": 461948605431415230,
"line_mean": 22.898989899,
"line_max": 98,
"alpha_frac": 0.5403978888,
"autogenerated": false,
"ratio": 3.4788135593220337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45192114481220336,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Antring'
import urllib.request
import os
import feedparser
import datetime
from time import *
class rssReader:
    '''Class for finding and downloading podcast'''

    def __init__(self, url):
        # Parse the feed once and cache metadata for its newest entry.
        self.rssurl = url
        self.podstream = feedparser.parse(self.rssurl)
        self.podLink = self.podstream.entries[0].link
        self.podName = self.podstream.entries[0].title
        self.rsslen = len(self.podstream.entries)

    def checker(self):
        '''Checking if a podcast episode is already downloaded'''
        curDir = os.getcwd()  # Get current working dir
        #TODO Write rest of this...

    def downloader(self, podcasturl, filename):
        '''Download the .mp3 at *podcasturl* to "<filename>.mp3" and append
        a timestamped success/failure line to log.txt.'''
        try:
            urllib.request.urlretrieve(podcasturl, filename + '.mp3')
            # Context manager guarantees the log file is closed.
            with open('log.txt', 'a') as f:
                f.write('(+)Downloaded ' + filename + ' - ' +
                        datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + '\n')
        except IOError as e:
            # Fix: the original concatenated the IOError *class* to a str
            # ('... ' + IOError + ...), which raised TypeError inside the
            # handler; log the actual error message instead.
            with open('log.txt', 'a') as f:
                f.write('(!)Downloader error ' + str(e) + ' ' +
                        datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + '\n')

    def infoprinter(self):
        '''Prints information about the podcast'''
        print("PodLink: ", self.podLink, "\nPodName: ", self.podName, "\nRSSLen: ", self.rsslen)
if __name__ == '__main__':
    # Demo: parse the P4 "Misjonen" podcast feed and print its metadata
    # (performs a network fetch via feedparser).
    misjonen = "http://www.p4.no/lyttesenter/podcast.ashx?pid=330"
    podcast1 = rssReader(misjonen)
    podcast1.infoprinter()
"repo_name": "antring/RssThingyServer",
"path": "server.py",
"copies": "1",
"size": "1585",
"license": "mit",
"hash": 1476858080664317400,
"line_mean": 32.0416666667,
"line_max": 118,
"alpha_frac": 0.6012618297,
"autogenerated": false,
"ratio": 3.530066815144766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9612247514366947,
"avg_score": 0.0038162260955636102,
"num_lines": 48
} |
__author__ = 'Antti Pohjola'
# DynDns autologin
# automatically logins to dyndns
import webapp2
import logging
import urllib
import urllib2
import cookielib
import time
class Settings:
    """DynDNS account credentials and endpoint URLs.

    Fill in the username/password before deploying; all attributes are
    read as class-level constants by CronController.
    """
    dyndns_username = ""
    dyndns_password = ""
    loginurl = "https://account.dyn.com/entrance"
    loginredirecturl = "https://account.dyn.com/"
    logouturl = "https://account.dyn.com/entrance/?__logout=1"
class HTMLSession:
    """Cookie-preserving HTTP session built on urllib2 (Python 2 code)."""
    cj = None
    opener = None
    txHeaders = None

    def __init__(self, txHeaders):
        #The CookieJar will hold any cookies necessary throughout the login process.
        self.cj = cookielib.CookieJar()
        self.txHeaders = txHeaders
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
        urllib2.install_opener(self.opener)

    def setHeaders(self, txheaders):
        # Fix: the original assigned the bare name `txHeaders`, which is not
        # defined in this method's scope (the parameter is `txheaders`), so
        # every call raised NameError.
        self.txHeaders = txheaders

    def getHeaders(self):
        """Return the headers currently sent with each request."""
        return self.txHeaders

    def openURI(self, uri, txdata):
        """Return the response body for *uri* (POSTing *txdata* if given),
        or None on IOError."""
        try:
            req = urllib2.Request(uri, txdata, self.txHeaders)
            # create a request object
            handle = urllib2.urlopen(req)
            # and open it to return a handle on the url
        except IOError as e:
            logging.debug("IOError, now its time to panic and freak out")
            return None
        else:
            return handle.read()
class CronController(webapp2.RequestHandler):
    """Cron endpoint that logs in to DynDNS (to keep the account active)
    and then immediately logs out."""

    def getHiddenRandHTMLResponse(self, response):
        # Extract the 34-character hidden "multiform" CSRF token embedded
        # in the login page HTML.
        target = "<input type=\'hidden\' name=\'multiform\' value=\'"
        return response[response.find(target)+len(target):response.find(target)+len(target)+34]

    def checkLogin(self, response):
        # Login succeeded iff the account page title is present.
        target = "<title>My Dyn Account</title>"
        if response.find(target) == -1:
            return False
        return True

    def autologin(self):
        """Perform the full login/logout round-trip against dyn.com."""
        txHeaders = {'User-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0'}
        txdata = None
        myhtmlSession = HTMLSession(txHeaders)
        response = myhtmlSession.openURI(Settings.loginurl, None)
        if response is None:
            logging.debug("Empty response")
            return
        hiddenval = self.getHiddenRandHTMLResponse(response)
        txdata = urllib.urlencode({'username': Settings.dyndns_username, 'password': Settings.dyndns_password, 'multiform': hiddenval, 'submit': "Log in"})
        response = myhtmlSession.openURI(Settings.loginurl, txdata)
        if response is None:
            # Fix: the original called the undefined name `loging`, which
            # raised NameError on this failure path.
            logging.debug("login failed: ")
            return
        #the response is 302 to new url, load it, and see if the login succeed
        response = myhtmlSession.openURI(Settings.loginredirecturl, None)
        if self.checkLogin(response):
            logging.info("Login succeed to dynDns")
            #sleep a while before logout
            time.sleep(5)
            response = myhtmlSession.openURI(Settings.logouturl, None)
            if response is None:
                logging.info("Logout FAILED")
            # NOTE(review): preserved from the original — "Logout Succeed"
            # is logged even after "Logout FAILED"; confirm whether a
            # return/else was intended.
            logging.info("Logout Succeed")

    def get(self):
        self.autologin()
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write('DNS refressed!')
# WSGI app routing /cron/autologin to CronController (module-level side effect).
app = webapp2.WSGIApplication([('/cron/autologin', CronController)], debug=True)
logging.debug("loaded cron")
| {
"repo_name": "Summeli/dyndns-autologin",
"path": "cron.py",
"copies": "1",
"size": "3348",
"license": "mit",
"hash": 5769142636215022000,
"line_mean": 31.1923076923,
"line_max": 152,
"alpha_frac": 0.6350059737,
"autogenerated": false,
"ratio": 3.9715302491103204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020053288688706684,
"num_lines": 104
} |
__author__ = 'anuvrat'
class WeightedQuickUnion(object):
    """Union-find with union-by-size and optional one-pass path compression."""

    def __init__(self, size, debug=False, compress_path=True):
        self.group_count = self.size = size
        self.group = list(range(size))   # parent pointers; roots point to themselves
        self.tree_size = [1] * size      # element counts, tracked at the roots
        self.debug = debug
        self.compress_path = compress_path

    def union(self, child, parent):
        """ Complexity = O(lg n) """
        root_a = self.find(child)
        root_b = self.find(parent)
        if root_a == root_b:
            return
        # push the smaller tree into the larger tree to reduce tree height
        combined = self.tree_size[root_b] + self.tree_size[root_a]
        if self.tree_size[root_a] < self.tree_size[root_b]:
            smaller, larger = root_a, root_b
        else:
            smaller, larger = root_b, root_a
        self.group[smaller] = self.group[larger]
        self.tree_size[larger] = combined
        self.tree_size[smaller] = 0
        self.group_count -= 1
        if self.debug:
            print(self.group_count, self.group, self.tree_size)

    def find(self, element):
        """ Complexity = O(lg n) """
        while self.group[element] != element:
            if self.compress_path:
                # Compress the path by bringing up the subtree by 1 level
                self.group[element] = self.group[self.group[element]]
            element = self.group[element]
        return element

    def connected(self, element_a, element_b):
        """ Complexity = O(lg n) """
        return self.find(element_a) == self.find(element_b)
class Edge(object):
    """A weighted edge between two node indices."""

    def __init__(self, node_a, node_b, weight):
        self.node_a, self.node_b, self.weight = node_a, node_b, weight

    def __repr__(self):
        return repr((self.node_a, self.node_b, self.weight))
if __name__ == '__main__':
    # Read "<nodes> <edges>" then one "i j weight" line per edge
    # (1-based node ids in the input, converted to 0-based below).
    nodes_count, edges_count = map(int, input().split())
    edges = []
    for _ in range(edges_count):
        i, j, k = map(int, input().split())
        edges.append(Edge(i - 1, j - 1, k))
    total_weight = 0
    mst = WeightedQuickUnion(nodes_count)
    # Kruskal's algorithm: scan edges in ascending weight order, keeping an
    # edge only when it joins two previously unconnected components.
    for edge in sorted(edges, key=lambda e: e.weight):
        if not mst.connected(edge.node_a, edge.node_b):
            mst.union(edge.node_a, edge.node_b)
            total_weight += edge.weight
    print(total_weight)
| {
"repo_name": "anuvrat/spoj",
"path": "problems/partial/003188_mst.py",
"copies": "1",
"size": "2460",
"license": "mit",
"hash": -8294627334888914000,
"line_mean": 33.1666666667,
"line_max": 80,
"alpha_frac": 0.5756097561,
"autogenerated": false,
"ratio": 3.455056179775281,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9527236580594074,
"avg_score": 0.0006858710562414266,
"num_lines": 72
} |
__author__ = 'anuvrat'
"""
Problem: http://www.spoj.com/problems/ACS/
"""
if __name__ == '__main__':
    # Virtual 1234x5678 matrix where cell (r, c) initially holds
    # r*5678 + c + 1. Row/column swaps are simulated by permuting these
    # index arrays instead of moving values (Python 2 script).
    rows = [x for x in range(1234)]
    columns = [x for x in range(5678)]
    try:
        entry = raw_input()
        while entry:
            command = entry.split()
            if command[0] == 'R':
                # R i j: swap rows i and j (1-based in the input)
                i, j = int(command[1]) - 1, int(command[2]) - 1
                rows[i], rows[j] = rows[j], rows[i]
            elif command[0] == 'C':
                # C i j: swap columns i and j
                i, j = int(command[1]) - 1, int(command[2]) - 1
                columns[i], columns[j] = columns[j], columns[i]
            elif command[0] == 'Q':
                # Q i j: print the value currently at cell (i, j)
                i, j = int(command[1]) - 1, int(command[2]) - 1
                print rows[i] * 5678 + columns[j] + 1
            elif command[0] == 'W':
                # W v: print the current 1-based (row, col) of value v
                i = int(command[1]) - 1
                x = int(i / 5678)
                y = i % 5678
                print str(rows.index(x) + 1) + ' ' + str(columns.index(y) + 1)
            entry = raw_input()
    except EOFError:
        # End of input terminates the command loop.
        pass
| {
"repo_name": "anuvrat/spoj",
"path": "problems/classical/acs.py",
"copies": "1",
"size": "1028",
"license": "mit",
"hash": -6960644796639814000,
"line_mean": 29.2352941176,
"line_max": 78,
"alpha_frac": 0.4270428016,
"autogenerated": false,
"ratio": 3.2738853503184715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42009281519184716,
"avg_score": null,
"num_lines": null
} |
__author__ = 'apatti'
import getopt
from sys import exit,argv
from readData import getAverageQuantity,getPreviousDayErrorRate,storePredictedValue
def getQuantityValue(date,school):
    """Predict the indent quantity for *school* on *date* and store it.

    NOTE(review): `prediction` is computed but never used, and
    storePredictedValue() is not passed the prediction — presumably it
    should receive it; verify against readData.storePredictedValue.
    """
    average = getAverageQuantity(date,school)
    errorRate = getPreviousDayErrorRate(date)
    prediction = average*errorRate
    storePredictedValue(date,school)
    return 0
def main(argv):
date=''
school=''
try:
opts, args = getopt.getopt(argv,"hd:s:",["date=","schoolid="])
except getopt.GetoptError:
print 'predictIndentQuantity.py -d <date> -s <schoolid>'
exit(2)
for opt, arg in opts:
if opt == '-h':
print 'predictIndentQuantity.py -d <date> -s <schoolid>'
exit(2)
elif opt in ("-d", "--date"):
date = arg
elif opt in ("-s", "--schoolid"):
school = arg
if date is '' or school is '':
print 'predictIndentQuantity.py -d <date> -s <schoolid>'
exit(2)
getQuantityValue(date,school)
if __name__ == '__main__':
    # Forward CLI arguments (minus the program name) to the option parser.
    main(argv[1:])
"repo_name": "prashishh/musichackday",
"path": "model/predictIndentQuantity.py",
"copies": "3",
"size": "1063",
"license": "cc0-1.0",
"hash": -8570101341501990000,
"line_mean": 24.9512195122,
"line_max": 83,
"alpha_frac": 0.6058325494,
"autogenerated": false,
"ratio": 3.627986348122867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01217328346262515,
"num_lines": 41
} |
__author__ = 'apatti'
class fwrapper:
    """Bundles a callable with the number of children it expects and a
    human-readable name, for use as a function-node type in a GP tree."""

    def __init__(self, function, childcount, name):
        self.function, self.childcount, self.name = function, childcount, name
class node:
    """Interior GP-tree node: applies a wrapped function to its children."""
    def __init__(self, fw, children):
        # Copy the callable and display name out of the fwrapper; keep the
        # list of child subtrees.
        self.function = fw.function
        self.children = children
        self.name = fw.name
    def evaluate(self, inp):
        # Evaluate every child on the same input, then apply this node's
        # function to the list of their results.
        results = [n.evaluate(inp) for n in self.children]
        return self.function(results)
    def display(self, indent=0):
        # Python 2 print statement: show this node's name, then recurse into
        # the subtree with one extra space of indentation per level.
        print '%s%s' % (' '*indent, self.name)
        [child.display(indent+1) for child in self.children]
class paramnode:
    """Leaf GP-tree node that returns one of the input parameters."""
    def __init__(self, idx):
        # Index into the input sequence supplied to evaluate().
        self.idx = idx
    def evaluate(self, inp):
        return inp[self.idx]
    def display(self, indent=0):
        # Shown as e.g. "p0" for parameter index 0 (Python 2 print statement).
        print '%sp%d' % (' ' * indent, self.idx)
class constnode:
    """Leaf GP-tree node that always evaluates to a fixed value."""
    def __init__(self, value):
        self.value = value
    def evaluate(self, inp):
        # The input is ignored; a constant evaluates to itself.
        return self.value
    def display(self, indent):
        # NOTE(review): unlike the sibling node types, `indent` has no
        # default here, and %d assumes the constant is an integer.
        print '%s%d' % (' ' * indent, self.value)
| {
"repo_name": "apatti/apatti_ml",
"path": "genetic_programing/gptree.py",
"copies": "1",
"size": "1038",
"license": "mit",
"hash": 2438494341723220000,
"line_mean": 19.76,
"line_max": 60,
"alpha_frac": 0.5741811175,
"autogenerated": false,
"ratio": 3.6167247386759582,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4690905856175958,
"avg_score": null,
"num_lines": null
} |
__author__ = 'apatti'
from matplotlib import pyplot as plt
import numpy as np
class MultiPanelPlot:
    """Static demos of matplotlib multi-panel layouts: inset axes, subplot
    grids, shared-axis subplots and GridSpec slicing. Each method opens a
    blocking window via plt.show()."""
    @staticmethod
    def insetPlot():
        """
        The add_axes method allows you to create an axes instance by specifying the size relative to the figure edges.
        The argument is [left, bottom, width, height] which specifies the axes extent in fractions of the figure size (i.e. between 0 and 1):
        """
        fig = plt.figure()
        main_ax = fig.add_axes([0.1,0.1,0.8,0.8])
        inset_ax = fig.add_axes([0.6,0.6,0.25,0.25])
        main_ax.plot(np.random.rand(100),color="gray")
        inset_ax.plot(np.random.rand(20),color="green")
        plt.show()
        pass
    @staticmethod
    def simpleMultiPanel():
        """
        create a simple multi-panel graph using add_subplot
        :return: None
        """
        fig = plt.figure()
        # 2x3 grid; add_subplot indices are 1-based.
        for i in range(1,7):
            ax = fig.add_subplot(2,3,i)
            #to add text
            #ax.text(0.45,0.45,str(i),fontsize=24)
            ax.plot(np.random.rand(20))
        #to add spaces.
        fig.subplots_adjust(left=0.1, right=0.9,
                            bottom=0.1, top=0.9,
                            hspace=0.4, wspace=0.4)
        plt.show()
    @staticmethod
    def multipleSubplots():
        # plt.subplots returns the figure plus a 2-D array of axes;
        # sharex/sharey link axis limits across all panels.
        fig,axes = plt.subplots(nrows=2,ncols=3,sharex=True,sharey=True)
        for i in range(2):
            for j in range(3):
                axes[i][j].plot(np.random.rand(20))
                #axes[i][j].text(0.45,0.45,str((i,j)),fontsize=24)
        plt.show()
    @staticmethod
    def gridSpecPlot():
        """
        GridSpec is the highest-level routine for creating subplots.
        It's an abstract object that allows the creation of multi-row or multi-column subplots via an intuitive slicing interface
        :return: None
        """
        gs = plt.GridSpec(3,3,wspace=0.4,hspace=0.4) # a 3x3 grid
        fig = plt.figure(figsize=(6,6))
        fig.add_subplot(gs[1,:2])
        fig.add_subplot(gs[0,:2])
        fig.add_subplot(gs[:2,2])
        fig.add_subplot(gs[2,1:])
        fig.add_subplot(gs[2,0])
        plt.show()
if __name__ == '__main__':
    # Run each demo in turn; every call blocks until its window is closed.
    MultiPanelPlot.insetPlot()
    MultiPanelPlot.simpleMultiPanel()
    MultiPanelPlot.multipleSubplots()
    MultiPanelPlot.gridSpecPlot()
"repo_name": "apatti/apatti_ml",
"path": "python_data_examples/matplot_example.py",
"copies": "1",
"size": "2311",
"license": "mit",
"hash": 5859317978782247000,
"line_mean": 31.1111111111,
"line_max": 141,
"alpha_frac": 0.5716140199,
"autogenerated": false,
"ratio": 3.329971181556196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4401585201456196,
"avg_score": null,
"num_lines": null
} |
__author__ = 'apatti'
import numpy as np
class sModel:
    """Partial Sudoku solver over a 4-D numpy board (Python 2 code).

    The board is indexed as [band_row, band_col, row_in_band, col_in_band],
    i.e. three 3x3 "bands" in each direction; 0 marks an empty cell.
    """
    def __init__(self):
        self.__table = np.zeros(shape=(3, 3, 3, 3), dtype=int)
        # Linear positions 0..80 still to fill. Python 2: range() returns a
        # list, which is required for .remove() below (breaks on Python 3).
        self.__tofill = range(0, 81)
        # position -> candidate digits, filled by __solvesimple.
        self.__possiblenumbers={}
    def display(self):
        # Print the 9 rows, each as three 3-element slices (one per band).
        #print self.__table
        print self.__table[0,0,0,:],self.__table[0,1,0,:],self.__table[0,2,0,:]
        print self.__table[0,0,1,:],self.__table[0,1,1,:],self.__table[0,2,1,:]
        print self.__table[0,0,2,:],self.__table[0,1,2,:],self.__table[0,2,2,:]
        print self.__table[1,0,0,:],self.__table[1,1,0,:],self.__table[1,2,0,:]
        print self.__table[1,0,1,:],self.__table[1,1,1,:],self.__table[1,2,1,:]
        print self.__table[1,0,2,:],self.__table[1,1,2,:],self.__table[1,2,2,:]
        print self.__table[2,0,0,:],self.__table[2,1,0,:],self.__table[2,2,0,:]
        print self.__table[2,0,1,:],self.__table[2,1,1,:],self.__table[2,2,1,:]
        print self.__table[2,0,2,:],self.__table[2,1,2,:],self.__table[2,2,2,:]
    def writerow(self, row, numbers):
        # Write one full 9-digit row; non-zero cells are marked filled by
        # removing their linear position from __tofill.
        colindex=0
        tableindex=row/3  # Python 2 integer division: band-row index
        for index, number in enumerate(numbers):
            self.__table[tableindex, colindex, row % 3, index % 3] = number
            if number != 0:
                self.__tofill.remove((row * 9) + index)
            if index % 3 == 2:
                # Advance to the next 3-column band every third digit.
                colindex += 1
    def getrow(self, row):
        # Return row `row` (0..8) as a 3x3 view: one 3-slice per band.
        a = (row / 3)%3
        b = row % 3
        return self.__table[a, :, b, :]
    def getadjacentrow(self,position):
        # Collect the distinct non-zero values of the other two rows in the
        # same band.
        # NOTE(review): `position % 9` gives a column for a linear 0..80
        # position; the row would be position // 9 — confirm intent.
        currentrow = position % 9
        adjacent = []
        if currentrow % 3 == 0:
            adjacent.extend(self.getrow(currentrow+1).flatten().tolist())
            adjacent.extend(self.getrow(currentrow+2).flatten().tolist())
        if currentrow % 3 == 1:
            adjacent.extend(self.getrow(currentrow-1).flatten().tolist())
            adjacent.extend(self.getrow(currentrow+1).flatten().tolist())
        if currentrow % 3 == 2:
            adjacent.extend(self.getrow(currentrow-1).flatten().tolist())
            adjacent.extend(self.getrow(currentrow-2).flatten().tolist())
        adjacent = list(set(adjacent))
        # NOTE(review): raises ValueError when no empty (0) cell is present.
        adjacent.remove(0)
        return adjacent
    def getcol(self, col):
        # Return column `col` (0..8) as a 3x3 view: one 3-slice per band.
        a = (col/3) % 3
        b = col % 3
        return self.__table[:, a, :, b]
    def getadjacentcol(self,position):
        # NOTE(review): this calls getrow(), apparently copy-pasted from
        # getadjacentrow — it presumably should call getcol; verify.
        currentcol = position % 9
        adjacent = []
        if currentcol % 3 == 0:
            adjacent.extend(self.getrow(currentcol+1).flatten().tolist())
            adjacent.extend(self.getrow(currentcol+2).flatten().tolist())
        if currentcol % 3 == 1:
            adjacent.extend(self.getrow(currentcol-1).flatten().tolist())
            adjacent.extend(self.getrow(currentcol+1).flatten().tolist())
        if currentcol % 3 == 2:
            adjacent.extend(self.getrow(currentcol-1).flatten().tolist())
            adjacent.extend(self.getrow(currentcol-2).flatten().tolist())
        adjacent = list(set(adjacent))
        adjacent.remove(0)
        return adjacent
    def getblock(self, block):
        # Return 3x3 block `block` (0..8), numbered left-to-right,
        # top-to-bottom.
        a = block/3
        b = block % 3
        return self.__table[a, b, :, :]
    def getpeers(self,position):
        # Distinct non-zero values sharing this cell's row, column or block.
        # row, col and block
        row = position / 9
        col = position % 9
        block = (row/3)*3+col/3
        peers = self.getcol(col).flatten().tolist()
        peers.extend(self.getrow(row).flatten().tolist())
        peers.extend(self.getblock(block).flatten().tolist())
        peers = list(set(peers))
        # NOTE(review): raises ValueError if the cell has no empty peers.
        peers.remove(0)
        return peers
    def getpossiblenumbers(self,position):
        # Digits 1..9 not yet used by any peer of this cell.
        peers = self.getpeers(position)
        #peers.extend(self.getadjacentcol(position))
        #peers.extend(self.getadjacentrow(position))
        return list(set(range(1, 10)) - set(peers))
    def __solvesimple(self):
        # take each zero number box in row and check what all can fit.
        # NOTE(review): the write index [position/27, position%3, position%3,
        # position%3] disagrees with writerow's mapping (band, colindex,
        # row%3, col%3) — suspected bug. Also mutates __tofill while
        # iterating it, which skips elements; iterate a copy instead.
        for position in self.__tofill:
            possiblenumbers = self.getpossiblenumbers(position)
            if len(possiblenumbers)==1:
                self.__table[position/27,position%3,position%3,position%3]=possiblenumbers[0]
                self.__tofill.remove(position)
                print position,"Filled!!"
            else:
                self.__possiblenumbers[position]=self.getpossiblenumbers(position)
        pass
    def __solveblock(self):
        # Placeholder for block-level deduction (not implemented).
        pass
    def solve(self):
        # Currently only the naked-single pass is applied.
        self.__solvesimple()
"repo_name": "apatti/apatti_ml",
"path": "sudoku/model.py",
"copies": "1",
"size": "4410",
"license": "mit",
"hash": 6245762479828066000,
"line_mean": 36.0672268908,
"line_max": 93,
"alpha_frac": 0.5585034014,
"autogenerated": false,
"ratio": 3.458823529411765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9442374167499906,
"avg_score": 0.014990552662371655,
"num_lines": 119
} |
__author__ = 'apatti'
import requests
from lxml import html
import csv
class BowlerScrapper:
    """Scrapes per-over bowler figures from ESPNcricinfo commentary pages."""
    @staticmethod
    def scrap(matchUrl):
        """Scrape one match; return {innings_number: [over-detail dicts]}."""
        print("Working on {}".format(matchUrl))
        inningData = {}
        for innings in [1, 2]:
            url = "http://www.espncricinfo.com{}?innings={};view=commentary".format(matchUrl, innings)
            htmlTree = BowlerScrapper.__gethtmldata(url)
            htmlElements = htmlTree.xpath('//div[@class="end-of-over-info"]')
            fullDetails = []
            bowlerDetails = {}
            for element in htmlElements:
                overDetail = {}
                # join-of-digits works on both Py2 and Py3 (filter() returns
                # an iterator on Py3, so int(filter(...)) broke there).
                overDetail["over"] = int(''.join(c for c in element[0][0].text_content() if c.isdigit()))
                overDetail["bowler"] = element[1][1][0][0][0].text_content()
                if bowlerDetails.get(overDetail["bowler"]) is None:
                    bowlerDetails[overDetail["bowler"]] = {"runs": 0, "wickets": 0}
                # stat looks like "O-M-R-W"; diff against the bowler's running
                # totals to isolate this over's runs/wickets.
                overDetail["stat"] = element[1][1][0][0][1].text_content()
                runs = int(overDetail["stat"].split('-')[2])
                wickets = int(overDetail["stat"].split('-')[3])
                overDetail["runs"] = runs - bowlerDetails[overDetail["bowler"]]["runs"]
                overDetail["wickets"] = wickets - bowlerDetails[overDetail["bowler"]]["wickets"]
                bowlerDetails[overDetail["bowler"]]["runs"] += overDetail["runs"]
                bowlerDetails[overDetail["bowler"]]["wickets"] += overDetail["wickets"]
                overDetail["url"] = url
                fullDetails.append(overDetail)
            inningData[innings] = fullDetails
        return inningData
    @staticmethod
    def scrapSeries(url):
        """Scrape every match linked from a series fixtures page."""
        htmlTree = BowlerScrapper.__gethtmldata(url)
        htmlElements = htmlTree.xpath('//span[@class="play_team"]/a')
        matches = []
        for element in htmlElements:
            matchData = {}
            matchUrl = element.attrib['href']
            matchData["matchid"] = int(''.join(c for c in matchUrl[matchUrl.rindex('/'):] if c.isdigit()))
            matchData["year"] = int(''.join(c for c in matchUrl[:matchUrl.rindex('/')] if c.isdigit()))
            matchData["innings"] = BowlerScrapper.scrap(matchUrl)
            matches.append(matchData)
        return matches
    @staticmethod
    def __gethtmldata(url):
        # Fetch a page and parse it into an lxml element tree.
        page = requests.get(url)
        return html.fromstring(page.content)
    @staticmethod
    def saveToCsv(filename, data):
        """Flatten scraped match data into a CSV file.

        Bug fix: the original iterated the module-level global `matches`
        instead of the `data` parameter, so the argument was ignored.
        ('w' text mode replaces Py2-only 'wb' so csv.writer works on Py3.)
        """
        with open(filename, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(["matchid", "year", "innings", "bowler", "over", "runs", "wickets", "stat", "url"])
            for matchData in data:
                matchid = matchData["matchid"]
                year = matchData["year"]
                for inning, inningData in matchData["innings"].items():
                    for bowlerData in inningData:
                        writer.writerow((matchid, year, inning,
                                         bowlerData["bowler"], bowlerData["over"], bowlerData["runs"],
                                         bowlerData["wickets"], bowlerData["stat"], bowlerData["url"]))
if __name__ == '__main__':
    # Scrape three IPL seasons and dump every over into a single CSV.
    season_urls = (
        "http://www.espncricinfo.com/indian-premier-league-2016/content/series/968923.html?template=fixtures",
        "http://www.espncricinfo.com/indian-premier-league-2015/content/series/791129.html?template=fixtures",
        "http://www.espncricinfo.com/indian-premier-league-2014/content/series/695871.html?template=fixtures",
    )
    matches = []
    for season_url in season_urls:
        matches.extend(BowlerScrapper.scrapSeries(season_url))
    BowlerScrapper.saveToCsv("../data/ipl_bowler.csv", matches)
"repo_name": "apatti/cricstat",
"path": "cricscrapper/bowler.py",
"copies": "2",
"size": "3687",
"license": "mit",
"hash": -4638719609255339000,
"line_mean": 44.5308641975,
"line_max": 159,
"alpha_frac": 0.5977759696,
"autogenerated": false,
"ratio": 3.636094674556213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5233870644156212,
"avg_score": null,
"num_lines": null
} |
__author__ = 'apatti'
import unittest
from gptree import fwrapper, constnode, node, paramnode
class gbtree_test_case(unittest.TestCase):
    """Evaluates a small hand-built GP expression tree."""
    def test_complex_tree(self):
        print('test_complex_tree')
        # Ternary: pick i[1] when i[0] > 0, else i[2].
        def pick(i):
            return i[1] if i[0] > 0 else i[2]
        # Comparison producing 1/0 rather than True/False.
        def is_greater(i):
            return 1 if i[0] > i[1] else 0
        add_op = fwrapper(lambda i: i[0] + i[1], 2, 'add')
        sub_op = fwrapper(lambda i: i[0] - i[1], 2, 'sub')
        gt_op = fwrapper(is_greater, 2, '>')
        if_op = fwrapper(pick, 3, 'if')
        # if (param0 > 3) then (param1 + 5) else (param1 - 2)
        tree = node(if_op, [node(gt_op, [paramnode(0), constnode(3)]),
                            node(add_op, [paramnode(1), constnode(5)]),
                            node(sub_op, [paramnode(1), constnode(2)])])
        tree.display(2)
        self.assertEqual(tree.evaluate([2, 3]), 1)
        self.assertEqual(tree.evaluate([5, 3]), 8)
# Allow the suite to be run directly with `python gptree_unit_test.py`.
if __name__ == '__main__':
    unittest.main()
| {
"repo_name": "apatti/apatti_ml",
"path": "genetic_programing/gptree_unit_test.py",
"copies": "1",
"size": "1029",
"license": "mit",
"hash": -7665151400587928000,
"line_mean": 26.0789473684,
"line_max": 69,
"alpha_frac": 0.491739553,
"autogenerated": false,
"ratio": 3.3517915309446256,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43435310839446256,
"avg_score": null,
"num_lines": null
} |
__author__ = 'apatti'
#tutorial from kaggle
#introduction to NLP
import pandas as pd
from bs4 import BeautifulSoup
import re
import nltk
nltk.download()
from nltk.corpus import stopwords
def review_to_words(review_text):
    """Clean one raw review: strip HTML and punctuation, lowercase,
    and drop English stop words. Returns a single space-joined string.
    """
    # Bug fix: get_text() is an instance method; the original called
    # BeautifulSoup.get_text(review_text) on the raw string, which fails.
    # Parse the markup first, then extract the text.
    review_text = BeautifulSoup(review_text).get_text()
    # Keep letters only (digits and punctuation become spaces).
    review_text = re.sub("[^a-zA-Z]", " ", review_text)
    review_text = review_text.lower()
    review_words = review_text.split()
    # Searching a set is much faster than searching a list.
    stops = set(stopwords.words("english"))
    review_words = [w for w in review_words if w not in stops]
    # Further normalization (Porter stemming / lemmatizing via NLTK) could
    # merge forms like "message"/"messages"/"messaging" into one token.
    return " ".join(review_words)
if __name__ == '__main__':
    # header=0  -> first line holds the column names (id, sentiment, review)
    # quoting=3 -> ignore double quotes inside the reviews
    # train.shape is (25000, 3)
    train = pd.read_csv("labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
    clean_train_reviews = [review_to_words(review) for review in train["review"]]
    print(clean_train_reviews[1:2])
| {
"repo_name": "apatti/apatti_ml",
"path": "kaggle/bag_of_words/BagOfWords.py",
"copies": "1",
"size": "1564",
"license": "mit",
"hash": -5858346239414024000,
"line_mean": 23.8253968254,
"line_max": 105,
"alpha_frac": 0.6739130435,
"autogenerated": false,
"ratio": 3.5707762557077625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4744689299207763,
"avg_score": null,
"num_lines": null
} |
__author__ = 'apg'
#import copy
import cProfile
import time
def timeit(method):
    """Decorator: print how long the wrapped callable took, in seconds."""
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        # Single pre-formatted argument prints identically on Py2 and Py3.
        print('%r took %2.2f sec' % (method.__name__, te - ts))
        return result
    # Preserve the wrapped function's identity for debugging/reporting.
    timed.__name__ = method.__name__
    timed.__doc__ = method.__doc__
    return timed
class Alphametic:
    """Backtracking solver for two-addend alphametics such as
    'send + more == money'. Every letter maps to a distinct digit and
    leading letters may not be zero.
    """
    def __init__(self, query):
        # Parse "a + b == c". The code assumes exactly two addends;
        # NOTE(review): malformed queries are not validated here.
        self.op1 = query.split('==')[0].split('+')[0].strip()
        self.op2 = query.split('==')[0].split('+')[1].strip()
        self.res = query.split('==')[1].strip()
        # Letters per column, least significant first; addends are padded
        # with a leading space so all three lists align with the result.
        self.addend1 = [c for c in reversed(' ' + self.op1)]
        self.addend2 = [c for c in reversed(' ' + self.op2)]
        self.sum = [c for c in reversed(self.res)]
        # Unique letters, ordered column by column (least significant first)
        # so the column-sum check can prune the search early.
        length = max(len(self.op1), len(self.op2), len(self.res))
        self.nodes = []
        for i in range(length):
            for letter in (self.addend1[i], self.addend2[i], self.sum[i]):
                if letter not in self.nodes:
                    self.nodes.append(letter)
        if ' ' in self.nodes:
            self.nodes.remove(' ')
        # -1 marks "unassigned"; the pad character always contributes 0.
        self.char_map = {c: -1 for c in set(self.addend1 + self.addend2 + self.sum)}
        self.char_map[' '] = 0
    def _next_node(self):
        # First letter (in column order) with no digit yet; only called
        # when _solve has verified an unassigned letter remains.
        for node in self.nodes:
            if self.char_map[node] == -1:
                return node
    def _solve(self):
        # Depth-first search: try digits 0-9 for the next free letter and
        # recurse only on assignments that pass the consistency check.
        if -1 not in self.char_map.values():
            return True
        node = self._next_node()
        for digit in range(10):
            self.char_map[node] = digit
            if self._ok_to_use():
                if self._solve():
                    return True
        # Dead end: undo the assignment before backtracking.
        self.char_map[node] = -1
        return False
    def _ok_to_use(self):
        """Partial-assignment consistency check.

        Rejects duplicate digits, leading zeros, and any fully-assigned
        column whose digits contradict the running carry.
        """
        node_dict = {node: self.char_map[node] for node in self.nodes if self.char_map[node] != -1}
        # Distinct letters must hold distinct digits.
        if len(set(node_dict.keys())) != len(set(node_dict.values())):
            return False
        # Leading characters must be non-zero.
        if (self.char_map[self.op1[0]] == 0 or
                self.char_map[self.op2[0]] == 0 or
                self.char_map[self.res[0]] == 0):
            return False
        c = 0
        for i in range(len(self.sum)):
            s1 = self.char_map[self.addend1[i]]
            s2 = self.char_map[self.addend2[i]]
            r = self.char_map[self.sum[i]]
            if c == -1:
                # A previous column was incomplete; nothing more to verify.
                return True
            if s1 != -1 and s2 != -1 and r != -1:
                local_r = (c + s1 + s2) % 10
                # Floor division keeps the Py2 `/` semantics on Python 3.
                local_c = (c + s1 + s2) // 10
                if r != local_r:
                    return False
                # The most significant column may not overflow.
                if i == len(self.sum) - 1 and local_c > 0:
                    return False
                c = local_c
            else:
                # Column incomplete: the carry becomes unknown.
                c = -1
        return True
    @timeit
    def get_result(self):
        """Solve and return (addend1, addend2, sum) as integers, or
        (None, None, None) when no assignment exists."""
        if self._solve():
            d = self.char_map
            num1 = sum([10 ** i * d[c] for i, c in enumerate(reversed(self.op1))])
            num2 = sum([10 ** i * d[c] for i, c in enumerate(reversed(self.op2))])
            nsum = sum([10 ** i * d[c] for i, c in enumerate(reversed(self.res))])
            return num1, num2, nsum
        else:
            return None, None, None
def main():
    """Prompt for an alphametic ('a + b == c') and print its solution."""
    try:
        read_line = raw_input  # Python 2
    except NameError:
        read_line = input      # Python 3 (raw_input does not exist there)
    user_input = read_line('Please enter your query in the following format a + b == c:')
    y = Alphametic(user_input)
    result = y.get_result()
    if None in result:
        print("No solution found.")
    else:
        print(result)
if __name__ == '__main__':
    main()
    # Sample queries:
    #y = Alphametic('donald + gerald == robert')
    #y = Alphametic('send + more == money')
    #y = Alphametic('oooh + food == fight')
    #cProfile.run('y.get_result()')
"repo_name": "amitgardharia/codekata",
"path": "alphameticoo.py",
"copies": "1",
"size": "5105",
"license": "unlicense",
"hash": 6917707663163828000,
"line_mean": 33.04,
"line_max": 117,
"alpha_frac": 0.5077375122,
"autogenerated": false,
"ratio": 3.4539918809201624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44617293931201624,
"avg_score": null,
"num_lines": null
} |
import wx
import sys
## Make sure the window is focused, then press any key; the program will move one plate. Keep pressing until the problem is solved.
## By default it displays 5 plates. To change this, call the script with an extra command-line argument, which is the number of plates (max 10).
def gen_hanoi(stack, start=1, temp=2, goal=3):
    """Yield (from_peg, to_peg) moves solving Towers of Hanoi for `stack`
    plates. Pegs are numbered 1-3 (start, temp, goal).

    Bug fix: the original bottomed out at stack == 2, so gen_hanoi(1)
    recursed forever (stack 0 -> -1 -> ...). Recursing down to a single
    plate fixes that and emits the identical sequence for stack >= 2.
    """
    if stack <= 0:
        return
    if stack == 1:
        yield start, goal
    else:
        for move in gen_hanoi(stack - 1, start, goal, temp):
            yield move
        yield start, goal
        for move in gen_hanoi(stack - 1, temp, start, goal):
            yield move
class Plate(object):
    """One disc on a peg: x_len is its drawn width, x_start its left offset."""
    def __init__(self, x_len, x_start):
        self.x_len = x_len
        self.x_start = x_start
def create_plates(num):
    """Build `num` plates (widest first); each is 10px narrower and starts
    5px further right than the one below it. At most 10 plates."""
    assert num <= 10
    x_start = 10
    x_len = 100
    plates = []
    # range() behaves like Py2's xrange() here and also works on Python 3.
    for _ in range(num):
        plates.append(Plate(x_len, x_start))
        x_len -= 10
        x_start += 5
    return plates
class HanoiWindow(wx.Window):
    """Drawing surface: renders the three pegs and their plates; each key
    press advances the solver generator by one move (see OnKeyUp)."""
    def __init__(self, parent, num):
        wx.Window.__init__(self, parent, id=-1, pos = wx.Point(0, 0),
                         size=wx.DefaultSize, style=wx.SUNKEN_BORDER|
                         wx.WANTS_CHARS|wx.FULL_REPAINT_ON_RESIZE)
        self.SetBackgroundColour(wx.NamedColour('white'))
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
        # Tower 0 starts with all plates; the generator yields (from, to) pegs.
        self.towers = [create_plates(num), [], []]
        self.solver = gen_hanoi(num)
    def OnPaint(self, evt):
        """Redraw title, peg bases and every plate on every repaint."""
        def draw_rect(x_len, x_start, y_start):
            # Draw one plate as a 5px-tall rectangle outline.
            dc = wx.PaintDC(self)
            font = dc.GetFont()
            font.SetPointSize(15)
            dc.SetFont(font)
            size, colour = 2, wx.NamedColour('black')
            dc.SetPen(wx.Pen(colour, size, wx.SOLID))
            point = wx.Point(x_start, y_start)
            dc.DrawLines([point, point + wx.Point(x_len, 0)])
            dc.DrawLines([point - wx.Point(0, 5), point + wx.Point(x_len, 0) - wx.Point(0, 5)])
            dc.DrawLines([point, point - wx.Point(0, 5)])
            dc.DrawLines([point + wx.Point(x_len, 0), point - wx.Point(0, 5) + wx.Point(x_len, 0)])
        w, h = self.GetClientSizeTuple()
        # NOTE(review): `buffer` is never used afterwards -- double-buffering
        # was apparently started but not finished.
        buffer = wx.EmptyBitmap(w, h)
        dc = wx.PaintDC(self)
        font = dc.GetFont()
        font.SetPointSize(15)
        dc.SetFont(font)
        msg = 'Hanoi Towers'
        w, h = dc.GetTextExtent(msg)
        dc.DrawText(msg, 200, 20)
        size, colour = 8, wx.NamedColour('black')
        dc.SetPen(wx.Pen(colour, size, wx.SOLID))
        # One 120px base per tower, 200px apart; plates stack upwards from y=300.
        for num in xrange(len(self.towers)):
            y_start = 300
            tower = self.towers[num]
            num = num * 200 + 10
            point = wx.Point(num, y_start)
            dc.DrawLines([point, point + wx.Point(120, 0)]) #base
            ## plates
            for plate in tower:
                y_start -= 10
                draw_rect(plate.x_len, num + plate.x_start, y_start)
    def OnKeyUp(self, evt):
        """Apply the solver's next move, or report completion."""
        try:
            # .next() is Python 2 generator syntax (next(self.solver) on Py3).
            from_, to = self.solver.next()
            self.towers[to-1].append(self.towers[from_-1].pop())
            self.Refresh()
        except StopIteration:
            wx.MessageBox('Problem Solved!', 'Problem solved', wx.OK)
class HanoiFrame(wx.Frame):
    """Top-level frame hosting the Hanoi drawing window."""
    def __init__(self, title, num):
        wx.Frame.__init__(self, parent=None, id=-1, title=title,
                          size=(600, 500), pos=(200, 200))
        # The child window does all of the drawing and input handling.
        self.Window = HanoiWindow(self, num)
        self.Bind(wx.EVT_CLOSE, self.close_frame)
    def close_frame(self, evt):
        """Terminate the whole process when the frame is closed."""
        sys.exit(0)
class HanoiApp(wx.App):
    """wx application entry point; plate count comes from argv[1] (default 5)."""
    def OnInit(self):
        num = int(sys.argv[1]) if len(sys.argv) >= 2 else 5
        frame = HanoiFrame('Hanoi Towers', num)
        frame.Show(True)
        self.SetTopWindow(frame)
        return True
# Launch the GUI; press any key in the window to advance one move.
if __name__ == '__main__':
    fh = HanoiApp(0)
    fh.MainLoop()
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577511_Hanoi_Towers_solver_wxPython/recipe-577511.py",
"copies": "1",
"size": "3977",
"license": "mit",
"hash": 5366395803616002000,
"line_mean": 31.867768595,
"line_max": 150,
"alpha_frac": 0.543122957,
"autogenerated": false,
"ratio": 3.2280844155844157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.925913658046602,
"avg_score": 0.002414158423679161,
"num_lines": 121
} |
__author__ = 'apoorva'
from py2neo.database import Record
from graph_stix.config import stixGraph
from pandas import DataFrame as df
def getIPInfo(ipaddr):
    """Render all observables whose AddressValue equals `ipaddr` as HTML."""
    rows = stixGraph.run("MATCH (a:ObservableNode) WHERE a.AddressValue={x} RETURN a.ObservableID, a.ObjectID, a.IndicatorID",
                         x=ipaddr).data()
    return df(rows).to_html()
def getURIinfo(uri):
    """Render all observables whose Value equals `uri` as an HTML table."""
    records = stixGraph.run("MATCH (a:ObservableNode) WHERE a.Value={x} RETURN a.ObservableID, a.ObjectID, a.IndicatorID", x=str(uri)).data()
    return df(records).to_html()
def getMutexInfo(mutex):
    """Render all observables whose MutexValue equals `mutex` as HTML."""
    rows = stixGraph.run("MATCH (a:ObservableNode) WHERE a.MutexValue={x} RETURN a.ObservableID, a.ObjectID, a.IndicatorID", x=str(mutex)).data()
    return df(rows).to_html()
def getFileInfo(file):
    """Find the observable whose stored FileHash dict contains the hash
    `file` and render its details as HTML; returns None when nothing matches.

    Fixes vs. the original:
    - ast.literal_eval replaces eval() on strings read from the database
      (eval on stored data is an arbitrary-code-execution risk),
    - no more UnboundLocalError when no record matches,
    - the scan stops at the first match (the original `break` only left
      the inner loop, so later records silently overwrote the result).
    """
    import ast  # local import keeps the module's import block untouched
    recs = stixGraph.run("MATCH (a:ObservableNode) WHERE EXISTS(a.FileHash) RETURN a.ObservableID, a.ObjectID, a.IndicatorID, a.FileHash, a.SizeInBytes").data()
    for r in recs:
        hashes = ast.literal_eval(r["a.FileHash"])
        if file in hashes.values():
            res = df(stixGraph.run("MATCH (a:ObservableNode) WHERE a.ObservableID={x} RETURN a.ObservableID, a.ObjectID, a.IndicatorID, a.FileHash, a.SizeInBytes",
                                   x=r["a.ObservableID"]).data())
            return res.to_html()
    return None
"repo_name": "arangaraju/graph-stix",
"path": "webDemo/model.py",
"copies": "1",
"size": "1599",
"license": "mit",
"hash": 8318012064140282000,
"line_mean": 39,
"line_max": 167,
"alpha_frac": 0.6485303315,
"autogenerated": false,
"ratio": 3.1663366336633665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.927080557195253,
"avg_score": 0.00881227864216727,
"num_lines": 40
} |
__author__ = 'apoorva'
import sys
from pprint import pprint
from datetime import datetime
from lxml import etree
try:
# python-stix : Used in initial parsing, only to get stix file as a dictionary
from stix.core import STIXPackage
from stix.coa import CourseOfAction
from cybox.bindings.file_object import FileObjectType
from cybox.bindings.account_object import AccountObjectType
from cybox.bindings.email_message_object import EmailHeaderType, EmailMessageObjectType, EmailRecipientsType
from cybox.bindings.domain_name_object import DomainNameObjectType
from cybox.bindings.uri_object import URIObjectType
from cybox.bindings.address_object import AddressObjectType
from cybox.bindings.network_connection_object import NetworkConnectionObjectType
from cybox.bindings.mutex_object import MutexObjectType
from cybox.bindings.link_object import LinkObjectType
from cybox.bindings.win_registry_key_object import WindowsRegistryKeyObjectType
from cybox.common.datetimewithprecision import DateTimeWithPrecision
except ImportError:
print "Error: Could not import required libraries. Requires python-stix and python-cybox libraries. " \
"See https://stix.mitre.org/ " \
"See https://cyboxproject.github.io/"
sys.exit(-1)
try:
# Python-Neo4j interface
from py2neo import Graph, Node, Relationship, authenticate
from py2neo.database.status import ConstraintError
except ImportError:
print "Error: Could not import required libraries. Requires py2neo library. See http://py2neo.org/v3/"
sys.exit(-1)
# NOTE(review): database credentials are embedded in the connection URL --
# consider moving them to configuration or environment variables.
stixGraph = Graph("http://neo4j:neo4jtest@127.0.0.1:7474/db/data")
# WARNING: wipes the entire database every time this module is imported.
stixGraph.delete_all()
#stixGraph.run("DROP CONSTRAINT ON (k:KillChainPhaseNode)ASSERT k.ID IS UNIQUE")
#stixGraph.run("MATCH (n) DETACH DELETE n")
#Init Node
desc = "This Node will connect to LM Kill Chain, all STIX Header,Observable nodes to make sure the graph is not disconnected"
# Root node that headers/observables link to so the graph stays connected.
stixGraph.run("CREATE CONSTRAINT ON (n:InitNode) ASSERT n.NodeID IS UNIQUE")
init_node = Node("StixGraph", Description=desc, NodeID="InitNode")
stixGraph.create(init_node)
def parse_observables(observables, StixFileID, indicatorID, incidentID):
    """Create one graph node per observable in `observables`.

    (A related-object linking pass existed here once; it was already
    disabled in the original code.)
    """
    for observable in observables:
        parse_observable(observable, StixFileID, indicatorID, incidentID)
#def parse_observable(obs, StixFileID, objRelated, indicatorID, incidentID):
def parse_observable(obs, StixFileID, indicatorID, incidentID):
    # Convert one cybox observable into a neo4j ObservableNode: copy the
    # fields of the specific object type (File, Address, URI, EmailMessage,
    # Link, NetworkConnection, WinRegistryKey, Mutex) onto node properties,
    # record related objects, then link the node to its file's HeaderNode.
    obj = obs.to_obj()
    # Observables without an Object/Properties payload are skipped.
    if not obj or not hasattr(obj, "Object") or not hasattr(obj.Object, "Properties"): return
    prop = obj.Object.Properties
    stixGraph.run("CREATE CONSTRAINT ON (n:ObservableNode) ASSERT n.ObservableID IS UNIQUE")
    ObservableNode = Node("ObservableNode", ObservableID=obs.id_, ObjectID=obj.Object.id,
                          xsiType=prop.xsi_type,STIXFileID=StixFileID)
    if indicatorID: ObservableNode["IndicatorID"]= indicatorID
    if incidentID:
        ObservableNode["IncidentID"]= incidentID
    #print "Observable: " + obs.id_ #Observable ID
    #obj = obs.get('object')
    #print "Related Observable: " + obj.id
    #prop = obs.get('object').get('properties')
    #print "XSI Type: " + prop.xsi_type
    if (type(prop) == FileObjectType):
        try:
            #print "Size(in Bytes) : " + str(prop.Size_In_Bytes.valueOf_)
            ObservableNode["SizeInBytes"] = prop.Size_In_Bytes.valueOf_
        except AttributeError:
            # `size` is a write-only sentinel; the value is never read.
            size = 0
        #FileName
        try:
            # NOTE(review): FileObjectType has no Address_Value, so this try
            # most likely always falls through to the except -- confirm.
            print prop.category + ": " + prop.Address_Value
            print "File Name: " + obj.File_Name
            ObservableNode["Category"] = prop.category
            ObservableNode["AddressValue"] = prop.Address_Value
            ObservableNode["FileName"] = obj.File_Name
        except AttributeError:
            fileName = None
        if (prop.Hashes != None):
            hashType = prop.Hashes.Hash
            for i, hash in enumerate(hashType):
                h = hash.Type.valueOf_
                # NOTE(review): "SHA=384" looks like a typo for "SHA-384";
                # verify against the hash type names in the feed.
                if ( h == "MD5" or h == "MD6" or h == "SHA1" or h == "SHA224" or
                             h == "SHA256" or h == "SHA=384" or h == "SHA512"):
                    hashVal = hash.Simple_Hash_Value.valueOf_
                elif ( h == "SSDEEP"):
                    hashVal = hash.Fuzzy_Hash_Value.valueOf_
                else:
                    hashVal = 0 #hash.Fuzzy_Hash_Structure
                    hashType = "Fuzzy Structure"
                #print "Hash(Type : Value) " + str(h) + ":" + str(hashVal)
                # Hash type name (e.g. "MD5") becomes the property key.
                if h!= None:
                    ObservableNode[h]=hashVal
    elif (type(prop) == AddressObjectType):
        ObservableNode["Category"] = prop.category
        ObservableNode["AddressValue"] = prop.Address_Value.valueOf_
        ObservableNode["ApplyCondition"] = prop.Address_Value.apply_condition
        ObservableNode["Condition"] = prop.Address_Value.condition
    elif (type(prop) == URIObjectType):
        ObservableNode["Type"] = prop.type_
        ObservableNode["ApplyCondition"] = prop.Value.apply_condition
        #ObservableNode["Condition"] = prop.Value.condition
        #ObservableNode["Delimiter"] = prop.Value.delimiter
        ObservableNode["Value"] = prop.Value.valueOf_
    elif type(prop) == EmailMessageObjectType:
        #Email Header has the following attributes: message_id, from, sender, subject
        emailHeader = prop.Header
        if emailHeader:
            if emailHeader.Message_ID:
                ObservableNode["MessageID"] = emailHeader.Message_ID.valueOf_
            if emailHeader.From:
                #print category/xsi_type/address of the From header
                ObservableNode["From_Category"] = emailHeader.From.category
                ObservableNode["From_xsiType"] = emailHeader.From.xsi_type
                ObservableNode["From_AddressValue"] = emailHeader.From.Address_Value.valueOf_
            if emailHeader.Sender:
                #print category/xsi_type/address of the Sender header
                ObservableNode["Sender_Category"] = emailHeader.Sender.category
                ObservableNode["Sender_xsiType"] = emailHeader.Sender.xsi_type
                ObservableNode["Sender_AddressValue"] = emailHeader.Sender.Address_Value.valueOf_
            if emailHeader.Subject:
                #print subject apply_condition/condition/delimiter/value
                ObservableNode["Subject_ApplyCondition"] = emailHeader.Subject.apply_condition
                ObservableNode["Subject_Condition"] = emailHeader.Subject.condition
                ObservableNode["Subject_Delimiter"] = emailHeader.Subject.delimiter
                ObservableNode["Subject_Value"] = emailHeader.Subject.valueOf_
        #Email Attachments
        if prop.Attachments:
            emailAttachments = prop.Attachments.File
            # One property per attachment, keyed "EmailAttachment#<i>".
            for i, attach in enumerate(emailAttachments):
                #print "\t" + attach.object_reference
                em = "EmailAttachment#" + str(i)
                ObservableNode[em] = attach.object_reference
    elif( type(prop) == LinkObjectType):
        if prop.type_ : ObservableNode["Type"] = prop.type_
        if prop.xsi_type: ObservableNode["xsiType"] = prop.xsi_type
        # NOTE(review): URL_Label and Value both write the same Link_* keys,
        # so Value silently overwrites URL_Label when both are present.
        if prop.URL_Label.apply_condition: ObservableNode["Link_ApplyCondition"] = prop.URL_Label.apply_condition
        if prop.URL_Label.pattern_type : ObservableNode["Link_PatternType"] = prop.URL_Label.pattern_type
        if prop.URL_Label.condition : ObservableNode["Link_Condition"] = prop.URL_Label.condition
        if prop.URL_Label.delimiter: ObservableNode["Link_Delimiter"] = prop.URL_Label.delimiter
        if prop.URL_Label.valueOf_ : ObservableNode["Link_Value"] = prop.URL_Label.valueOf_
        if prop.Value.apply_condition: ObservableNode["Link_ApplyCondition"] = prop.Value.apply_condition
        if prop.Value.condition : ObservableNode["Link_Condition"] = prop.Value.condition
        if prop.Value.delimiter: ObservableNode["Link_Delimiter"] = prop.Value.delimiter
        if prop.Value.valueOf_ : ObservableNode["Link_Value"] = prop.Value.valueOf_
    elif ( type(prop) == NetworkConnectionObjectType):
        if prop.Creation_Time:
            ObservableNode["CreationTime"]= str(prop.Creation_Time)
        if prop.Destination_TCP_State:
            ObservableNode["DestinationTCPState"]= str(prop.Destination_TCP_State)
        if prop.Source_TCP_State:
            ObservableNode["SourceTCPState"]= str(prop.Source_TCP_State)
        if prop.Layer3_Protocol:
            ObservableNode["Layer3Protocol"] = str(prop.Layer3_Protocol)
        if prop.Layer4_Protocol:
            ObservableNode["Layer4Protocol"] = str(prop.Layer4_Protocol)
        if prop.Layer7_Protocol:
            ObservableNode["Layer7Protocol"] = str(prop.Layer7_Protocol)
        if prop.Layer7_Connections:
            ObservableNode["Layer7Connections"] = str(prop.Layer7_Connections)
        if prop.Source_Socket_Address:
            ObservableNode["SourceSocketAddress"] = str(prop.Source_Socket_Address)
        # Should be expanded
        if prop.Destination_Socket_Address:
            ObservableNode["DestinationSocketAddress"] = str(prop.Destination_Socket_Address)
        if prop.xsi_type:
            ObservableNode["xsiType"] = prop.xsi_type
    elif ( type(prop)== WindowsRegistryKeyObjectType):
        if prop.Byte_Runs: ObservableNode["ByteRuns"] = prop.Byte_Runs
        if prop.Creator_Username: ObservableNode["CreatorUsername"]= prop.Creator_Username
        if prop.Hive: ObservableNode["HiveValue"] = prop.Hive.valueOf_
        if prop.Key: ObservableNode["KeyValue"] = prop.Key.valueOf_
        # One ValueName<i>/ValueData<i> property pair per registry value.
        for i,val in enumerate(prop.Values.Value):
            vn = "ValueName"+str(i)
            vd = "ValueData"+str(i)
            if val.Data: ObservableNode[vd] = val.Data.valueOf_
            if val.Name:ObservableNode[vn]=val.Name.valueOf_
        if prop.xsi_type: ObservableNode["xsiType"] = prop.xsi_type
        #print "HandleWindowsRegistryKeyObjectType Win Registry Key Object"
    elif (type(prop)== MutexObjectType):
        ObservableNode["xsiType"] = prop.xsi_type
        ObservableNode["MutexValue"]=prop.Name.valueOf_
    else:
        # Unhandled cybox type: store only the xsi type and report it.
        ObservableNode["xsiType"] = prop.xsi_type
        print "Handle "+ prop.xsi_type
    if obj.Object.Related_Objects:
        # Record every related object id (and a few typed extras) as
        # RelatedObject*<i> properties on the same node.
        reltd = obj.Object.Related_Objects.Related_Object
        for i,reltdObj in enumerate(reltd):
            ObservableNode["RelatedObjectID"+str(i)] = reltdObj.id
            #if obj.Object.id and reltdObj.id:
            #objRelated[str(obj.Object.id)] = str(reltdObj.id)
            if (type(reltdObj.Properties) == MutexObjectType):
                '''
                print "Handle Mutex Type Object"
                print " Properties : \n\t"+reltdObj.Properties.Name.apply_condition+"\n\t"
                print reltdObj.Properties.Name.condition+"\n\t"
                print reltdObj.Properties.Name.delimiter+"\n\t"
                print reltdObj.Properties.Name.valueOf_
                '''
                ObservableNode["RelatedObjMutexValue"+str(i)]= str(reltdObj.Properties.Name.valueOf_)
            elif (type(reltdObj.Properties) == FileObjectType):
                #print "Handle File Object"
                ObservableNode["RelatedObjFileName"+str(i)]= reltdObj.Properties.File_Name.valueOf_
                ObservableNode["RelatedObjFileExtension"+str(i)]= reltdObj.Properties.File_Extension.valueOf_
            elif (type(reltdObj.Properties) == AddressObjectType):
                ObservableNode["RelatedObjAddressValue"+str(i)]= str(reltdObj.Properties.Address_Value.valueOf_)
            else:
                print "Related Object to be handled"
    try:
        # Link the observable to its file's header so the graph stays
        # connected; AttributeError covers a missing HeaderNode (find_one
        # returned None).
        headNode = stixGraph.find_one("HeaderNode", property_key="STIXFileID", property_value=StixFileID)
        rel = Relationship(headNode, "HeaderObservableLink", ObservableNode, STIXFileID=StixFileID,
                           connect="To make sure graph isn't disconnected")
        stixGraph.merge(rel)
    except AttributeError:
        pass
    '''
    obsNode = stixGraph.find_one("ObservableNode", property_key="ObjectID", property_value=obj.id)
    if obsNode:
        relObs = Relationship(ObservableNode, "ObservableLink", obsNode, RelatedObservableID=obs.id_)
        stixGraph.merge(relObs)
    '''
def parse_header(header, StixFileID):
    # Build a HeaderNode from the STIX package header (title, description,
    # produced time, package intent, TLP marking) and link it to the
    # module-level init_node so the graph stays connected.
    #print "***********************************************HEADER*********************************************"
    head = header.to_obj()
    HeaderNode = Node("HeaderNode",Title=header.title, Description= str(header.description), STIXFileID=StixFileID)
    # NOTE(review): only the last Description's value survives this loop,
    # and `desc` is never used afterwards (the prints are commented out).
    for h in head.Description:
        desc = h.valueOf_
        #print "Description:\n" + desc + "\n"
    # Fall back to "now" when the package carries no Produced_Time.
    try:
        head_date = head.Information_Source.Time.Produced_Time.valueOf_
    except AttributeError:
        now = datetime.today()
        head_date = str(now.strftime("%Y-%m-%dT%H:%M:%S+00:00")) #If there is no timestamp, adding today's timestamp..!!
    # Split the ISO-8601 string "YYYY-MM-DDTHH:MM:SS+00:00" into parts.
    dt, tm = head_date.split("T", 1)
    year, month, day = dt.split("-", 2)
    tm2, ms = tm.split("+", 1)
    HH, MM, SS = tm2.split(":", 2)
    HeaderNode["ProducedDate"]= dt
    HeaderNode["ProducedTime"] = tm
    # NOTE(review): `months` feeds only the commented-out prints below.
    months = {"01": 'Jan', "02": 'Feb', "03": 'Mar', "04": 'Apr', "05": 'May', "06": 'Jun', "07": 'Jul',
              "08": 'Aug', "09": 'Sep', "10": 'Oct', "11": 'Nov', "12": 'Dec'}
    #print "Produced Date:" + day + " " + str(months.get(month)) + ", " + year
    #print "Produced Time(24 HR):" + HH + ":" + MM + ":" + SS
    #+"Produced Time Precision: "+ head.Information_Source.Time.Produced_Time.precision
    for pkInt in head.Package_Intent:
        #print "XSI-TYPE: " + pkInt.xsi_type
        HeaderNode["PackageIntent"]=pkInt.valueOf_
    # TLP-style marking: keep the colour and xmlns prefix of each structure.
    for mark in head.Handling.Marking:
        for struc in mark.Marking_Structure:
            color = struc.color
            HeaderNode["MarkingColor"]= color
            #struc.id
            #struc.idref
            #struc.marking_model_name
            #struc.marking_model_ref
            #print "\t\t XML-TYPE: " + struc.xml_type
            HeaderNode["Marking_XMLNS_Prefix"]= struc.xmlns_prefix
    rel = Relationship(init_node, "HeaderGraphLink", HeaderNode, connect="To make sure graph isn't disconnected",
                       STIXFileID=StixFileID)
    stixGraph.merge(rel)
def parse_indicator(indicator, id, kill_chains, kill_chain_phases, StixFileID):
#print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
keyIndValueList = []
desc = "Indicators contains threat information and how to handle them within its observables, kill_chain phases etc.Connected to initNode"
stixGraph.run("CREATE CONSTRAINT ON (n:IndicatorNode) ASSERT n.ID IS UNIQUE")
IndicatorNode = Node("IndicatorNode", Description=desc, STIXFileID=StixFileID, ID=indicator.id_)
#rel = Relationship(init_node, "IndicatorGraphLink", IndicatorNode, connect="To make sure graph isn't disconnected",STIXFileID=StixFileID)
if indicator.confidence: IndicatorNode["Confidence"] = str(indicator.confidence)
if indicator.handling: IndicatorNode["Handling"] = str(indicator.handling)
if indicator.information_source: IndicatorNode["InformationSource"] = str(indicator.information_source)
if indicator.likely_impact: IndicatorNode["LikelyImpact"] = str(indicator.likely_impact)
if indicator.negate: IndicatorNode["IndicatorNegate"] = indicator.negate
if indicator.producer: IndicatorNode["Producer"] = str(indicator.producer)
if indicator.short_description: IndicatorNode["ShortDescription"] = str(indicator.short_descriptions)
if indicator.suggested_coas: IndicatorNode["SuggestedCOA"] = str(indicator.suggested_coas)
if indicator.timestamp: IndicatorNode["Timestamp"] = str(indicator.timestamp)
if indicator.title: IndicatorNode["Title"] = indicator.title
if indicator.version: IndicatorNode["Version"] = indicator.version
if indicator.observable_composition_operator:
IndicatorNode["CompositeIndicatorOperator"] = indicator.observable_composition_operator
if indicator.description:
if indicator.description.value: IndicatorNode["IndicatorDescription"] = indicator.description.value
if indicator.sightings:
IndicatorNode["SightingsCount"] = indicator.sightings.sightings_count
for s in indicator.sightings:
if s.timestamp and s.timestamp_precision:
IndicatorNode["SightingsTimestamp"] = str(s.timestamp)
IndicatorNode["SightingsTimestampPrecision"] = str(s.timestamp_precision)
#print "Timestamp " + str(s.timestamp) + " with precision upto " + s.timestamp_precision
if s.confidence: IndicatorNode["SightingsConfidence"] = s.confidence
if s.description: IndicatorNode["SightingsDescription"] = s.description
if s.reference: IndicatorNode["SightingsReference"] = s.reference
if s.related_observables: IndicatorNode["SightingsRelatedObservables"] = pprint(s.related_observables)
if s.source: IndicatorNode["SightingsSource"] = pprint(s.source)
if indicator.test_mechanisms:
for tm in indicator.test_mechanisms:
if tm.id_: IndicatorNode["IndicatorTestMechanismID"]= tm.id_
if tm.producer:
if tm.producer.identity:
IndicatorNode["IndicatorTestMechanismProducerName"]= tm.producer.identity.name
IndicatorNode["IndicatorTestMechanismProducerID"]= tm.producer.identity.id_
if tm.efficacy:
IndicatorNode["IndicatorTestMechanismEfficacy"]= tm.efficacy.value.value
IndicatorNode["IndicatorTestMechanismEfficacyTimestamp"]= str(tm.efficacy.timestamp)
if tm._XSI_TYPE == "stix-openioc:OpenIOC2010TestMechanismType":
IndicatorNode["IOC"] = etree.tostring(tm.ioc)
elif tm._XSI_TYPE == "snortTM:SnortTestMechanismType":
for i,rule in enumerate(tm.rules):
IndicatorNode["SnortRule"+str(i)]= rule.value
elif tm._XSI_TYPE =="yaraTM:YaraTestMechanismType":
IndicatorNode["YaraRule"]= str(tm.rule)
else:
print "New test mechanism to be handled: "+tm._XSI_TYPE
if indicator.indicator_types:
stixGraph.run("CREATE CONSTRAINT ON (n:AllowedIndicatorTypesNode) ASSERT n.Description is UNIQUE")
AllowedIndicatorTypesNode = Node("AllowedIndicatorTypesNode", Description="All allowed Indicator Types")
stixGraph.run("CREATE CONSTRAINT ON (n:IndicatorTypeNode) ASSERT n.IndicatorType is UNIQUE")
for keyInd in indicator.indicator_types:
if keyInd.value: IndicatorNode["IndicatorTypeValue"] = keyInd.value
keyIndValueList.append(keyInd.value)
if keyInd.xsi_type: IndicatorNode["xsiType"] = keyInd.xsi_type
AllowedIndicatorTypesNode[keyInd.TERM_ANONYMIZATION] = "TERM_ANONYMIZATION"
AllowedIndicatorTypesNode[keyInd.TERM_C2] = "TERM_C2"
AllowedIndicatorTypesNode[keyInd.TERM_COMPROMISED_PKI_CERTIFICATE] = "TERM_COMPROMISED_PKI_CERTIFICATE"
AllowedIndicatorTypesNode[keyInd.TERM_DOMAIN_WATCHLIST] = "TERM_DOMAIN_WATCHLIST"
AllowedIndicatorTypesNode[keyInd.TERM_EXFILTRATION] = "TERM_EXFILTRATION"
AllowedIndicatorTypesNode[keyInd.TERM_FILE_HASH_WATCHLIST] = "TERM_FILE_HASH_WATCHLIST"
AllowedIndicatorTypesNode[keyInd.TERM_HOST_CHARACTERISTICS] = "TERM_HOST_CHARACTERISTICS"
AllowedIndicatorTypesNode[keyInd.TERM_IMSI_WATCHLIST] = "TERM_IMSI_WATCHLIST"
AllowedIndicatorTypesNode[keyInd.TERM_MALWARE_ARTIFACTS] = "TERM_MALWARE_ARTIFACTS"
AllowedIndicatorTypesNode[keyInd.TERM_LOGIN_NAME] = "TERM_LOGIN_NAME"
AllowedIndicatorTypesNode[keyInd.TERM_IMEI_WATCHLIST] = "TERM_IMEI_WATCHLIST"
AllowedIndicatorTypesNode[keyInd.TERM_IP_WATCHLIST] = "TERM_IP_WATCHLIST"
AllowedIndicatorTypesNode[keyInd.TERM_URL_WATCHLIST] = "TERM_URL_WATCHLIST"
AllowedIndicatorTypesNode[keyInd.TERM_MALICIOUS_EMAIL] = "TERM_MALICIOUS_EMAIL"
if keyInd.TERM_ANONYMIZATION != None :
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_ANONYMIZATION)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_ANONYMIZATION")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_C2 != None :
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_C2)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_C2")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_COMPROMISED_PKI_CERTIFICATE:
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_COMPROMISED_PKI_CERTIFICATE)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_COMPROMISED_PKI_CERTIFICATE")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_DOMAIN_WATCHLIST != None :
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_DOMAIN_WATCHLIST)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_DOMAIN_WATCHLIST")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_EXFILTRATION != None :
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_EXFILTRATION)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_EXFILTRATION")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_FILE_HASH_WATCHLIST != None :
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_FILE_HASH_WATCHLIST)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_FILE_HASH_WATCHLIST")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_HOST_CHARACTERISTICS != None :
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_HOST_CHARACTERISTICS)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_HOST_CHARACTERISTICS")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_IMEI_WATCHLIST != None :
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_IMEI_WATCHLIST)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_IMEI_WATCHLIST")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_IMSI_WATCHLIST != None :
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_IMSI_WATCHLIST)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_IMSI_WATCHLIST")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_IP_WATCHLIST != None :
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_IP_WATCHLIST)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_IP_WATCHLIST")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_LOGIN_NAME:
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_LOGIN_NAME)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_LOGIN_NAME")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_MALICIOUS_EMAIL:
nodeType = Node("IndicatorTypeNode", Description="To Group Indicators based on their Type",
IndicatorTypeValue= keyInd.TERM_MALICIOUS_EMAIL)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_MALICIOUS_EMAIL")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_MALWARE_ARTIFACTS != None:
nodeType = Node("IndicatorTypeNode", Description="To Group Indicators based on their Type",
IndicatorTypeValue=keyInd.TERM_MALWARE_ARTIFACTS)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_MALWARE_ARTIFACTS")
stixGraph.merge(relIndType)
except ConstraintError:
pass
if keyInd.TERM_URL_WATCHLIST != None:
nodeType = Node("IndicatorTypeNode",Description="To Group Indicators based on their Type",
IndicatorTypeValue = keyInd.TERM_URL_WATCHLIST)
try:
relIndType= Relationship(nodeType, "IndicatorTypesLink", AllowedIndicatorTypesNode,
IndicatorType = "TERM_URL_WATCHLIST")
stixGraph.merge(relIndType)
except ConstraintError:
pass
try:
relAllowInit = Relationship(init_node, "AllowedIndicatorTypesGraphLink", AllowedIndicatorTypesNode,
connect="Easy to find indicators of a particular type")
stixGraph.merge(relAllowInit)
except ConstraintError:
pass
if indicator.kill_chain_phases:
for phase in indicator.kill_chain_phases:
'''
if phase.kill_chain_id:
print "Kill Chain ID- " + str(phase.kill_chain_id)
print "Kill Chain Name: " + str(kill_chains[phase.kill_chain_id])
if phase.kill_chain_name: print "\tKill Chain Name- " + str(phase.kill_chain_name)
if phase.phase_id: print "Phase ID- " + str(phase.phase_id)
if phase.name: print "\tPhase Name- " + str(phase.name)
if phase.ordinality: print "\tOrdinality- " + str(phase.ordinality)
'''
######################CONNECT kill chain phase TO TTPKillChainNode's kill chain phase ?
phaseNode = stixGraph.find_one("KillChainPhaseNode", property_key="PhaseName", property_value=phase.name)
#stixGraph.run("CREATE CONSTRAINT ON (n:IndicatorNode) ASSERT n.ID IS UNIQUE")
if phaseNode and IndicatorNode:
relPhase = Relationship(phaseNode, "IndicatorKillChainPhaseLink", IndicatorNode, ID=indicator.id_, KillChainID=phase.kill_chain_id,
PhaseID = phase.phase_id)
try:
stixGraph.merge(relPhase)
except AttributeError:
pass
if indicator.composite_indicator_expression:
headNode = stixGraph.find_one("HeaderNode", property_key="STIXFileID", property_value=StixFileID)
stixGraph.run("CREATE CONSTRAINT ON (n:IndicatorNode) ASSERT n.ID IS UNIQUE")
if headNode and IndicatorNode:
relHead = Relationship(headNode, "HeaderIndicatorLink", IndicatorNode, ID=indicator.id_, STIXFileID=StixFileID,
CompositeIndicatorOperator=indicator.observable_composition_operator)
try:
stixGraph.merge(relHead)
except ConstraintError:
pass
except AttributeError:
pass
#stixGraph.merge(rel)
if indicator.observables:
for obs in indicator.observables:
#obs, StixFileID, objRelated, indicatorID, incidentID
parse_observable(obs, StixFileID, indicator.id_, None)
obsNode = stixGraph.find_one("ObservableNode", property_key="ObservableID", property_value= obs.id_)
if obsNode:
relInd = Relationship(IndicatorNode,"IndicatorObservableLink",
obsNode, STIXFileID= StixFileID,
IndicatorID = indicator.id_,
ObservableID = obs.id_ ,
connect="Indicator-Observable Link, if it exists. \
http://stixproject.github.io/data-model/1.2/cybox/ObservableType/")
stixGraph.merge(relInd)
for indValKey in keyIndValueList:
indTypeNode = stixGraph.find_one("IndicatorTypeNode", property_key="IndicatorTypeValue", property_value=indValKey)
try:
relIndType = Relationship(indTypeNode, "IndicatorTypeLink",IndicatorNode,IndicatorTypeValue = indValKey)
stixGraph.merge(relIndType)
except ConstraintError:
pass
except AttributeError:
pass
'''
# FUTURE WORK TO MAKE IT STIX COMPLIANT
if indicator.related_campaigns:
parse_indicator_related_campaigns(indicator.related_campaigns)
if indicator.related_indicators:
parse_indicator_related_indicators(indicator.related_indicators)
if indicator.related_packages:
parse_indicator_related_packages(indicator.related_packages)
if indicator.short_descriptions:
parse_indicator_short_descriptions(indicator.short_descriptions)
'''
def parse_indicators(indicators, kill_chains, kill_chain_phases, StixFileID):
    """Parse every indicator in the package, then link each plain indicator
    to the composite indicator (if one was present) with a
    CompositionIndicatorLink relationship."""
    composite_id = None
    plain_ids = []
    # First pass: classify indicators as composite vs. plain.
    for ind in indicators:
        if ind.composite_indicator_expression:
            composite_id = ind.id_
        else:
            plain_ids.append(ind.id_)
    # Second pass: persist every indicator.  Remember the last indicator's
    # composition operator -- the relationship below records it.
    last_operator = None
    for ind in indicators:
        parse_indicator(ind, ind.id_, kill_chains, kill_chain_phases, StixFileID)
        last_operator = ind.observable_composition_operator
    # Third pass: attach each plain indicator to the composite one.
    if composite_id:
        comp_node = stixGraph.find_one("IndicatorNode", property_key="ID", property_value=composite_id)
        for plain_id in plain_ids:
            plain_node = stixGraph.find_one("IndicatorNode", property_key="ID", property_value=plain_id)
            if comp_node and plain_node:
                comp_rel = Relationship(comp_node, "CompositionIndicatorLink", plain_node,
                                        CompositionOperator=last_operator)
                try:
                    stixGraph.merge(comp_rel)
                except ConstraintError:
                    pass
def parse_ttps(ttp, kill_chains, kill_chain_phases, StixFileID):
    """Persist TTP entries and the package's kill-chain definitions.

    ttp               -- the package's TTPs collection (iterable of TTP
                         objects; also exposes .kill_chains)
    kill_chains       -- dict populated here: chain id -> chain name
    kill_chain_phases -- dict populated here: phase id -> phase name
    StixFileID        -- identifier of the source STIX file
    """
    for tactic in ttp:
        stixGraph.run("CREATE CONSTRAINT ON (n:TTPKillChainNode) ASSERT n.Name IS UNIQUE")
        desc = " Tactics, Techniques, and Procedures (TTP) contains leverage that help salve a threat." \
               "It contains information about vulnerabilities, misconfigurations, weaknesses likely to be targeted and " \
               "actions taken in the past to overcome them."
        # BUG FIX: Timestamp was str() -- always the empty string; store the
        # TTP's own timestamp instead.
        TTPNode = Node("TTPNode", TTPDesc=desc, TTPID=tactic.id_, Timestamp=str(tactic.timestamp))
        if tactic.title: TTPNode["TTPTitle"] = tactic.title
        if tactic.behavior:
            if tactic.behavior.attack_patterns:
                for i, behave in enumerate(tactic.behavior.attack_patterns):
                    TTPNode["TTP_CAPEC_ID" + str(i)] = str(behave.capec_id)
                    TTPNode["TTPAttackPatternDescription" + str(i)] = str(behave.description)
            if tactic.behavior.exploits:
                for i, exp in enumerate(tactic.behavior.exploits):
                    # NOTE(review): TTPExploitsID has no index suffix, so the
                    # last exploit's id wins -- confirm this is intended.
                    if exp.id_: TTPNode["TTPExploitsID"] = str(exp.id_)
                    if exp.description: TTPNode["TTPExploitsDescription" + str(i)] = str(exp.description)
                    if exp.title: TTPNode["TTPExploitsTitle" + str(i)] = str(exp.title)
            if tactic.behavior.malware_instances:
                for i, sample in enumerate(tactic.behavior.malware_instances):
                    TTPNode["TTPMalwareSample" + str(i)] = str(sample.names[0])
                    TTPNode["TTPMalwareType" + str(i)] = str(sample.types[0])
                    TTPNode["TTPMalwareID" + str(i)] = sample.id_
        # Not yet parsed: intended_effects, kill_chain_phases, related_packages,
        # related_ttps, victim_targeting.
        stixGraph.merge(TTPNode)
    # Kill-chain definitions live on the TTPs collection itself.
    for chain in ttp.kill_chains:
        kill_chains[chain.id_] = chain.name
        desc = "Contains kill chains that can be adopted when we encounter a threat.Connected to initNode"
        stixGraph.run("CREATE CONSTRAINT ON (n:TTPKillChainNode) ASSERT n.Name IS UNIQUE")
        TTPKillChainNode = Node("TTPKillChainNode", Description=desc, Name=chain.name, Definer=chain.definer,
                                Reference=chain.reference, NoOfPhases=chain.number_of_phases, ID=chain.id_,
                                STIXFileID=StixFileID)
        rel = Relationship(init_node, "TTPGraphLink", TTPKillChainNode, connect="To make sure graph isn't disconnected")
        try:
            stixGraph.merge(rel)
        except ConstraintError:
            pass
        for phase in chain.kill_chain_phases:
            kill_chain_phases[phase.phase_id] = str(phase.name)
            desc = "Each Kill Chain is defined in terms of phases in which we caught a particular threat."
            stixGraph.run("CREATE CONSTRAINT ON (n:KillChainPhaseNode) ASSERT n.PhaseName IS UNIQUE")
            KillChainPhaseNode = Node("KillChainPhaseNode", Description=desc, Ordinality=phase.ordinality,
                                      PhaseName=phase.name, ID=phase.phase_id, Chain_ID=chain.id_,
                                      STIXFileID=StixFileID)
            reln = Relationship(TTPKillChainNode, "TTPKillChainPhaseLink", KillChainPhaseNode,
                                connect="Phases Of KillChain")
            try:
                stixGraph.merge(reln)
            except ConstraintError:
                pass
def _link_report_refs(ReportNode, report_id, refs, prop_prefix, label, key, rel_name, desc, rel_id_key):
    """Record each reference's idref on the report node and, when the
    referenced node already exists in the graph, merge a relationship from
    it to the report.  Merge failures are ignored (best effort), matching
    the original behaviour."""
    for i, ref in enumerate(refs):
        ReportNode[prop_prefix + str(i)] = ref.idref
        target = stixGraph.find_one(label, property_key=key, property_value=ref.idref)
        if target:
            props = {"Description": desc, rel_id_key: ref.idref, "ReportID": report_id}
            relRef = Relationship(target, rel_name, ReportNode, **props)
            try:
                stixGraph.merge(relRef)
            except Exception:  # was a bare except: -- narrowed, still best-effort
                pass
def parse_reports(reports):
    """Persist each STIX Report as a ReportNode and link it to the campaign,
    COA, exploit-target, incident, indicator, related-report, threat-actor
    and TTP nodes it references.  The ReportNode itself is only persisted
    via those relationship merges, as in the original implementation."""
    for report in reports:
        ReportNode = Node("ReportNode", ReportID=report.id_)
        if report.timestamp: ReportNode["Timestamp"] = str(report.timestamp)
        if report.observables: ReportNode["ReportObservables"] = str(report.observables)
        if report.header:
            ReportNode["ReportTitle"] = str(report.header.title)
            ReportNode["ReportDesc"] = str(report.header.description)
            # NOTE(review): assumes information_source/time/produced_time and
            # intents[0] always exist on the header -- TODO confirm; absent
            # fields raise AttributeError/IndexError here.
            ReportNode["ReportSource"] = str(report.header.information_source.time.produced_time.value)
            ReportNode["ReportIntent"] = str(report.header.intents[0].value)
        if report.campaigns:
            _link_report_refs(ReportNode, report.id_, report.campaigns, "ReportCampaignID",
                              "CampaignNode", "CampaignID", "CampaignReportLink",
                              "Campaigns in a Report", "CampaignID")
        if report.courses_of_action:
            _link_report_refs(ReportNode, report.id_, report.courses_of_action, "ReportCOAID",
                              "COANode", "COAID", "COAReportLink",
                              "COA in a Report", "COAID")
        if report.exploit_targets:
            # Relationship name keeps the original "ExploitargetReportLink"
            # spelling -- renaming it would orphan existing graph data.
            _link_report_refs(ReportNode, report.id_, report.exploit_targets, "ReportExploitTargetID",
                              "ExploitTargetNode", "ExploitTargetID", "ExploitargetReportLink",
                              "Exploit Targets in a Report", "ExploitTargetID")
        if report.incidents:
            _link_report_refs(ReportNode, report.id_, report.incidents, "ReportIncident",
                              "IncidentNode", "IncidentID", "IncidentReportLink",
                              "Incidents in a Report", "IncidentID")
        if report.indicators:
            # BUG FIX: the relationship previously carried the id under
            # "IncidentID" (copy-paste from the incidents block).
            _link_report_refs(ReportNode, report.id_, report.indicators, "ReportIndicator",
                              "IndicatorNode", "ID", "IndicatorReportLink",
                              "Indicators in a Report", "IndicatorID")
        if report.related_reports:
            _link_report_refs(ReportNode, report.id_, report.related_reports, "ReportRelatedReports",
                              "ReportNode", "ReportID", "RelatedReportLink",
                              "Related Reports", "RelatedReportID")
        if report.threat_actors:
            _link_report_refs(ReportNode, report.id_, report.threat_actors, "ReportActor",
                              "ThreatActorNode", "ThreatActorID", "ThreatActorReportLink",
                              "Threat Actors in a Report", "ThreatActorID")
        if report.ttps:
            # BUG FIX: description previously said "Indicators in a Report"
            # (copy-paste from the indicators block).
            _link_report_refs(ReportNode, report.id_, report.ttps, "ReportTTP",
                              "TTPNode", "TTPID", "TTPReportLink",
                              "TTPs in a Report", "TTPID")
def parse_COA(course_of_action):
    """Persist each STIX Course Of Action as a COANode.

    course_of_action -- iterable of python-stix CourseOfAction objects.
    """
    for coa in course_of_action:
        COANode = Node("COANode", Desc="CoursesOfAction", COAID=coa.id_)
        # ROBUSTNESS FIX: cost/efficacy/impact/objective/parameter_observables
        # are optional STIX fields; the unguarded attribute chains previously
        # raised AttributeError on sparse COAs.
        if coa.cost: COANode["COACost"] = str(coa.cost.value)
        if coa.efficacy: COANode["COAEfficacy"] = str(coa.efficacy.value)
        if coa.impact:
            COANode["COAImpact"] = str(coa.impact.value)
            COANode["COAImpactDescription"] = str(coa.impact.description)
        if coa.objective:
            COANode["COAObjectiveDescription"] = str(coa.objective.description)
            if coa.objective.applicability_confidence:
                COANode["COAObjectiveApplicabilityConfidence"] = str(coa.objective.applicability_confidence.value)
        if coa.parameter_observables:
            for obs in coa.parameter_observables.observables:
                # NOTE: unindexed property -- the last observable wins, as in
                # the original code.
                COANode["COAObservableProperty"] = str(obs.object_.properties.address_value)
        COANode["COAStage"] = str(coa.stage)
        COANode["COAType"] = str(coa.type_)
        COANode["COATitle"] = coa.title
        stixGraph.merge(COANode)
def parse_exploit_target(exploit_targets):
    """Persist each STIX ExploitTarget as an ExploitTargetNode."""
    for target in exploit_targets:
        ExploitTargetNode = Node("ExploitTargetNode", Title="Exploit Targets", ExploitTargetID=target.id_)
        # Optional fields -- copied onto the node only when present.
        if target.description: ExploitTargetNode["ExploitTargetDescription"] = str(target.description)
        if target.handling: ExploitTargetNode["ExploitTargetHandling"] = str(target.handling)
        if target.information_source: ExploitTargetNode["ExploitTargetSource"] = str(target.information_source)
        if target.potential_coas: ExploitTargetNode["ExploitTargetPotentialCOA"] = str(target.potential_coas)
        if target.related_exploit_targets: ExploitTargetNode["RelatedExploitTargets"] = str(target.related_exploit_targets)
        if target.related_packages: ExploitTargetNode["ExploitTargetRelatedPackages"] = str(target.related_packages)
        if target.timestamp: ExploitTargetNode["ExploitTargetTimestamp"] = str(target.timestamp)
        if target.title: ExploitTargetNode["ExploitTargetTitle"] = target.title
        if target.vulnerabilities:
            for i, vulnerable in enumerate(target.vulnerabilities):
                ExploitTargetNode["ExploitTargetVulnerabilityCVE" + str(i)] = vulnerable.cve_id
        if target.weaknesses:
            # BUG FIX: each entry previously stored str(target.weaknesses)
            # (the whole collection) instead of the individual weakness.
            for i, weak in enumerate(target.weaknesses):
                ExploitTargetNode["ExploitTargetWeaknesses" + str(i)] = str(weak)
        stixGraph.merge(ExploitTargetNode)
def parse_campaigns(pkg):
    """Persist each campaign in *pkg* as a CampaignNode and link it to the
    threat actors, TTPs and incidents it references (when those nodes
    already exist in the graph).

    pkg -- the whole python-stix package (needed for pkg.find() to resolve
           TTP idrefs).
    """
    for camp in pkg.campaigns:
        CampaignNode = Node("CampaignNode", CampaignTitle=camp.title, CampaignID=camp.id_,
                            Timestamp=str(camp.timestamp))
        relatedTTP = []
        relatedActors = []
        relatedIncidents = []
        if camp.attribution:
            # (removed a stray debug 'print "---"' here)
            # NOTE(review): attrib[0] takes only the first attributed actor of
            # each attribution entry -- confirm additional actors can be dropped.
            for i, attrib in enumerate(camp.attribution):
                if attrib[0].item.title: CampaignNode["AttributedActor" + str(i)] = attrib[0].item.title
                if attrib[0].item.description: CampaignNode["AttributedActorDesc" + str(i)] = attrib[0].item.description
                if attrib[0].item.id_:
                    CampaignNode["AttributedActorID" + str(i)] = attrib[0].item.id_
                    relatedActors.append(attrib[0].item.id_)
                if attrib[0].item.timestamp: CampaignNode["AttributedActorTimestamp" + str(i)] = str(attrib[0].item.timestamp)
                if attrib[0].item.confidence:
                    CampaignNode["AttributedActorConfidence" + str(i)] = str(attrib[0].item.confidence.value.value)
        if camp.related_incidents:
            for i, rel in enumerate(camp.related_incidents):
                if rel.item:
                    CampaignNode["RelatedIncidentID" + str(i)] = str(rel.item.idref)
                    relatedIncidents.append(rel.item.idref)
                    # BUG FIX: key was "RelatedTTPDesc" (copy-paste from the
                    # related-TTPs section below).
                    if rel.item.description: CampaignNode["RelatedIncidentDesc" + str(i)] = rel.item.description
                if rel.relationship: CampaignNode["RelatedIncidentRelationship" + str(i)] = rel.relationship
                if rel.information_source: CampaignNode["RelatedIncidentSource" + str(i)] = rel.information_source
                if rel.confidence: CampaignNode["RelatedIncidentConfidence" + str(i)] = rel.confidence
        if camp.related_ttps:
            for i, tactic in enumerate(camp.related_ttps):
                if tactic.relationship:
                    CampaignNode["CampaignTTPRelationship" + str(i)] = str(tactic.relationship)
                if tactic.item:
                    if tactic.item.idref: CampaignNode["RelatedTTPsID_" + str(i)] = str(tactic.item.idref)
                    relatedTTP.append(tactic.item.idref)
                    # Resolve the idref to the actual TTP object in the package.
                    ttp = pkg.find(tactic.item.idref)
                    if ttp:
                        CampaignNode["RelatedTTPTitle_" + str(i)] = str(ttp.title)
                        # ROBUSTNESS FIX: victim_targeting is optional; the
                        # unguarded access previously raised AttributeError.
                        if ttp.victim_targeting and ttp.victim_targeting.targeted_information:
                            for j, target in enumerate(ttp.victim_targeting.targeted_information):
                                CampaignNode["RelatedTTPVictim_" + str(i) + "_" + str(j)] = str(target)
        stixGraph.merge(CampaignNode)
        # BUG FIX (x3 below): "except AttributeError, ValueError" is Python 2
        # syntax for "except AttributeError as ValueError" -- it never caught
        # ValueError and shadowed the builtin.  Catch the tuple instead.
        if relatedActors:
            for ac in relatedActors:
                actorNode = stixGraph.find_one("ThreatActorNode", property_key="ThreatActorID",
                                               property_value=ac)
                try:
                    relActorCampaign = Relationship(CampaignNode, "ThreatActorCampaignLink", actorNode,
                                                    Description="Campaign Actor Attribution", ActorID=ac,
                                                    CampaignID=camp.id_)
                    stixGraph.merge(relActorCampaign)
                except (AttributeError, ValueError):
                    pass
        if relatedTTP:
            for ttp in relatedTTP:
                ttpNode = stixGraph.find_one("TTPNode", property_key="TTPID",
                                             property_value=ttp)
                try:
                    relTTPCampaign = Relationship(CampaignNode, "TTPCampaignLink", ttpNode,
                                                  Description="Campaign TTP Attribution", TTPID=ttp,
                                                  CampaignID=camp.id_)
                    stixGraph.merge(relTTPCampaign)
                except (AttributeError, ValueError):
                    pass
        if relatedIncidents:
            for inc in relatedIncidents:
                incNode = stixGraph.find_one("IncidentNode", property_key="IncidentID",
                                             property_value=inc)
                try:
                    relIncCampaign = Relationship(CampaignNode, "IncidentCampaignLink", incNode,
                                                  Description="Campaign Incident Attribution", IncidentID=inc,
                                                  CampaignID=camp.id_)
                    stixGraph.merge(relIncCampaign)
                except (AttributeError, ValueError):
                    pass
def parse_incidents(incidents, STIXFileID):
    """Persist each STIX Incident as an IncidentNode and link it to the
    TTP nodes it leverages.

    incidents  -- iterable of python-stix Incident objects
    STIXFileID -- identifier of the source STIX file (currently unused in
                  the body; kept for caller compatibility)
    """
    for inc in incidents:
        leveragedTTPs = []
        IncidentNode = Node("IncidentNode", IncidentID=inc.id_, Timestamp=str(inc.timestamp))
        stixGraph.run("CREATE CONSTRAINT ON (n:IncidentNode) ASSERT n.IncidentID IS UNIQUE")
        if inc.title: IncidentNode["IncidentTitle"] = inc.title
        if inc.reporter: IncidentNode["IncidentReporter"] = inc.reporter.identity.name
        if inc.description: IncidentNode["IncidentDesc"] = str(inc.description)
        if inc.confidence: IncidentNode["IncidentConfidence"] = str(inc.confidence.value)
        if inc.time:
            # ROBUSTNESS FIX: every sub-field of Incident.time is optional;
            # the unguarded chains previously raised AttributeError.
            if inc.time.initial_compromise:
                IncidentNode["IncidentInitialCompromise"] = str(inc.time.initial_compromise.value)
            if inc.time.incident_discovery:
                IncidentNode["IncidentDiscovery"] = str(inc.time.incident_discovery.value)
            if inc.time.restoration_achieved:
                IncidentNode["IncidentRestoration"] = str(inc.time.restoration_achieved.value)
            if inc.time.incident_reported:
                IncidentNode["IncidentReported"] = str(inc.time.incident_reported.value)
        if inc.impact_assessment:
            for i, impact in enumerate(inc.impact_assessment.effects):
                IncidentNode["IncidentImpact" + str(i)] = str(impact)
        if inc.victims:
            for i, victim in enumerate(inc.victims):
                IncidentNode["IncidentVictim" + str(i)] = str(victim.name)
        if inc.leveraged_ttps:
            for i, relation in enumerate(inc.leveraged_ttps):
                IncidentNode["IncidentRelatedTTP" + str(i)] = str(relation.relationship)
                IncidentNode["IncidentRelatedTTPID" + str(i)] = str(relation.item.idref)
                leveragedTTPs.append(relation.item.idref)
        # Similar handling still pending for related_packages,
        # related_indicators and related_incidents.
        if inc.related_observables:
            # NOTE(review): assumes each related observable wraps a file
            # object (file_name / size_in_bytes / hashes) -- TODO confirm.
            for i, obs in enumerate(inc.related_observables):
                IncidentNode["IncidentObservableID" + str(i)] = obs.item.id_
                IncidentNode["IncidentObservableRelation" + str(i)] = str(obs.relationship)
                IncidentNode["IncidentObservableFileName" + str(i)] = str(obs.item.object_.properties.file_name)
                IncidentNode["IncidentObservableFilesize" + str(i)] = str(obs.item.object_.properties.size_in_bytes)
                IncidentNode["IncidentObservableSHA256Digest" + str(i)] = str(obs.item.object_.properties.hashes[0].simple_hash_value)
        if inc.affected_assets:
            for i, asset in enumerate(inc.affected_assets):
                if asset.description: IncidentNode["IncidentAffectedAssetsDesc" + str(i)] = str(asset.description)
                if asset.type_:
                    IncidentNode["IncidentAffectedAssetsType" + str(i)] = str(asset.type_)
                    # BUG FIX: count_affected was read even when type_ was
                    # None, which raised AttributeError.
                    if asset.type_.count_affected:
                        IncidentNode["IncidentAffectedAssetsCount" + str(i)] = str(asset.type_.count_affected)
                if asset.business_function_or_role: IncidentNode["IncidentAffectedAssetsRole" + str(i)] = str(asset.business_function_or_role)
                if asset.ownership_class: IncidentNode["IncidentAffectedAssetsOwner" + str(i)] = str(asset.ownership_class)
                if asset.management_class: IncidentNode["IncidentAffectedAssetsManager" + str(i)] = str(asset.management_class)
                if asset.location_class: IncidentNode["IncidentAffectedAssetsLocation" + str(i)] = str(asset.location_class)
                if asset.nature_of_security_effect:
                    # BUG FIX: the inner loop reused index "i", so effects of
                    # different assets overwrote each other's properties; key
                    # on both indices (same scheme as parse_campaigns'
                    # RelatedTTPVictim_i_j keys).
                    for j, effect in enumerate(asset.nature_of_security_effect):
                        suffix = str(i) + "_" + str(j)
                        if effect.property_: IncidentNode["IncidentSecurityEffectProperty" + suffix] = str(effect.property_)
                        if effect.description_of_effect: IncidentNode["IncidentSecurityEffectDesc" + suffix] = str(effect.description_of_effect)
                        if effect.non_public_data_compromised:
                            IncidentNode["IncidentSecurityEffectCompromised" + suffix] = str(effect.non_public_data_compromised)
                            if effect.non_public_data_compromised.data_encrypted:
                                IncidentNode["IncidentSecurityEffectCompromisedEncrypted" + suffix] = str(effect.non_public_data_compromised.data_encrypted)
        stixGraph.merge(IncidentNode)
        for lev in leveragedTTPs:
            ttpNode = stixGraph.find_one("TTPNode", property_key="TTPID", property_value=lev)
            if ttpNode:
                relTTPInc = Relationship(ttpNode, "TTPIncidentLink", IncidentNode,
                                         IncidentID=inc.id_, TTPID=lev)
                stixGraph.merge(relTTPInc)
def parse_threat_actors(pkg):
    """Create a ThreatActorNode for every threat actor in a STIX package and
    link each actor to the TTP nodes it was observed using.

    :param pkg: parsed STIX package exposing ``threat_actors``
    """
    #print "*****************************Threat Actors*******************************************************"
    for actor in pkg.threat_actors:
        observedTTPs=[]
        ThreatActorNode = Node("ThreatActorNode",Title = "Contains Threat Actors related to TTP", ThreatActorID=actor.id_, Timestamp = str(actor.timestamp))
        # ThreatActorID is the merge key, so re-parsing the same actor is idempotent.
        stixGraph.run("CREATE CONSTRAINT ON (n:ThreatActorNode) ASSERT n.ThreatActorID IS UNIQUE")
        if actor.title: ThreatActorNode["ActorTitle"] = actor.title
        if actor.description: ThreatActorNode["ActorDescription"]= str(actor.description)
        if actor.confidence: ThreatActorNode["ActorConfidence"]= str(actor.confidence.value.value)
        if actor.identity: ThreatActorNode["ThreatActorName"]= str(actor.identity.name)
        # associate_campaigns, associated_actors, planning_and_operational_supports, types...
        if actor.motivations:
            for i,motivate in enumerate(actor.motivations):
                if motivate.value: ThreatActorNode["Motivation"+ str(i)]= motivate.value.value
                if motivate.confidence: ThreatActorNode["MotivationConfidence"+str(i)]= motivate.confidence
                if motivate.description: ThreatActorNode["MotivationDescription"+str(i)]= motivate.description
                if motivate.source: ThreatActorNode["MotivationSource"+str(i)] = motivate.source
                if motivate.timestamp: ThreatActorNode["MotivationTimestamp"+str(i)] = str(motivate.timestamp)
        if actor.intended_effects:
            for i,intend in enumerate(actor.intended_effects):
                if intend.value: ThreatActorNode["IntendedEffect"+str(i)]= intend.value.value
                if intend.confidence: ThreatActorNode["IntendedEffectConfidence"+str(i)]= intend.confidence
                if intend.description: ThreatActorNode["IntendedEffectDescription"+str(i)]= intend.description
                if intend.source: ThreatActorNode["IntendedEffectSource"+str(i)]= intend.source
                if intend.timestamp: ThreatActorNode["IntendedEffectTimestamp"+str(i)]= str(intend.timestamp)
        if actor.sophistications:
            for i,sophisticate in enumerate(actor.sophistications):
                if sophisticate.value: ThreatActorNode["Sophistication"+str(i)]= sophisticate.value.value
                if sophisticate.confidence: ThreatActorNode["SophisticationConfidence"+str(i)]= sophisticate.confidence
                if sophisticate.description: ThreatActorNode["SophisticationDescription"+str(i)]= sophisticate.description
                if sophisticate.source: ThreatActorNode["SophisticationSource"+str(i)]= sophisticate.source
                if sophisticate.timestamp: ThreatActorNode["SophisticationTimestamp"+str(i)]= str(sophisticate.timestamp)
        if actor.observed_ttps:
            for i,ttp in enumerate(actor.observed_ttps):
                observedTTPs.append(ttp.item.idref)
                ThreatActorNode["ObservedTTP_ID"+str(i)]= ttp.item.idref
                if ttp.relationship: ThreatActorNode["ObservedTTPRelationship"+str(i)]= str(ttp.relationship)
                if ttp.information_source: ThreatActorNode["ObservedTTPSource"+str(i)]= ttp.information_source
                if ttp.confidence: ThreatActorNode["ObservedTTPConfidence"+str(i)]= ttp.confidence
                #Link Observable TTP
                #print "RelatedTTP: " + str(pkg.find(ttp.item.idref).title)
        stixGraph.merge(ThreatActorNode)
        for ob in observedTTPs:
            ttpNode = stixGraph.find_one("TTPNode", property_key="TTPID", property_value=ob)
            if ttpNode:
                # BUG FIX: the relationship previously carried TTPID=ttp.item.idref,
                # i.e. whatever the *last* observed_ttps enumeration left in `ttp`,
                # not the TTP this relationship actually links. Use `ob` (mirrors
                # the `lev` usage in the incident/TTP linking loop).
                relTTPActor = Relationship(ttpNode, "TTPActorLink", ThreatActorNode,
                                           ThreatActorID = actor.id_, TTPID = ob)
                stixGraph.merge(relTTPActor)
def parse_data(pkg):
    """Top-level dispatcher: walk every populated section of a STIX package
    and hand it to the matching parse_* helper.

    kill_chains / kill_chain_phases are shared between the indicator and TTP
    parsers so both see the same chain definitions.
    """
    kill_chains = {}
    kill_chain_phases = {}
    if pkg.stix_header:
        parse_header(pkg.stix_header, pkg._id)
    if pkg.exploit_targets:
        parse_exploit_target(pkg.exploit_targets)
    #if pkg.related_packages:
    #    logging.info('Related Packages to be handled separately..')
    if pkg.observables:
        parse_observables(pkg.observables.observables, pkg._id, None, None)
    if pkg.indicators:
        parse_indicators(pkg.indicators, kill_chains, kill_chain_phases, pkg._id)
    if pkg.incidents:
        parse_incidents(pkg.incidents, pkg._id)
    if pkg.courses_of_action:
        parse_COA(pkg.courses_of_action)
    if pkg.ttps:
        parse_ttps(pkg.ttps, kill_chains, kill_chain_phases, pkg._id)
    if pkg.threat_actors:
        parse_threat_actors(pkg)
    if pkg.campaigns:
        parse_campaigns(pkg)
    if pkg.reports:
        parse_reports(pkg.reports)
| {
"repo_name": "arangaraju/graph-stix",
"path": "graph_stix/graph_sticks.py",
"copies": "1",
"size": "62613",
"license": "mit",
"hash": 8544116483894264000,
"line_mean": 53.97190518,
"line_max": 240,
"alpha_frac": 0.607557536,
"autogenerated": false,
"ratio": 4.045290089158806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011437713643879674,
"num_lines": 1139
} |
__author__ = 'aprocysanae'
#coding: utf-8
import re
from abc import ABCMeta, abstractmethod
class Token(object):
    """Base class for lexer tokens.

    Subclasses override the is_* predicates and the matching getter;
    the base implementations report "not that kind of token".
    """

    EOL = '\n'  # end-of-line marker shared by all token types

    def __init__(self, line_number):
        # Remember which source line this token was read from.
        self.current_line = line_number

    def get_current_line(self):
        """Return the source line number this token came from."""
        return self.current_line

    def is_identifier(self):
        return False

    def is_number(self):
        return False

    def is_string(self):
        return False

    def get_number(self):
        raise Exception("Not a number token")

    def get_text(self):
        raise Exception("Not a string token")
class NumberToken(Token):
    """Token wrapping a numeric literal.

    The literal is parsed as float when it contains a decimal point,
    otherwise as int.
    """

    def __init__(self, line, number):
        # Initialize the base token first, then parse the literal.
        super(NumberToken, self).__init__(line)
        # Idiomatic membership test (was: `if not '.' in number`).
        if '.' in number:
            self.value = float(number)
        else:
            self.value = int(number)

    def is_number(self):
        return True

    def get_number(self):
        """Return the parsed numeric value (int or float)."""
        return self.value

    def get_text(self):
        """Return the value rendered back as text."""
        return str(self.value)

    def __str__(self):
        return "Number:{}".format(self.value)

    def __repr__(self):
        return "{}".format(self.value)
class StringToken(Token):
    """Token wrapping a string literal."""

    def __init__(self, line, text):
        # Base-class init first, then stash the literal text.
        super(StringToken, self).__init__(line)
        self.value = text

    def is_string(self):
        return True

    def get_text(self):
        """Return the literal text unchanged."""
        return self.value

    def __str__(self):
        return "String:{}".format(self.value)

    def __repr__(self):
        return "{}".format(self.value)
class IdToken(Token):
    """Token wrapping an identifier name."""

    def __init__(self, line, id_):
        # Base-class init first, then stash the identifier.
        super(IdToken, self).__init__(line)
        self.value = id_

    def is_identifier(self):
        return True

    def get_text(self):
        """Return the identifier name."""
        return self.value

    def __str__(self):
        return "Id:{}".format(self.value)

    def __repr__(self):
        return "{}".format(self.value)
| {
"repo_name": "SeavantUUz/Shyaru",
"path": "archive/old/tokens.py",
"copies": "1",
"size": "1812",
"license": "mit",
"hash": -3431961005126009300,
"line_mean": 19.8275862069,
"line_max": 47,
"alpha_frac": 0.5656732892,
"autogenerated": false,
"ratio": 3.775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48406732892,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aram'
from userLogin import *
from dataBase import *
from operations import *
sys.path.insert(0, '../graphics')
from graphicController import *
import os.path, random
#This class will be the one that controls
#the inputs and outputs for the user interface.
# Console entry point: log the user in, pick an account, run one operation.
print("Hello Worker! Welcome to ContablePlus!")
currentUser = UserLogin(None)
currentUser.enterLogin()
if(currentUser.registered):
    print("What do you want to do? ")
    #if it have ibans select an account
    ibanList = currentUser.getIbanList()
    if ibanList:
        #list ibans
        print("List of available accounts: ")
        for iban in ibanList:
            print(iban)
        #select one iban, that we will be working on.
        # NOTE(review): selection is 1-based; int() raises on non-numeric
        # input and an out-of-range index raises IndexError — no validation.
        currentIbanOption =input("Select one IBAN: ")
        currentIban = ibanList[int(currentIbanOption)-1]
        #if currentIban is correct, should display options to do:
        print("1: Show Graphics from this account ")
        #Show graphics
        print("2: Add or Withdraw money ")
        #Add or withdraw money
        print("3: Add another Owner ")
        #Add another owner(?)
        print("4: Transfer money")
        option = input("Select an option: ")
        if(option == "1"):
            # Plot the balance history of the chosen account.
            myTest = GraphicController()
            myTest.showGrapicWithDNI(currentIban)
        if(option == "2"):
            # Deposit (or, with a negative amount, withdraw) money.
            money_input=input("How much money will you put? ")
            curr_input=input("And what currency is it? ")
            new_money=[float(money_input),curr_input]
            currentOwner = currentUser.getOwner()
            currentDni = currentOwner.getDni()
            operations= Operations(currentIban, new_money, currentDni)
            operations.addMoney()
        if(option == "3"):
            # Register an additional owner for the current account.
            newOwner = input("Insert thw new owners' DNI")
            datab = DataBase()
            datab.addOwnerAccount(currentIban,newOwner)
        if(option == "4"):
            # Transfer: negative movement on this account, positive on the target.
            money_input=input("Money to transfer? ")
            curr_input=input("Currency? ")
            getter_money=[float(money_input),curr_input]
            currentOwner = currentUser.getOwner()
            currentDni = currentOwner.getDni()
            getter_iban = input("IBAN of other account ")
            new_money= [-float(money_input), curr_input]
            operations_transfer1= Operations(currentIban, new_money, currentDni)
            operations_transfer1.addMoney()
            operations_transfer2= Operations(getter_iban, getter_money, currentDni)
            operations_transfer2.addMoney()
    else:
        # The user has no accounts yet — offer to create one.
        print("Would you like to create one? ")
        createOne = input("Y/N: ")
        if createOne == "Y":
            print("Lets create an account. ")
            registerAcc = DataBase()
            currency = "EUR"  # new accounts are created in EUR
            amount = input("Insert your diposit: ")
            owner = currentUser.getOwner()
            dni = owner.getDni()
            print(dni)
            #this creates a new account.
            registerAcc.afegeixCompta(amount, currency, dni)
        else:
pass | {
"repo_name": "aramusss/contableplus",
"path": "controller/userInterface.py",
"copies": "1",
"size": "3036",
"license": "apache-2.0",
"hash": 1463559889084886800,
"line_mean": 36.0365853659,
"line_max": 83,
"alpha_frac": 0.6040843215,
"autogenerated": false,
"ratio": 3.927554980595084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9983233990200255,
"avg_score": 0.00968106237896554,
"num_lines": 82
} |
__author__ = 'aram'
import turtle
import datetime
#DNI;IBAN;DATE(24/02/2014);+/-IMPORT
class GraphicController:
    """Draws a filled money-over-time area chart for one account using turtle.

    Log line format (per ../database/log.txt): DNI;IBAN;DATE(dd/mm/yyyy);+/-amount
    """

    def showGrapicWithDNI(self, inputIban):
        """Plot the balance history of the account ``inputIban``.

        First pass over the log finds the plot bounds; second pass traces
        the running balance. Blocks until the turtle window is clicked.
        """
        filePath = "../database/log.txt"
        actualMoney = 0
        maxMoney = 0
        minMoney = 0
        startDate = datetime.datetime(2014, 1, 1)
        maxDate = startDate
        # Pass 1: determine max/min movement and the latest date for this IBAN.
        with open(filePath, 'r') as logDB:
            for line in logDB:
                dni, iban, date, money = line.split(";")
                day = int(date[0:2])
                month = int(date[3:5])
                year = int(date[6:10])
                dateTime = datetime.datetime(year, month, day)
                if(iban == inputIban):
                    if float(money) > maxMoney:
                        # NOTE(review): accumulates rather than taking a plain
                        # max — preserved as-is, confirm intent.
                        maxMoney = float(money) + maxMoney
                    if dateTime > maxDate:
                        maxDate = dateTime
                    if float(money) < minMoney:
                        minMoney = float(money)
        dateDiff = maxDate - startDate
        totalDays = dateDiff.days
        t = turtle.Turtle()
        screen = t.getscreen()
        screen.setworldcoordinates(0, minMoney, totalDays, maxMoney)
        # NOTE(review): x-coordinate here is a money value, not a day count —
        # looks suspicious but preserved to keep behavior identical.
        t.goto(minMoney, 0)
        screen.tracer(100)
        t.color("#7D7EC0")
        t.fillcolor("#7D7EC0")
        t.begin_fill()
        # Pass 2: trace the running balance. BUG FIX: this file handle was
        # previously opened without ever being closed (and the name shadowed
        # the `file` builtin); a context manager now releases it.
        with open(filePath, "r") as logFile:
            for line in logFile:
                dni, iban, date, money = line.split(";")
                # NOTE(review): the running total includes movements of *every*
                # account, not only inputIban — preserved as-is, confirm.
                actualMoney = actualMoney + float(money)
                day = int(date[0:2])
                month = int(date[3:5])
                year = int(date[6:10])
                if iban == inputIban:
                    dateTime = datetime.datetime(year, month, day)
                    dateDiff = dateTime - startDate
                    t.goto(dateDiff.days, actualMoney)
        t.goto(totalDays, 0)
        t.goto(minMoney, 0)
        t.end_fill()
        screen.update()
        screen.exitonclick()
"repo_name": "aramusss/contableplus",
"path": "graphics/graphicController.py",
"copies": "1",
"size": "1990",
"license": "apache-2.0",
"hash": -1486351289620722400,
"line_mean": 25.9054054054,
"line_max": 68,
"alpha_frac": 0.4924623116,
"autogenerated": false,
"ratio": 3.8640776699029127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48565399815029126,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Arana Fireheart'
# Running extremes, seeded outside any plausible temperature range.
lowestLow = 200
lowestLowYear = 0
highestHigh = -200
highestHighYear = 0
try:
    with open('testfile1.txt', 'r') as testFileHandle:
        header = testFileHandle.readline()  # skip the column-header line
        for line in testFileHandle.readlines():
            temperatures = line.strip().split('\t')
            # Column 3: record low — apparently "<temp><unit> (<year>)",
            # inferred from the slicing below; confirm against the data file.
            recordLow = temperatures[3]
            recordLowTemp, recordLowYear = recordLow.split()
            recordLowTemp = int(recordLowTemp[:-1])   # drop trailing unit letter
            recordLowYear = int(recordLowYear[1:-1])  # strip the parentheses
            if recordLowTemp < lowestLow:
                lowestLow = recordLowTemp
                lowestLowYear = recordLowYear
            # Column 4: record high, same format.
            recordHigh = temperatures[4]
            recordHighTemp, recordHighYear = recordHigh.split()
            recordHighTemp = int(recordHighTemp[:-1])
            recordHighYear = int(recordHighYear[1:-1])
            if recordHighTemp > highestHigh:
                highestHigh = recordHighTemp
                highestHighYear = recordHighYear
        print("Lowest Temp: {0} Year: {1} Highest Temp: {2} Year: {3}".format(lowestLow, lowestLowYear, highestHigh, highestHighYear))
except FileNotFoundError:
print("ERROR: Filename incorrect") | {
"repo_name": "NewburyCS/StudentExamples",
"path": "fileReadWrite1.py",
"copies": "1",
"size": "1196",
"license": "mit",
"hash": 421741106269951170,
"line_mean": 41.75,
"line_max": 134,
"alpha_frac": 0.6287625418,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51287625418,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arana'
from datetime import datetime
class Person(object):
    """A person with simple biometric attributes and a log of spoken phrases.

    Accessors follow the file's getter/setter convention so existing callers
    keep working.
    """

    def __init__(self, startingName):
        self.name = startingName
        # Date of birth defaults to today; change it via setDateOfBirth().
        self.dateOfBirth = datetime.date(datetime.now())
        # Kept for backward compatibility; getAge() now computes the age live
        # (the old cached value was always a zero timedelta).
        self.age = datetime.date(datetime.now()) - self.dateOfBirth
        self.height = 0
        self.weight = 0
        self.eyeColor = None
        self.phrases = []   # every phrase ever spoken, oldest first
        self.gender = None  # set by the Man/Woman subclasses

    def __str__(self):
        return "Name: {0} Height: {1} Weight: {2} Age: {3} Number of past phrases: {4}".format(self.name, self.height, self.weight, self.getAge(), len(self.phrases))

    def speak(self, phraseToSpeak):
        """Print a phrase and append it to the phrase history."""
        self.phrases.append(phraseToSpeak)
        print(phraseToSpeak)

    def pastPhrases(self, numberOfSteps = 1):
        """Return the last numberOfSteps phrases; raise ValueError if not positive."""
        if numberOfSteps > 0:
            return self.phrases[-numberOfSteps:]
        else:
            raise ValueError

    def setName(self, newName):
        self.name = newName

    def getName(self):
        return self.name

    def setDateOfBirth(self, newDate):
        self.dateOfBirth = newDate

    def getAge(self):
        # BUG FIX: previously returned self.age, which was computed once in
        # __init__ (always a zero timedelta) and never refreshed after
        # setDateOfBirth(); compute the current age from dateOfBirth instead.
        return datetime.date(datetime.now()) - self.dateOfBirth

    def getDateOfBirth(self):
        return self.dateOfBirth

    def setHeight(self, newHeight):
        self.height = newHeight

    def getHeight(self):
        return self.height

    def setWeight(self, newWeight):
        self.weight = newWeight

    def getWeight(self):
        return self.weight

    def setEyeColor(self, newEyeColor):
        self.eyeColor = newEyeColor

    def getEyeColor(self):
        return self.eyeColor

    def getGender(self):
        return self.gender
class Man(Person):
    """Person specialization with gender preset to "Male"."""
    def __init__(self, startingName):
        super().__init__(startingName)
        self.gender = "Male"
class Woman(Person):
    """Person specialization with gender preset to "Female"."""
    def __init__(self, startingName):
        super().__init__(startingName)
        self.gender = "Female"
# Demo: exercise the Person / Man / Woman API from the command line.
person1 = Person("Mindy")
person1.setHeight(123)
person1.setWeight(133)
person1.speak("Hello")
person1.speak("Good Bye")
person1.speak("What?")
person1.speak("Nothing!")
person1.speak("Why?")
print(person1)
print("---------")
print(person1.pastPhrases())
print("---------")
print(person1.pastPhrases(4))
print("---------")
# pastPhrases() rejects non-positive counts with ValueError.
try:
    print(person1.pastPhrases(-10))
except ValueError:
    print("Usage error: numberOfSteps must be positive.")
print(person1.getGender())
man1 = Man("George")
man1.setHeight(345)
man1.setWeight(220)
print(man1.getGender())
woman1 = Woman("Martha")
woman1.setHeight(345)
woman1.setWeight(220)
print(woman1.getGender())
| {
"repo_name": "NewburyCS/StudentExamples",
"path": "Person.py",
"copies": "1",
"size": "2500",
"license": "mit",
"hash": 5124461137187543000,
"line_mean": 22.8095238095,
"line_max": 161,
"alpha_frac": 0.6352,
"autogenerated": false,
"ratio": 3.4059945504087192,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4541194550408719,
"avg_score": null,
"num_lines": null
} |
__author__ = 'arash'
import copy
import logging
import asyncio
import ssl
import pickle
from message_types import ackknowledgment
from message_types import measurement_msg
from message_types import requests
_logger = logging.getLogger(__name__)
def create_ssl_context(certfile, keyfile, root_pem):
    """
    Build an ssl.SSLContext for server-side use that requires and verifies
    client certificates (mutual TLS).
    certfile and root_pem must be in PEM format
    :param certfile: path to the certification file
    :param keyfile: path to the key
    :param root_pem: path to the root certification file
    :return: ssl.SSLContext
    """
    ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
    ctx.load_cert_chain(certfile=certfile, keyfile=keyfile)
    ctx.load_verify_locations(root_pem)
    # Reject any client that does not present a certificate we can verify.
    ctx.verify_mode = ssl.CERT_REQUIRED
    return ctx
class Server():
    """
    a class for communication through ssl on NonBlocking IO
    receives measurement data from clients and pushes them into
    shared queues to be consumed by other classes
    """
    def __init__(self, sslcontext, storage_queue, carbon_queue, expireed_certs, host="localhost", port=1234):
        self._host = host
        self._port = port
        self._sslcontext = sslcontext
        self._server = None
        self._data = None
        # asyncio loop
        self._loop = None
        self._storage_queue = storage_queue
        self._carbon_queue = carbon_queue
        # a list containing serial numbers of expired certificates
        self._expired_certs = expireed_certs
        # parameters for checking the messages
        # which have not been received
        self._not_received_list = []
        self._last_received = 0

    @asyncio.coroutine
    def client_connected(self, reader, writer):
        """ handling client connections
        :param reader:
        :param writer:
        :return:
        """
        # check if the certificate of the client has been expired!
        certDict = writer.get_extra_info("peercert")
        if certDict['serialNumber'] in self._expired_certs:
            _logger.warn("#warn:connection-from-expired-client:%s" % certDict['serialNumber'])
            writer.write('Sorry! Your certificate has been expired!'.encode())
            writer.close()
            return
        # handling messages from certified clients
        _logger.info("#info:connection-stablished#peercert:%s" % (writer.get_extra_info("socket").getpeercert()))
        while True:
            try:
                # wait to receive a message from the client
                rec = yield from reader.read(1000)
                if not rec:
                    # EOF: client closed the connection.
                    break
                # analyze the message and send ack
                data = yield from self.analyze_msg(rec, writer)
                # the message received is useful
                # push it into queues
                if data:
                    self.push_into_queues(data)
            except KeyboardInterrupt:
                return
            except Exception as e:
                # Log and keep serving the connection; one bad message must
                # not kill the client session.
                _logger.error("#error:error-while-handling-msg:%s" % rec)
                _logger.debug("#debug:size-of-msg:%s" % len(rec))
                _logger.exception(e)
                continue

    @asyncio.coroutine
    def analyze_msg(self, byte_msg, writer):
        """
        Analyzes a message received from the client to see
        if its a new message, a wanted message, or already seen message
        sends an acknowledgment if necessary
        :param byte_msg: (bytes)
        :param writer:
        :return: (list) the content of the message, which is a list
        """
        try:
            # NOTE(review): pickle.loads on network data is only safe because
            # clients are authenticated via mutual TLS; never relax that.
            msg = pickle.loads(byte_msg)
            # msg must be subclass of GeneralMessage
            if msg.get_type() == 'measurement':
                # NOTE(review): handle_measurement_msg is a generator (it uses
                # `yield from`), and it is returned here *without* `yield from`
                # — the caller appears to receive the generator object rather
                # than the data. Looks like a bug; confirm before relying on it.
                return self.handle_measurement_msg(msg, writer)
            elif msg.get_type() == 'request':
                return self.handle_request(msg)
            else:
                _logger.warn("#warn:unexpected-message-type%s" % msg.get_type())
        except pickle.UnpicklingError as e:
            _logger.error("#error:while-unpickling-msg-size:%s" % len(byte_msg))
            _logger.debug("#debug:problematic-msg:%s" % str(byte_msg))
            _logger.exception(e)
            return None
        except KeyError as e:
            _logger.error("#error:corrupted-msg-%s" % msg)
        except AttributeError as e:
            _logger.error("#error:message-is-corrupted-%s" % msg)

    def handle_measurement_msg(self, msg, writer):
        # Track message ids to detect gaps (lost messages) and duplicates;
        # re-requested messages are removed from the wanted list on arrival.
        try:
            msg_id = msg.get_id()
            # check if we miss some messages
            # since the last msg received
            if msg_id > self._last_received:
                for i in range(self._last_received+1, msg_id):
                    self._not_received_list.append(i)
                _logger.debug("#debug:not-received_list-updated:%s" % self._not_received_list)
                # updating last_received msg id
                self._last_received = msg_id
                # sending acknowledgment
                yield from self.send_ack(msg_id, writer)
                return msg.get_data()
            # a requested message arrived! remove it
            # from the wanted list
            elif msg_id < self._last_received:
                if msg_id in self._not_received_list:
                    # The client send an empty data with msg_id
                    # which means the client does not have that
                    # msg anymore
                    if not msg.get_data():
                        _logger.warn("#warn:msg:%s-is-completely-lost!-client-sent-None" % msg_id)
                        # removing from wanted list
                        self._not_received_list.remove(msg_id)
                        return None
                    else:
                        self._not_received_list.remove(msg_id)
                        _logger.debug("#debug:Finally!-received-msg-with-id: %s" % msg_id)
                        # sending acknowledgment
                        yield from self.send_ack(msg_id, writer)
                        return msg.get_data()
                else:
                    _logger.debug("#debig:msg_id:%s:-is-<-last_received-but-not-in-not_received-list" % msg_id)
                    # requesting to get msg counter of the client
                    yield from self.send_request(writer)
            # msg_id == self._last_received falls through: duplicate, ignored.
        except KeyError as e:
            _logger.warn("#warn:corrupted-message!")
            _logger.exception(e)
            return None

    def handle_request(self, msg):
        # The only supported request resynchronizes our message counter
        # with the client's (response carries the client's next id).
        if msg.get_request() == 'GET_MSG_COUNTER':
            self._last_received = msg.get_response() - 1
        else:
            _logger.debug("#debug:unknown-request-%s: " % msg.get_request())

    @asyncio.coroutine
    def send_ack(self, msg_id, writer):
        """
        sends an acknowledgment to the client
        :param msg_id: (int) the message that has been successfully received
        :param writer:
        """
        # check if there is a message which server missed
        wanted = None
        if len(self._not_received_list) > 0:
            # Piggy-back one missing-message request onto the ack.
            wanted = self._not_received_list[0]
        # creating and sending the acknowledgment message
        ack = ackknowledgment.Acknowledgment(msg_id, wanted)
        _logger.debug("#debug:sending-ack-%s" % ack)
        byte_ack = pickle.dumps(ack)
        writer.write(byte_ack)
        yield from writer.drain()

    @asyncio.coroutine
    def send_request(self, writer, request='GET_MSG_COUNTER'):
        # Ask the client a question (by default: its current msg counter).
        req = requests.Request(request=request, data=None)
        _logger.debug("#debug:sending-request-%s" % req)
        byte_req = pickle.dumps(req)
        writer.write(byte_req)
        yield from writer.drain()

    def push_into_queues(self, data):
        """
        pushes the items inside the data into shared queues
        :param data: list of dictionaries (measurements)
        """
        # Each consumer gets its own shallow copy so neither can mutate
        # the other's view of the measurement.
        for item in data:
            self._storage_queue.put(copy.copy(item))
            self._carbon_queue.put(copy.copy(item))

    def run(self):
        """Start the TLS server and block in the event loop until interrupted."""
        _logger.info('#info:starting-the-server-on-port-%s' % self._port)
        self._loop = asyncio.get_event_loop()
        coro = asyncio.start_server(self.client_connected, self._host, self._port, ssl=self._sslcontext)
        self._server = self._loop.run_until_complete(coro)
        try:
            self._loop.run_forever()
        except KeyboardInterrupt:
            _logger.error("#error:server-stopped-with-keyboardInterrupt")
            pass
        except Exception as e:
            _logger.error("#error:unpredicted-exception-in-server")
            _logger.exception(e)
        finally:
            # Always tear the server down, whatever stopped the loop.
            self.disconnect()

    def disconnect(self):
        """Close the server socket, wait for shutdown, and close the loop."""
        _logger.info("#info:disconnecting-the-server")
        self._server.close()
        self._loop.run_until_complete(self._server.wait_closed())
        self._loop.close()
| {
"repo_name": "i13tum/i13monserver",
"path": "server.py",
"copies": "2",
"size": "8954",
"license": "mit",
"hash": -6250001729452612000,
"line_mean": 35.2510121457,
"line_max": 113,
"alpha_frac": 0.5771722135,
"autogenerated": false,
"ratio": 4.241591662719091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00041760822089680674,
"num_lines": 247
} |
from selenium import webdriver
import os
import getpass
import json
from selenium.common.exceptions import WebDriverException, NoSuchElementException
home_path = os.getenv('HOME')
base_path = home_path + os.sep + '.nfw'  # per-user config directory
# Python 2 compatibility: make input() behave like raw_input().
try:
    input = raw_input
except NameError:
    pass
if not os.path.exists(base_path):
    os.mkdir(base_path)
out_file_path = base_path + os.sep + 'cred.json'  # cached credentials file
# Chrome driver
chrome_driver_path = 'https://sites.google.com/a/chromium.org/chromedriver/downloads'
def write_credentials(out_file_path, data):
    """
    Method to write credentials to data file.

    BUG FIX: the file object returned by open() was never closed; a context
    manager now guarantees the handle is flushed and released even if
    json.dump raises.
    """
    with open(out_file_path, 'w') as out_file:
        json.dump(data, out_file)
def read_credentials(out_file_path):
    """
    Method to read credentials from data file.
    If data file doesn't exist, gets info & creates it (interactive prompts).
    """
    cred = {}
    browser_attr = ['Chrome', 'Firefox']
    if not os.path.exists(out_file_path):
        # First run: collect LDAP credentials and browser choice interactively.
        print("============== NFW-IITM credentials [ LDAP ] ==============")
        cred['username'] = input('LDAP Username: ')
        cred['password'] = getpass.getpass('LDAP Password: ')
        while True:
            # Prompt reads like "Preferred browser [1-Chrome, 2-Firefox]: ".
            c = int(input(
                'Preferred browser [' + ''.join((str(i + 1) + '-' + b + ', ') for i, b in enumerate(browser_attr))[
                    :-2] + ']: '))
            if c in [1, 2]:
                cred['browser'] = {}
                cred['browser']['name'] = browser_attr[c - 1]
                if c == 1:  # Chrome
                    # Keep prompting until a working chromedriver is in place.
                    while True:
                        try:
                            # Checks if /path/to/chromedriver exists in credentials
                            driver_path = cred['browser'].get('driverPath', base_path + os.sep + 'chromedriver')
                            webdriver.Chrome(driver_path)
                            cred['browser']['driverPath'] = base_path + os.sep + 'chromedriver'
                            break
                        except WebDriverException:
                            # Makes sure user downloads chromedriver & puts in appropriate location
                            print('Chrome driver needs to be installed. It can be installed from here: {}.'.format(
                                chrome_driver_path))
                            print('NOTE: Chrome version must be >= 51.0.2704.0')
                            input('Place it in {} & continue..'.format(base_path))
                            cred['browser']['driverPath'] = base_path + os.sep + 'chromedriver'
                break
            else:
                print('Incorrect choice. Try again')
        write_credentials(out_file_path, cred)
    else:
        # Credentials were cached on a previous run.
        cred = json.load(open(out_file_path, 'r'))
    return cred
def auth(driver, cred):
    """
    Method for automating login procedure
    """
    try:
        # Fill the login form fields and submit in one pass.
        driver.find_element_by_xpath("//input[@id='ft_un']").send_keys(cred['username'])
        driver.find_element_by_xpath("//input[@id='ft_pd']").send_keys(cred['password'])
        driver.find_element_by_xpath("//input[@type='submit']").click()
    except NoSuchElementException:
        # The login form is absent when already authenticated (or offline).
        print('Already active or No internet connection')
def main():
    """
    The expected 'main()' function :)

    Loads (or interactively collects) credentials, launches the chosen
    browser, opens a probe URL and runs the login automation. Retries
    until the webdriver starts successfully.
    """
    while True:
        cred = read_credentials(out_file_path)
        try:
            # Instantiate webdriver.Chrome / webdriver.Firefox by name.
            driver = webdriver.__getattribute__(cred['browser']['name'])(cred['browser'].get('driverPath', ''))
            url = 'https://67.media.tumblr.com/tumblr_lmfix57faG1qhq4cpo1_400.gif'
            driver.get(url)
            auth(driver, cred)
            break
        except WebDriverException:
            # Makes sure user downloads chromedriver & puts in appropriate location
            print('Chrome driver needs to be installed. It can be installed from here: {}.'.format(
                chrome_driver_path))
            print('NOTE: Chrome version must be >= 51.0.2704.0')
            input('Place it in {} & continue..'.format(base_path))


if __name__ == '__main__':
    main()
| {
"repo_name": "Aravind-Suresh/nfw-access-iitm",
"path": "main.py",
"copies": "1",
"size": "4181",
"license": "mit",
"hash": 3599964512201232000,
"line_mean": 34.735042735,
"line_max": 115,
"alpha_frac": 0.5462807941,
"autogenerated": false,
"ratio": 4.012476007677543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001366901358003208,
"num_lines": 117
} |
"""
Monte Carlo estimation of value functions for BlackJack for a given policy
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import sys
# NOTE: Python 2 code — map() returns a list here; under Python 3 this
# concatenation would fail. Deck: ace, 2..10, plus three face cards as '10'.
CARDS_LIST = ['A'] + map(str, range(2, 11)) + map(str, [10]*3)
ACTIONS = ['H', 'S']
tr = []          # current episode's trajectory of (state, action) pairs
init = True      # True when a fresh episode must be dealt
MAX_ITER = 10000*50
iter = 0
G = {}           # state-hash -> list of observed returns
V = {}           # state-hash -> estimated value (mean of G)
CARD_INT = {}    # card string -> 1..10 index (aces map to 1)
for i, v in enumerate(CARDS_LIST[:10]):
    CARD_INT[v] = i + 1
class Params:
    # Empty attribute bag; State hangs sum/d_card/usable off instances.
    pass


class State(Params):
    def __str__(self):
        return ' '.join(map(str, [self.sum, self.d_card, self.usable]))

    def __hash__(self):
        # NOTE(review): returns a tuple, violating the __hash__ protocol
        # (hash() must return an int). The rest of the file only calls
        # s.__hash__() explicitly and uses the tuple as a dict key, so the
        # code works as used — do not "fix" without updating those callers.
        return (self.sum, CARD_INT[self.d_card], int(self.usable))
def val(cards):
    """Return (hand_value, usable_ace) for a list of card strings.

    Aces count as 11 when that (plus 1 per extra ace) keeps the hand at or
    below 21; otherwise every ace counts as 1.

    BUG FIX: the original added the 11-point ace credit even to hands with
    no ace at all (the post-loop adjustment ran unconditionally), and the
    usable-ace test ignored the +1 contributed by each additional ace.
    """
    s = 0
    aces = 0
    for cc in cards:
        if cc == 'A':
            aces += 1
        else:
            s += int(cc)
    if aces == 0:
        return s, False
    # One ace as 11, the rest as 1 — usable only if that stays <= 21.
    if s + 11 + (aces - 1) <= 21:
        return s + 11 + (aces - 1), True
    return s + aces, False
def deal_card():
    """Draw one card uniformly at random; the deck is treated as infinite."""
    return CARDS_LIST[np.random.randint(len(CARDS_LIST))]
def get_state(p_cards, d_cards):
    """Build the agent's State: player sum, usable-ace flag, dealer's up card."""
    s = State()
    s_p, u_p = val(p_cards)
    s.usable = u_p
    s.sum = s_p
    s.d_card = d_cards[0] # d_cards[0] - dealer's visible card
    return s
def get_action(s):
    # Deterministic threshold policy: hit ('H') on 19 or below, else stick ('S').
    return ACTIONS[0] if s.sum <= 19 else ACTIONS[1]
def submit_action(s, a):
    # Feedback from the environment (mutates the module-level p_cards/d_cards).
    # Dealer follows a deterministic policy.
    # Returns -1/0/+1 at episode end, or None if the episode continues.
    if a == 'H':
        p_cards.append(deal_card())
        # BUG FIX: val() returns a (sum, usable) tuple; the original compared
        # the tuple itself against 21 (always True under Python 2's mixed-type
        # ordering), ending every episode right after the first hit. Unpack
        # the sums, mirroring the correctly-written stick branch below.
        s_p, _ = val(p_cards)
        if s_p > 21:
            # Player busted
            return -1
        s_d, _ = val(d_cards)
        if s_d > 21:
            # Dealer busted; Player wins
            return 1
        elif s_d >= 17:
            # Dealer sticks
            return np.sign(s_p - s_d)
        else:
            # Dealer deals. New card is hidden
            d_cards.append(deal_card())
            return None
    else:
        # Player sticks
        s_p, _ = val(p_cards)
        s_d, _ = val(d_cards)
        return np.sign(s_p - s_d)
def push_tr(tr, g):
    """Record the episode return for every (state, action) pair visited.

    tr -- trajectory, list of (State, action) pairs
    g  -- return ( = terminal reward in this case )
    """
    for (s, a) in tr:
        ss = s.__hash__()
        # dict.has_key() is Python-2-only; setdefault does the same
        # create-if-missing in one lookup and works on Python 2 and 3.
        G.setdefault(ss, []).append(g)
def compute_values():
    """Average the recorded returns into the value estimate V[s]."""
    # .items() (unlike Python-2-only .iteritems()) works on Python 2 and 3.
    for k, v in G.items():
        V[k] = np.mean(v)
def plot_values(V, u):
    """3-D surface of state values over (dealer card, player sum) for
    usable-ace flag u (0 or 1)."""
    X, Y = np.meshgrid(range(1, 11), range(12, 22))
    arr = np.zeros((10, 10))
    # V keys are (player_sum, dealer_card_index, usable) tuples from State.__hash__.
    for k, v in V.iteritems():  # Python-2-only iterator
        if k[2] == u:
            arr[k[0]-12, k[1]-1] = v
    Z = np.reshape(arr, (10, 10))
    fig = plt.figure('Usable ace = ' + str(u))
    ax = axes3d.Axes3D(fig)
    surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
m = MAX_ITER/100  # progress-report interval (integer division under Python 2)
print('Computing..')
# First-visit-style MC evaluation: play episodes under the fixed policy,
# record returns, then average them into V.
while True:
    if init:
        # Fresh episode: deal two cards each to player and dealer.
        p_cards = [ deal_card() for i in range(2) ]
        d_cards = [ deal_card() for i in range(2) ]
        init = False
    s = get_state(p_cards, d_cards)
    a = get_action(s)
    r = submit_action(s, a)
    if not iter % m:
        print('Progress: ' + str(iter*100./MAX_ITER) + '%')
    # print i, s, a, r
    tr.append((s, a))
    if r is not None:
        # Terminal reward reached: record the episode and reset for the next.
        push_tr(tr, r)
        iter = iter + 1
        tr = []
        init = True
    if iter >= MAX_ITER:
        break
compute_values()
print('Done')
print('Plotting..')
plot_values(V, 0)
plot_values(V, 1)
plt.show()
| {
"repo_name": "Aravind-Suresh/rl-trials",
"path": "chapter-5/example-5.1/main.py",
"copies": "1",
"size": "3656",
"license": "mit",
"hash": 760592212909890400,
"line_mean": 21.1575757576,
"line_max": 74,
"alpha_frac": 0.5183260394,
"autogenerated": false,
"ratio": 2.8742138364779874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3892539875877987,
"avg_score": null,
"num_lines": null
} |
"""
Monte Carlo control with Exploring starts for BlackJack
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import sys
# NOTE: Python 2 code — map() returns a list here. Deck: ace, 2..10,
# plus three face cards represented as '10'.
CARDS_LIST = ['A'] + map(str, range(2, 11)) + map(str, [10]*3)
ACTIONS = ['H', 'S']
init = True
MAX_ITER = 10000#*50
iter = 0
G = {}           # state-hash -> {'H': returns, 'S': returns}
Q = {}           # state-hash -> {'H': mean return, 'S': mean return}
P = {}           # state-hash -> current greedy action
CARD_INT = {}    # card string -> 1..10 index (aces map to 1)
for i, v in enumerate(CARDS_LIST[:10]):
    CARD_INT[v] = i + 1
class Params:
    # Empty attribute bag; State hangs sum/d_card/usable off instances.
    pass


class State(Params):
    def __init__(self, h = (12,1,False)):
        # Rebuild a State from its hash tuple
        # (player_sum, dealer_card_index, usable_ace).
        self.sum, self.d_card, self.usable = h[0], CARDS_LIST[h[1]-1], bool(h[2])

    def __str__(self):
        return ' '.join(map(str, [self.sum, self.d_card, self.usable]))

    def __hash__(self):
        # NOTE(review): returns a tuple (the protocol expects an int); only
        # ever invoked explicitly as s.__hash__() to build dict keys, so the
        # file works as written — callers rely on the tuple shape.
        return (self.sum, CARD_INT[self.d_card], int(self.usable))
def val(cards):
    # Hand layout differs from example-5.1: cards[0] is a starting sum,
    # cards[1] an initial usable-ace flag, cards[2:] extra dealt card strings.
    c = sorted(cards[2:])
    s = cards[0]
    l = len(c)
    # Sum non-ace extras; sorted() places 'A' after the digit strings, so the
    # loop breaks at the first extra ace with i = its index.
    for i in range(l):
        cc = c[i]
        if cc == 'A':
            break
        else:
            s += int(cc)
    u = cards[1]
    if not l == 0:
        if (s+11) > 21:
            u = False
        else:
            u = cards[1]
        # NOTE(review): this adjustment looks buggy — when no extra ace was
        # dealt it still adds ace credit based on the last loop index, and it
        # may double-count an ace already folded into cards[0]. Confirm the
        # intended semantics before relying on this function.
        if u:
            s += (11 + (l-1-i))
        else:
            s += (l-i)
    return s, u
def deal_card():
    """Draw one card uniformly at random; the deck is treated as infinite."""
    return CARDS_LIST[np.random.randint(len(CARDS_LIST))]
def get_state(p_cards, d_cards):
    """Build the agent's State: player sum, usable-ace flag, dealer's up card."""
    s = State()
    s_p, u_p = val(p_cards)
    s.usable = u_p
    s.sum = s_p
    s.d_card = str(d_cards[0]) # d_cards[0] - dealer's visible card
    return s
def get_action(s):
    # Follow the current (deterministic) policy table for this state.
    return P[s.__hash__()]
def submit_action(s, a):
    # Feedback from the environment (mutates the module-level p_cards/d_cards).
    # Dealer follows a deterministic policy.
    # Returns -1/0/+1 at episode end, or None if the episode continues.
    if a == 'H':
        p_cards.append(deal_card())
        # BUG FIX: val() returns a (sum, usable) tuple; the original compared
        # the tuple itself against 21 (always True under Python 2's mixed-type
        # ordering), ending every episode right after the first hit. Unpack
        # the sums, mirroring the correctly-written stick branch below.
        s_p, _ = val(p_cards)
        if s_p > 21:
            # Player busted
            return -1
        s_d, _ = val(d_cards)
        if s_d > 21:
            # Dealer busted; Player wins
            return 1
        elif s_d >= 17:
            # Dealer sticks
            return np.sign(s_p - s_d)
        else:
            # Dealer deals. New card is hidden
            d_cards.append(deal_card())
            return None
    else:
        # Player sticks
        s_p, _ = val(p_cards)
        s_d, _ = val(d_cards)
        return np.sign(s_p - s_d)
def push_tr(tr, g):
    # tr - trajectory
    # g - return ( = terminal reward in this case )
    # Append the episode return to every visited (state, action) return list;
    # G was pre-populated for all states, so no existence check is needed.
    for (s, a) in tr:
        ss = s.__hash__()
        G[ss][a].append(g)
def compute_q_values():
    """Average the recorded returns into action-value estimates Q[s][a]."""
    # .items() (unlike Python-2-only .iteritems()) works on Python 2 and 3.
    # NOTE(review): states never visited still have empty return lists, so
    # np.mean yields NaN for them — preserved behavior.
    for k, v in G.items():
        Q[k]['H'] = np.mean(v['H'])
        Q[k]['S'] = np.mean(v['S'])
def update_policy(tr):
    """Make the policy greedy w.r.t. Q for every state visited in tr."""
    for (s, a) in tr:
        h = s.__hash__()
        # BUG FIX: max(Q[h]) iterates the dict's KEYS, so it always picked
        # 'S' (alphabetically last) regardless of value. Select the action
        # with the highest estimated return instead.
        P[h] = max(Q[h], key=Q[h].get)
def plot_q_values(Q, u):
    """Scatter-plot the greedy action per (dealer card, player sum) state.

    :param Q: state-hash -> {'H': value, 'S': value} table
    :param u: usable-ace flag (0 or 1) selecting which states to plot
    """
    X, Y = np.meshgrid(range(1, 11), range(12, 22))
    arr = np.zeros((10, 10))
    for k, v in Q.iteritems():
        if k[2] == u:
            # BUG FIX: take the action with the highest value estimate;
            # max(v) over a dict returns the largest KEY ('S' every time).
            best = max(v, key=v.get)
            arr[k[0]-12, k[1]-1] = ACTIONS.index(best)
    plt.figure('Usable ace = ' + str(u))
    plt.scatter(X, Y, c=arr)
m = MAX_ITER/100  # progress-report interval (integer division under Python 2)
print('Computing..')
# Monte Carlo control with exploring starts over every
# (player sum 12-21, dealer card 1-10, usable-ace 0/1) state.
STATE_HASHES = [ (i,j,k) for i in range(12, 22) for j in range(1, 11) for k in range(2) ]
for s in STATE_HASHES:
    Q[s] = {'H': [], 'S': []}
    # initial policy: stick on 20/21, otherwise hit
    P[s] = ACTIONS[1] if (s[0] > 19) else ACTIONS[0]
    G[s] = {'H': [], 'S': []}
while True:
    # Exploring start: random state and random first action.
    idx = np.random.randint(len(STATE_HASHES))
    h = STATE_HASHES[idx]
    s = State(h) # Exploring start
    a = ACTIONS[np.random.randint(2)]
    # p_cards/d_cards are module globals mutated by submit_action().
    p_cards = [h[0], h[2]]
    d_cards = [h[1], h[2]]
    # Generating an episode
    r = None
    tr = [(s, a)]
    while True:
        r = submit_action(s, a)
        if r is not None:
            break
        s = get_state(p_cards, d_cards)
        a = get_action(s)
        tr.append((s, a))
    print map(lambda x: (x[0].__hash__(), x[1]) ,tr), r
    push_tr(tr, r)
    compute_q_values()
    update_policy(tr)
    iter = iter + 1
    if not iter % m:
        print('Progress: ' + str(iter*100./MAX_ITER) + '%')
    if iter >= MAX_ITER:
        break
print('Done')
# print('Plotting..')
# plot_q_values(Q, 0)
# plot_q_values(Q, 1)
# plt.show()
| {
"repo_name": "Aravind-Suresh/rl-trials",
"path": "chapter-5/example-5.3/main.py",
"copies": "1",
"size": "4234",
"license": "mit",
"hash": 6331226023374175000,
"line_mean": 21.8864864865,
"line_max": 89,
"alpha_frac": 0.5075578649,
"autogenerated": false,
"ratio": 2.7565104166666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8735587445758736,
"avg_score": 0.005696167161586136,
"num_lines": 185
} |
__author__ = 'Archana V Menon, Sujith V'
def vertex_cover(G):
    """
    2-approximation for a minimum vertex cover: repeatedly take an
    arbitrary remaining edge, add both endpoints to the cover, and delete
    them from the graph.  The result is at most twice the optimum and the
    algorithm runs in O(|E|) time.
    http://en.wikipedia.org/wiki/Vertex_cover
    :param G: networkx graph
    :return: vertex cover as a list of nodes
    :raise ValueError: when graph is null
    """
    if not G:
        raise ValueError("Error : null graph")
    # work on a copy so the caller's graph is untouched
    work = G.copy()
    cover = []
    while len(work.edges()) > 0 :
        # any remaining edge will do
        edge = work.edges_iter().next()
        u, v = edge[0], edge[1]
        cover.append(u)
        cover.append(v)
        if u == v :
            # self-loop: removing the single node covers it
            work.remove_node(u)
            continue
        work.remove_node(u)
        work.remove_node(v)
    return cover
if __name__ == "__main__":
    # Demo: run the 2-approximation on a small test graph and draw it.
    # get graph
    # from read_graph import read_graph
    # G = read_graph("data/CA-GrQc.txt")
    from test_graphs import test_graph2
    G = test_graph2()
    # find approximate vertex cover
    vc = vertex_cover(G)
    print "Approximate vertex cover \n"
    print "Graph size : ", len(G)
    print "Length : ", len(vc)
    print "Nodes : ", vc
    # split nodes into cover / non-cover for two-colour drawing
    nodes_list1 = vc
    nodes_list2 = G.nodes()
    for node in nodes_list1:
        nodes_list2.remove(node)
    # draw graph
    print "\n\nDrawing graph. Might take some time ..."
    #from draw_graph import draw_graph
    #draw_graph(G, nodes_list1, nodes_list2, G.edges(), None)
    import networkx as nx
    nx.draw_graphviz(G)
| {
"repo_name": "sujithvm/skynet",
"path": "code/approximate_vertex_cover.py",
"copies": "1",
"size": "1837",
"license": "mit",
"hash": 8934462671471336000,
"line_mean": 20.6117647059,
"line_max": 73,
"alpha_frac": 0.5797495917,
"autogenerated": false,
"ratio": 3.566990291262136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9570416735837282,
"avg_score": 0.015264629424970982,
"num_lines": 85
} |
__author__ = 'Archana V Menon, Sujith V'
from approximate_vertex_cover import vertex_cover
def seed():
    # Seed 10 nodes from the approximate vertex cover of the CA-GrQc graph,
    # then simulate an SI-style spread for a user-supplied number of steps.
    # get graph
    from read_graph import read_graph
    G = read_graph("data/CA-GrQc.txt")
    print "Number of nodes: ", len(G.nodes())
    v=vertex_cover(G)
    v10=[]
    for i in range(0,10,1):
        v10.append(v[i])
    # NOTE(review): the message says "dominating set" but v10 comes from
    # vertex_cover() above.
    print "\nInitial 10 nodes selected from dominating set: ", v10
    current_infected_nodes = v10
    time = int(raw_input("\nEnter time elapsed : "))
    infected_nodes = []
    infected_nodes.extend(current_infected_nodes)
    # x_cor/y_cor accumulate the (time, #infected) curve
    x_cor = []
    y_cor = []
    x_cor.append(0)
    y_cor.append(len(infected_nodes))
    print ""
    print "Time", "\t", "Number of infected nodes"
    for t in range(0, time):
        # one step: every neighbour of an infected node becomes infected
        temp = set()
        for x in current_infected_nodes:
            temp = temp.union(set(G.neighbors(x)))
        current_infected_nodes = temp
        for x in temp :
            if not x in infected_nodes:
                infected_nodes.append(x)
        print t + 1, "\t\t", len(infected_nodes)
        x_cor.append(t + 1)
        y_cor.append(len(infected_nodes))
    # remaining (never-infected) nodes
    safe_nodes = G.nodes()
    for x in infected_nodes:
        if (x in safe_nodes):
            safe_nodes.remove(x)
    print "\nSafe nodes : ", safe_nodes
    print "\nInfected nodes : ", infected_nodes
    from draw_graph import draw_graph
    draw_graph(G, nodes_list1=infected_nodes, nodes_list2=safe_nodes, edge_list1=G.edges(), edge_list2=None)
    #from draw_graph import draw_curve
    #draw_curve(x_cor, y_cor)
    # draw the graph
    #from draw_graph import draw_graph_1
    #draw_graph_1(G, safe_nodes, infected_nodes, G.edges(), None)
seed() | {
"repo_name": "sujithvm/skynet",
"path": "code/spread.py",
"copies": "1",
"size": "1691",
"license": "mit",
"hash": 6594275640039391000,
"line_mean": 23.1714285714,
"line_max": 108,
"alpha_frac": 0.6002365464,
"autogenerated": false,
"ratio": 3.0914076782449724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9143845200255627,
"avg_score": 0.009559804877869001,
"num_lines": 70
} |
__author__ = 'Archana V Menon, Sujith V'
from test_graphs import test_graph2
from approximate_dominating_set import dominating_set
from fibonacci import fibonacci
# Demo: pair each dominating-set node with its uncovered neighbourhood and a
# fibonacci weight.
# get graph
G = test_graph2()
# find approximate vertex cover
# NOTE(review): despite the name, this variable holds the result of
# dominating_set(), not vertex_cover().
vertex_cover = dominating_set(G)
# generate fibonacci series
fib = fibonacci(len(vertex_cover))
# visited
visited = {}
# spread list
spread_list = []
# fib counter
ctr = 0
for node in vertex_cover :
    # find neighbours of each node
    neighbours = G.neighbors(node)
    # empty list to find final list
    neighbour_not_in_vertex_cover = [node]
    for neigh in neighbours:
        # for each neighbour which is not in vertex cover and not previously visited
        if neigh not in vertex_cover and neigh not in visited :
            # add to final list
            neighbour_not_in_vertex_cover.append(neigh)
            # set visited to true for that neigh(node)
            visited[neigh] = True
    # add ( fib, node, neighbours ) to final list
    spread_list.append((fib[ctr], node, neighbour_not_in_vertex_cover))
    # increment count
    ctr += 1
# print data
for entry in spread_list:
    print entry
# draw graph
# from draw_graph import draw_graph
# draw_graph(G, spread_list)
| {
"repo_name": "sujithvm/skynet",
"path": "code/clone.py",
"copies": "1",
"size": "1240",
"license": "mit",
"hash": 6973469797583474000,
"line_mean": 20.0169491525,
"line_max": 84,
"alpha_frac": 0.6758064516,
"autogenerated": false,
"ratio": 3.2291666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4404973118266666,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Archana V Menon, Sujith V'
import networkx as nx
from approximate_dominating_set import dominating_set
from approximate_vertex_cover import vertex_cover
from random import choice
from random import randint
from pso import pso
BENEFIT_LOWER = 30
BENEFIT_UPPER = 70
COST_LOWER = 20
COST_UPPER = 50
OPT_BENEFIT_LOWER = 50
OPT_BENEFIT_UPPER = 60
OPT_COST_LOWER = 20
OPT_COST_UPPER = 30
THRESHOLD_DIFF = 400
INITIAL_COST = 3 * COST_UPPER
USE_PSO = False
'''
type :
1 - dominating set
2 - vertex cover
3 - pso
4 - random
'''
pso_node_map = {}
D = 1
def opt_func(x):
    """Objective minimised by PSO: cost minus benefit, with x = [benefit, cost]."""
    benefit = x[0]
    cost = x[1]
    # degree-weighted variant (cost - D * benefit) is currently disabled
    return cost - benefit
lb = [OPT_BENEFIT_LOWER, OPT_COST_LOWER]  # PSO lower bounds: [benefit, cost]
ub = [OPT_BENEFIT_UPPER, OPT_COST_UPPER]  # PSO upper bounds: [benefit, cost]
def optimize(G, nodes) :
nv = []
for node in nodes :
D = G.degree(node)
val = -opt_func( [G.node[node]["benefit"], G.node[node]["cost"]] )
#if val > pso_node_map[D] :
# nv.append([node, D])
nv.append([node, D])
# sort by degree
nv.sort(key=lambda x: x[1], reverse=True)
print nv
print " "
filtered_nodes = []
cost = INITIAL_COST
for entry in nv :
node = entry[0]
node_cost = G.node[node]["cost"]
if cost - node_cost >= 0 :
filtered_nodes.append(node)
cost -= node_cost
return filtered_nodes
def seed(G, type):
    # Pick initial infected nodes by strategy and simulate spread to saturation.
    # `type`: 1 = dominating set, 2 = vertex cover, 3 = random node.
    # NOTE(review): the module docstring lists "3 - pso / 4 - random", but
    # here 3 picks a random node; `type` also shadows the builtin.
    current_infected_nodes = []
    if type == 1 :
        ds = dominating_set(G)
        if USE_PSO : current_infected_nodes.extend(optimize(G, ds))
        else : current_infected_nodes.append(ds[0])
    elif type == 2 :
        vc = vertex_cover(G)
        if USE_PSO : current_infected_nodes.extend(optimize(G, vc))
        else : current_infected_nodes.append(vc[0])
    elif type == 3 :
        rand_node = choice(G.nodes())
        current_infected_nodes.append(rand_node)
    number_of_initial_affected_nodes = len(current_infected_nodes)
    infected_nodes = []
    infected_nodes.extend(current_infected_nodes)
    # x_cor/y_cor accumulate the (time, #infected) curve returned to the caller
    x_cor = []
    y_cor = []
    x_cor.append(0)
    y_cor.append(len(infected_nodes))
    print "\nGraph details \n"
    print "Graph size : ", len(G.nodes())
    print "Number of initial affected nodes : ", number_of_initial_affected_nodes
    print "Initial affected nodes : ", infected_nodes
    print ""
    print "Time", "\t", "Number of infected nodes"
    time = 0
    print time, "\t\t", len(infected_nodes)
    # Spread until every node is infected.  NOTE(review): this would loop
    # forever on a disconnected graph; __main__ passes the largest
    # connected component.
    while len(infected_nodes) != len(G) :
        temp = set()
        for x in current_infected_nodes:
            temp = temp.union(set(G.neighbors(x)))
        current_infected_nodes = temp
        for x in temp :
            if not x in infected_nodes:
                infected_nodes.append(x)
        print time + 1, "\t\t", len(infected_nodes)
        x_cor.append(time + 1)
        y_cor.append(len(infected_nodes))
        time += 1
    safe_nodes = G.nodes()
    for x in infected_nodes:
        if (x in safe_nodes):
            safe_nodes.remove(x)
    # print "\nSafe nodes : ", safe_nodes
    # print "\nInfected nodes : ", infected_nodes
    # from draw_graph import draw_graph
    # draw_graph(G, nodes_list1=infected_nodes, nodes_list2=safe_nodes, edge_list1=G.edges(), edge_list2=None)
    return x_cor, y_cor
if __name__ == '__main__' :
    # get graph
    from read_graph import read_graph
    G1 = read_graph("data/CA-GrQc.txt")
    # keep only the largest connected component so seed()'s sweep terminates
    G = max(nx.connected_component_subgraphs(G1), key=len)
    degrees = G.degree()
    max_deg = G.degree( max(degrees, key=degrees.get) )
    # Precompute the best PSO objective per degree; the module global D
    # feeds the (currently disabled) degree-weighted opt_func variant.
    for d in xrange(1, max_deg + 1) :
        D = d
        xopt, fopt = pso(opt_func, lb, ub)
        pso_node_map[d] = -fopt
        print d
    # assign random cost/benefit attributes to every node
    for n in G.nodes() :
        G.node[n]['cost'] = randint(COST_LOWER, COST_UPPER)
        G.node[n]['benefit'] = randint(BENEFIT_LOWER, BENEFIT_UPPER)
    # compare the three seeding strategies on the same graph
    ds_x_cor, ds_y_cor = seed(G, 1)
    vc_x_cor, vc_y_cor = seed(G, 2)
    rand_x_cor, rand_y_cor = seed(G, 3)
    from draw_graph import draw_curve
    draw_curve(ds_x_cor, ds_y_cor, "Dominating set",
               vc_x_cor, vc_y_cor, "Vertex cover",
               rand_x_cor, rand_y_cor, "Random selection")
| {
"repo_name": "sujithvm/skynet",
"path": "code/seeding_pso.py",
"copies": "1",
"size": "4181",
"license": "mit",
"hash": -8079048116457714000,
"line_mean": 21.722826087,
"line_max": 110,
"alpha_frac": 0.5850275054,
"autogenerated": false,
"ratio": 2.915620641562064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40006481469620636,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Archana V Menon, Sujith V'
import networkx as nx
from approximate_dominating_set import dominating_set
from approximate_vertex_cover import vertex_cover
from random import choice
'''
type :
1 - dominating set
2 - vertex cover
3 - pso
4 - random
'''
def seed(G, type):
    # Seed one node per connected component by strategy, then simulate
    # spread to saturation.  `type`: 1 = dominating set, 2 = vertex cover,
    # 3 = random node (NOTE: the module docstring lists 3 as "pso").
    current_infected_nodes = []
    disjoint_graphs_gen = nx.connected_component_subgraphs(G)
    disjoint_graphs = list(disjoint_graphs_gen)
    for cc in disjoint_graphs :
        if type == 1 :
            ds = dominating_set(cc)
            current_infected_nodes.append(ds[0])
        elif type == 2 :
            vc = vertex_cover(cc)
            current_infected_nodes.append(vc[0])
        elif type == 3 :
            rand_node = choice(cc.nodes())
            current_infected_nodes.append(rand_node)
    number_of_initial_affected_nodes = len(current_infected_nodes)
    infected_nodes = []
    infected_nodes.extend(current_infected_nodes)
    # (time, #infected) curve returned to the caller
    x_cor = []
    y_cor = []
    x_cor.append(0)
    y_cor.append(len(infected_nodes))
    print "\nGraph details \n"
    print "Graph size : ", len(G.nodes())
    print "Number of disjoint graphs : ", len(disjoint_graphs)
    print "Number of initial affected nodes : ", number_of_initial_affected_nodes
    print "Initial affected nodes : ", infected_nodes
    print ""
    print "Time", "\t", "Number of infected nodes"
    time = 0
    print time, "\t\t", len(infected_nodes)
    # one seed per component, so the sweep can reach every node
    while len(infected_nodes) != len(G) :
        temp = set()
        for x in current_infected_nodes:
            temp = temp.union(set(G.neighbors(x)))
        current_infected_nodes = temp
        for x in temp :
            if not x in infected_nodes:
                infected_nodes.append(x)
        print time + 1, "\t\t", len(infected_nodes)
        x_cor.append(time + 1)
        y_cor.append(len(infected_nodes))
        time += 1
    safe_nodes = G.nodes()
    for x in infected_nodes:
        if (x in safe_nodes):
            safe_nodes.remove(x)
    # print "\nSafe nodes : ", safe_nodes
    # print "\nInfected nodes : ", infected_nodes
    # from draw_graph import draw_graph
    # draw_graph(G, nodes_list1=infected_nodes, nodes_list2=safe_nodes, edge_list1=G.edges(), edge_list2=None)
    return x_cor, y_cor
if __name__ == '__main__' :
    # get graph
    from read_graph import read_graph
    G = read_graph("data/CA-GrQc.txt")
    # run the spread simulation once per seeding strategy and plot all three
    ds_x_cor, ds_y_cor = seed(G, 1)
    vc_x_cor, vc_y_cor = seed(G, 2)
    rand_x_cor, rand_y_cor = seed(G, 3)
    from draw_graph import draw_curve
    draw_curve(ds_x_cor, ds_y_cor, "Dominating set",
               vc_x_cor, vc_y_cor, "Vertex cover",
               rand_x_cor, rand_y_cor, "Random selection")
| {
"repo_name": "sujithvm/skynet",
"path": "code/seeding.py",
"copies": "1",
"size": "2726",
"license": "mit",
"hash": -8806401143688628000,
"line_mean": 24.2407407407,
"line_max": 110,
"alpha_frac": 0.5950110051,
"autogenerated": false,
"ratio": 3.1734575087310826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42684685138310824,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Archana V Menon, Sujith V'
import networkx as nx
import matplotlib.pyplot as plt
def draw_graph(G, nodes_list1, nodes_list2, edge_list1, edge_list2):
    """
    Render the graph with two node groups and two edge groups.
    :param G: networkx graph
    :param nodes_list1: nodes drawn small and red
    :param nodes_list2: nodes drawn larger and yellow
    :param edge_list1: edges drawn blue
    :param edge_list2: edges drawn green (may be None)
    """
    # one spring layout shared by every draw call
    layout = nx.spring_layout(G)
    for nodes, size, colour in ((nodes_list1, 30, 'r'), (nodes_list2, 50, 'y')):
        nx.draw_networkx_nodes(G, layout, nodelist=nodes, node_size=size, node_color=colour)
    for edges, colour in ((edge_list1, 'b'), (edge_list2, 'g')):
        nx.draw_networkx_edges(G, layout, edgelist=edges, width=1, edge_color=colour)
    # labels for nodes
    # nx.draw_networkx_labels(G,pos,font_size=10,font_family='sans-serif')
    plt.show() # display
def draw_curve(x1, y1, lab_1, x2, y2, lab_2, x3, y3, lab_3):
    """Plot three labelled infection curves on shared axes and show the figure."""
    for xs, ys, label in ((x1, y1, lab_1), (x2, y2, lab_2), (x3, y3, lab_3)):
        plt.plot(xs, ys, label=label)
    plt.legend()
    plt.axis([0, 20, 0, 6000])
    plt.xlabel("time/iterations")
    plt.ylabel("Number of nodes infected")
    plt.show()
| {
"repo_name": "sujithvm/skynet",
"path": "code/draw_graph.py",
"copies": "1",
"size": "1349",
"license": "mit",
"hash": 2388437118848211500,
"line_mean": 24.9423076923,
"line_max": 86,
"alpha_frac": 0.6434395849,
"autogenerated": false,
"ratio": 2.8280922431865827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3971531828086583,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Archana V Menon, Sujith V'
def approximate_set_cover(sets):
    """
    Greedy approximation for set cover.

    :param sets: list of sets
    :return: chosen sets (references into `sets`), in selection order
    :raise ValueError: when the list of sets is empty
    """
    if not sets:
        raise ValueError("Error : Empty list of sets")
    # the universe is everything any set contains
    universe = set()
    for s in sets:
        universe.update(s)
    set_cover = []
    while universe:
        # Greedy step: take the set covering the most still-uncovered
        # elements.  BUG FIX: the original compared each candidate's
        # intersection size against len(S) -- the previously chosen set's
        # FULL size -- which under-counts once the universe shrinks and
        # yields non-greedy, larger covers.  Ties keep the earliest set,
        # matching the original's first-wins update.
        best = max(sets, key=lambda s: len(universe & s))
        universe.difference_update(best)
        set_cover.append(best)
    return set_cover
if __name__ == "__main__":
    # Demo: greedy cover over a small family of integer sets.
    sets = [set([1, 2, 4, 9]),
            set([3, 8, 10]),
            set([9, 1]),
            set([1]),
            set([2, 3, 12]),
            set([4, 5]),
            set([5, 7, 1, 2]),
            set([5, 6, 10, 3, 4]),
            set([4, 7, 9]),
            set([6]),
            set([8, 9, 1, 4])]
    print approximate_set_cover(sets)
| {
"repo_name": "sujithvm/skynet",
"path": "code/approximate_set_cover.py",
"copies": "1",
"size": "1143",
"license": "mit",
"hash": 956742298919094000,
"line_mean": 18.3728813559,
"line_max": 54,
"alpha_frac": 0.4978127734,
"autogenerated": false,
"ratio": 3.322674418604651,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43204871920046506,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Archana V Menon, Sujith V'
def dominating_set(G):
    """
    Greedy heuristic: repeatedly remove the highest-degree node, adding it
    to the result, until no edges remain.
    :param G: networkx graph
    :return: selected nodes as a list
    :raise ValueError: when graph is null
    """
    if not G:
        raise ValueError("Error : null graph")
    # work on a copy so the caller's graph is untouched
    work = G.copy()
    selected = []
    while len(work.edges()) > 0 :
        # old networkx API: degree() returns a node -> degree dict
        degrees = work.degree()
        top = max(degrees, key=degrees.get)
        selected.append(top)
        work.remove_node(top)
    return selected
if __name__ == "__main__":
    # Demo: run the greedy heuristic on a small test graph and draw it.
    # get graph
    # from read_graph import read_graph
    # G = read_graph("data/CA-GrQc.txt")
    from test_graphs import test_graph2
    G = test_graph2()
    # find approximate dominating set
    ds = dominating_set(G)
    print "Minimum dominating set \n"
    print "Graph size : ", len(G)
    print "Length : ", len(ds)
    print "Nodes : ", ds
    # split nodes into selected / unselected for two-colour drawing
    nodes_list1 = ds
    nodes_list2 = G.nodes()
    for node in nodes_list1:
        nodes_list2.remove(node)
    # draw graph
    print "\n\nDrawing graph. Might take some time ..."
    from draw_graph import draw_graph
    draw_graph(G, nodes_list1, nodes_list2, G.edges(), None)
"repo_name": "sujithvm/skynet",
"path": "code/approximate_dominating_set.py",
"copies": "1",
"size": "1633",
"license": "mit",
"hash": -2566313254082010000,
"line_mean": 21.6944444444,
"line_max": 74,
"alpha_frac": 0.6111451317,
"autogenerated": false,
"ratio": 3.7113636363636364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48225087680636364,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ArchBang'
### A stab at a spell system ###
# The idea is that you need at least 3 letters to form a workable spell
# one vowel must be included, this designates something about the spell
# perhaps its type (create rock, summon ally, etc)
# or its form (line, area, self, spot, nearest, everywhere)
# a, e, i, o, u, y
# The player starts with a full alphabet but each consonant, once used, is gone forever
# read in the allowed combinations here
# check whether a spell is legitimate and return its effects
# (look at how items / spells are built anyway modularly to figure out the best way)
# should be shape 'consonant/vowel/consonant' but can chain them?
# 'rixbetfyz'
# 13/03/2014 reworking
# following on from conversing with Darren I think it's best to take his idea that
# the distance between two consonants establishes the strength
# further to that the polarity establishes something further
# retain the vowel in the middle, actually have two
# one sets the form
# one sets the content
#e.g. 'BAUM' -> medium strong enemy targeting damage spell
# 'VIIT' -> weak etc
# What do the vowels stand for?
# consonant, form, content, consonant
# Forms:
# a = area
# e = line
# i = self
# o = adjacent
# u = another
# y = nearest
# Contents:
# a = access / blocking
# e =
# i =
# o = healing / damage
# u =
# y = knowledge / confusion
# How should the spellcasting be structured?
# 1. see if it's a valid spell, respond appropriately if not
# 2. establish the form, content and power of the spell
# 3. remove the used consonants from the alphabet
# 4. play out the effects of the successfully cast spell
# runes the player can choose from
alphabetString = 'abcdefghijklmnopqrstuvwxyz'
alphabet = []  # mutable pool: removeLetters() blanks spent consonants to " "
for l in alphabetString: alphabet.append(l)
vowels = "aeiouyAEIOUY"
consonants = "bcdfghjklmnpqrstvwxzBCDFGHJKLMNPQRSTVWXZ"
# vowel in rune slot 1 selects the spell's form (its shape / target)
runeForms = \
    {
    'a' : "area",
    'e' : "everything",
    'i' : "line",
    'o' : "nearest",
    'u' : "other",
    'y' : "self"
    }
# vowel in rune slot 2 selects the spell's content (its effect)
runeContents = \
    {
    'a' : "damage",
    'e' : "heal",
    'i' : "access",
    'o' : "block",
    'u' : "damage",
    'y' : "heal"
    }
def spellPower(runes):
    # Spell power = signed distance between the two outer consonants'
    # positions in the (mutable) alphabet list; a missing rune counts as
    # position 0.
    first_pos = 0
    last_pos = 0
    for position, letter in enumerate(alphabet):
        if runes[0] == letter:
            first_pos = position
        if runes[3] == letter:
            last_pos = position
    return last_pos - first_pos
def checkRunes(runes):
    """Validate a 4-rune spell [consonant, form-vowel, content-vowel, consonant].

    Returns one of: "noForm", "noContent", "noPower", "forgotten", "success".
    """
    if len(runes) != 4:
        return "noForm"
    if runes[1] not in vowels:
        # Error - spell has no form
        return "noForm"
    if runes[2] not in vowels:
        # Error - spell has no content
        return "noContent"
    # Both outer runes must be distinct consonants.  BUG FIX: the original
    # `runes[0] and runes[3] not in consonants` only tested runes[3]
    # because of operator precedence.
    if runes[0] not in consonants or runes[3] not in consonants or runes[0] == runes[3]:
        # Error - spell has no power
        return "noPower"
    # Spent consonants are blanked out of `alphabet`, so a miss here means
    # the runes were used up.  BUG FIX: the original tested this condition
    # twice and returned "noPower" first, making "forgotten" unreachable.
    if runes[0] not in alphabet or runes[3] not in alphabet:
        # runes forgotten
        return "forgotten"
    return "success"
def removeLetters(spell):
    # Spend the consonants used by this spell: blank them out of the shared
    # alphabet so they can never be used again (vowels are reusable).
    for letter in spell:
        if letter in vowels:
            continue
        for position in range(len(alphabet)):
            if alphabet[position] == letter:
                alphabet[position] = " "
def castRunes(runes):
    """Attempt to cast a spell.

    On success returns (power, form, content, 0) and consumes the used
    consonants; otherwise returns (0, 0, 0, error_code).
    """
    outcome = checkRunes(runes)
    if outcome == "success":
        power = spellPower(runes)
        form = runeForms[runes[1]]
        content = runeContents[runes[2]]
        removeLetters(runes)
        return power, form, content, 0
    # error codes: 1 = no form (also the fallback, e.g. "forgotten"),
    # 2 = no content, 3 = no power
    error_codes = {"noForm": 1, "noContent": 2, "noPower": 3}
    return 0, 0, 0, error_codes.get(outcome, 1)
# Read in the runebook | redundant
def readSpellsToDict(filePath):
    # Read a 2-column CSV file into a dictionary.
    # NOTE(review): opened in binary mode and the value is trimmed with
    # [:-2], which assumes CRLF line endings -- Python 2 era code.
    spells = {}
    with open(filePath, 'rb') as source:
        for row in source:
            columns = row.split(',')
            spells[columns[0]] = columns[1][:-2]
    return spells
#Line shaped spells
| {
"repo_name": "ArchBang85/S_Crimson",
"path": "Vallat.py",
"copies": "1",
"size": "4776",
"license": "apache-2.0",
"hash": -3850972031202221600,
"line_mean": 25.9830508475,
"line_max": 87,
"alpha_frac": 0.5789363484,
"autogenerated": false,
"ratio": 3.478514202476329,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9529872906337189,
"avg_score": 0.0055155289078280935,
"num_lines": 177
} |
__author__ = 'archen'
# Core Django imports
from django.conf.urls import patterns, url
from django.contrib.auth.decorators import login_required
# Third party imports
# App-specific imports
from mantises import views
# URL routes for the mantises app (legacy Django patterns() syntax).
urlpatterns = patterns('',
                       url(r'^$', views.index, name='index'),
                       # Mantis URLs
                       url(r'^my-mantises/$', views.my_mantises, name='my-mantises'),
                       url(r'^(?P<mantis_id>\d+)/$', views.detail_mantis, name='detail-mantis'),
                       url(r'^(?P<pk>\d+)/edit$', login_required(views.MantisUpdate.as_view()), name='edit-mantis'),
                       url(r'^my-mantises/add/$', login_required(views.MantisCreate.as_view()), name='add-mantis'),
                       # Molt URLs
                       url(r'^(?P<mantis_id>\d+)/molt/$', views.do_molt, name='molt'),
                       url(r'^(?P<mantis_id>\d+)/molts/add/$', login_required(views.MoltCreate.as_view()), name='add-molt'),
                       url(r'^molts/(?P<pk>\d+)/edit/$', login_required(views.MoltUpdate.as_view()), name='edit-molt'),
                       url(r'^(?P<mantis_id>\d+)/molts/$', views.molt_history, name='molt-history'),
                       # Breed URLs
                       url(r'^breeds/$', views.breeds, name='breeds'),
                       url(r'^breeds/(?P<breed_id>\d+)/$', views.detail_breed, name='detail-breed'),
                       url(r'^breeds/add/$', login_required(views.BreedCreate.as_view()), name='add-breed'),
                       url(r'^breeds/(?P<pk>\d+)/edit$', login_required(views.BreedUpdate.as_view()), name='edit-breed'),
                       # Ooth URLs
                       url(r'^ooths/$', views.my_ooths, name='my-ooths'),
                       url(r'^(?P<mantis_id>\d+)/ooths/(?P<ooth_id>\d+)/$', views.detail_ooth, name='detail-ooth'),
                       url(r'^(?P<mantis_id>\d+)/ooths/add/$', login_required(views.OothCreate.as_view()), name='add-ooth'),
                       url(r'^ooths/(?P<pk>\d+)/edit$', login_required(views.OothUpdate.as_view()), name='edit-ooth'),
) | {
"repo_name": "archen/mantistrack",
"path": "mantistrack/mantises/urls.py",
"copies": "1",
"size": "1745",
"license": "mit",
"hash": -2667998183315673600,
"line_mean": 44.9473684211,
"line_max": 105,
"alpha_frac": 0.635530086,
"autogenerated": false,
"ratio": 2.7567140600315954,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.38922441460315954,
"avg_score": null,
"num_lines": null
} |
__author__ = 'archen'
# Core Django imports
from django import forms
# Third party imports
from datetimewidget.widgets import DateTimeWidget
# App-specific imports
from mantises.models import Mantis, Breed, Molt, Ooth
class MantisForm(forms.ModelForm):
    """Create/edit form for a Mantis, with date-time pickers on born/died."""
    class Meta:
        model = Mantis
        fields = ('name', 'breed', 'born', 'died', 'sex', 'gallery', 'container', 'from_colony')
        widgets = {
            'born': DateTimeWidget(),
            'died': DateTimeWidget(),
        }
class BreedForm(forms.ModelForm):
    """Create/edit form for a Breed and its husbandry parameters."""
    class Meta:
        model = Breed
        fields = ('picture', 'short_name', 'long_name', 'life_expectancy', 'adult_instar_male', 'adult_instar_female',
                  'low_temperature', 'high_temperature', 'low_humidity', 'high_humidity')
class MoltForm(forms.ModelForm):
    """Create/edit form for a Molt event, with a date-time picker on date."""
    class Meta:
        model = Molt
        fields = ('date', 'from_instar', 'to_instar')
        widgets = {
            'date': DateTimeWidget(),
        }
class OothForm(forms.ModelForm):
    """Create/edit form for an Ootheca, with date-time pickers on the laid/hatched dates."""
    class Meta:
        model = Ooth
        fields = ('name', 'picture', 'laid_by', 'date_laid', 'date_hatched', 'container', 'nymphs')
        widgets = {
            'date_laid': DateTimeWidget(),
            'date_hatched': DateTimeWidget(),
} | {
"repo_name": "archen/mantistrack",
"path": "mantistrack/mantises/forms.py",
"copies": "1",
"size": "1267",
"license": "mit",
"hash": 5692427383241126000,
"line_mean": 26.5652173913,
"line_max": 118,
"alpha_frac": 0.588792423,
"autogenerated": false,
"ratio": 3.549019607843137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4637812030843137,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Archon_ren'
import json
from gensim.models import Word2Vec
from gensim import matutils
from numpy import float32 as REAL,array
class patchdata(object):
    """Loads a patch/tag JSON file and builds a minimal word2vec lookup
    containing only the tags that occur in the data."""
    def __init__(self,model_path,patch_patch,verbosity):
        # NOTE(review): the parameter is spelled "patch_patch"; it is the
        # path to the patch JSON file.
        self.patch_path = patch_patch
        self.user_data = {}        # raw JSON mapping: key -> list of tags
        self.tag_data = []         # union of all tags seen
        self.model = None          # full gensim word2vec model (lazy)
        self.verbose = verbosity
        self.model_path = model_path
        self.minimium_model = {}   # tag -> 300-dim vector, needed tags only
        self.no_match_tag = []     # tags absent from the word2vec vocabulary
    def union(self,a, b):
        # Order-insensitive union of two iterables, as a list.
        return list(set(a) | set(b))
    def load_patch_data(self):
        # Read the patch JSON file into self.user_data.
        with open(self.patch_path) as json_file:
            user_data = json.load(json_file)
            json_file.close()  # redundant: the with-block already closes it
        self.user_data = user_data
    def get_tags(self):
        # Union all tag lists, chunking every 10000 records to keep the
        # intermediate unions small.
        tags = []
        tags_set = []
        i = 0
        temp_data = []
        for value in self.user_data.values():
            tags = self.union(value,tags)
            i += 1
            if int(i/10000)*10000 == i:
                tags_set.append(tags)
                tags = []
        self.tag_data = tags
        for k in tags_set:
            # NOTE(review): this overwrites tag_data on every pass with
            # union(k, temp_data) where temp_data stays empty, so only the
            # last chunk survives and the sub-10000 remainder is lost.
            # Looks like it should accumulate into temp_data instead.
            self.tag_data = self.union(k,temp_data)
    def load_word_to_vec_model(self):
        # Load the full (binary) word2vec model from disk; slow for big models.
        self.model = Word2Vec.load_word2vec_format(self.model_path, binary=True)
        if self.verbose:
            print('word to vec model loaded')
    def get_minimium_model(self):
        '''
        Build self.minimium_model: a dict containing just the tags present in
        the data and their 300-dim vectors.  Multi-word tags that miss the
        vocabulary fall back to the unit-normalised mean of their word
        vectors; tags with no match at all go to self.no_match_tag.
        '''
        if self.model == None:
            self.load_word_to_vec_model()
        for item in self.tag_data:
            try:
                vec = self.model[item]
                self.minimium_model[item] = vec
            except KeyError:
                try:
                    mean = []
                    for word in item.split():
                        mean.append(self.model[word])
                    mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
                    self.minimium_model[item] = mean
                except KeyError:
                    self.no_match_tag.append(item)
        if len(self.no_match_tag) !=0:
            print('%d tags not found in word to vec model'% len(self.no_match_tag))
    def load(self):
        # Convenience: run the whole pipeline.
        self.load_patch_data()
        self.get_tags()
        self.get_minimium_model()
| {
"repo_name": "archonren/similarity",
"path": "patch.py",
"copies": "1",
"size": "2347",
"license": "apache-2.0",
"hash": -4325610464215146500,
"line_mean": 32.0563380282,
"line_max": 103,
"alpha_frac": 0.5317426502,
"autogenerated": false,
"ratio": 3.7372611464968153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9746831171287329,
"avg_score": 0.004434525081897085,
"num_lines": 71
} |
__author__ = 'Archon_ren'
import json,pickle
from gensim.models import Word2Vec
from gensim import matutils
from numpy import float32 as REAL,array,shape,str,zeros
from scivq import *
from scipy.stats.stats import pearsonr
class user(object):
    """Lightweight wrapper around one raw user record from users.json."""

    def __init__(self, user_dict):
        # Pull out the handful of fields the rest of the pipeline uses.
        for attr, key in (('id', 'id'), ('sig_id', 'sig_id'),
                          ('tags', 'tags_new'),
                          ('design_group_id', 'design_group_id')):
            setattr(self, attr, user_dict[key])

    def show(self):
        # Debug dump of the record to stdout.
        for field in (self.id, self.sig_id, self.design_group_id, self.tags):
            print(field)

    def output(self):
        """Return the tag list rendered as a string."""
        return str(self.tags)
class users_data(object):
    def __init__(self,k = 20, data_path = 'users.json',model_path = 'GoogleNews.bin'):
        # :param k: number of k-means tag clusters
        # :param data_path: users JSON file
        # :param model_path: binary word2vec model file
        self.data_path = data_path
        self.model_path = model_path
        self.minimium_model = {}     # tag -> 300-dim vector, needed tags only
        self.no_match_tag = []       # tags missing from the word2vec vocabulary
        self.tag_data = []           # union of all users' tags
        self.k = k
        self.vec_dict = {}           # tag -> cluster id
        self.abnormal = []           # keys with an all-zero vote histogram
        self.corr_dict = {}          # key -> ranked similar keys / groups
        self.design_group_tag = {}   # 'group<id>' -> union of member tags
        self.design_group_sig = {}   # 'group<id>' -> sig_id
        self.no_match = []           # users with no matching design group
        self.out={}
        self.match_count = {}
        self.no_match_group = []
def load_Data(self):
with open(self.data_path) as jsonfile:
self.users_data = json.load(jsonfile)
for key, value in self.users_data.items():
self.users_data[key] = user(value)
jsonfile.close()
    def load_word_to_vec_model(self):
        # Load the full (binary) word2vec model; slow for large models.
        self.model = Word2Vec.load_word2vec_format(self.model_path, binary=True)
def union(self,a, b):
return list(set(a) | set(b))
def get_tags(self):
for value in self.users_data.values():
self.tag_data = self.union(value.tags,self.tag_data)
    def get_design_group_tag(self):
        # Build per-design-group tag unions and sig ids, keyed 'group<id>'.
        # First pass: create an empty slot for every group that appears.
        for value in self.users_data.values():
            if value.design_group_id != '':
                self.design_group_tag['group'+value.design_group_id] = []
                self.design_group_sig['group'+value.design_group_id] = []
        # Second pass: merge member tags; the sig_id of the LAST member seen
        # wins -- presumably all members share one sig_id; verify.
        for value in self.users_data.values():
            if value.design_group_id != '':
                self.design_group_tag['group' + value.design_group_id] = self.union(value.tags,self.design_group_tag['group'+value.design_group_id])
                self.design_group_sig['group'+value.design_group_id] = value.sig_id
    def get_minimium_model(self):
        '''
        Build self.minimium_model: a dict containing just the tags present in
        the data and their corresponding 300-dim vectors.  Multi-word tags
        missing from the vocabulary fall back to the unit-normalised mean of
        their word vectors; tags with no match at all go to no_match_tag.
        '''
        self.load_word_to_vec_model()
        for item in self.tag_data:
            try:
                vec = self.model[item]
                self.minimium_model[item] = vec
            except KeyError:
                try:
                    mean = []
                    # skip the empty-string tag entirely
                    if item != '':
                        for word in item.split():
                            mean.append(self.model[word])
                    mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
                    self.minimium_model[item] = mean
                except KeyError:
                    self.no_match_tag.append(item)
        if len(self.no_match_tag) !=0:
            print('%d tags not found in word to vec model'% len(self.no_match_tag))
    def clustering(self):
        '''
        k-means-cluster the tag vectors; fills self.vec_dict mapping each
        tag to its cluster id.  (kmeans/vq come from the star-import of
        `scivq` at the top of the file.)
        '''
        if self.minimium_model == {}:
            print('minimium model has not been load')
            raise AttributeError
        elif type(self.minimium_model) != dict:
            raise TypeError
        else:
            vec = []
            vec_key = []
            # keep keys and vectors in matching order
            for key in self.minimium_model.keys():
                vec_key.append(key)
                vec.append(self.minimium_model[key])
            vec_array = array(vec).reshape((-1,300))
            centers,dist = kmeans(vec_array,self.k)
            code,distance = vq(vec_array,centers)
            for i in range(len(code)):
                self.vec_dict[vec_key[i]] = code[i]
def vote(self):
    """Convert tag lists into normalised per-cluster vote distributions.

    For every user and every design group, count how many of its tags fall
    into each k-means cluster (via self.vec_dict) and normalise the counts
    into a (k, 1) distribution. Entities whose tags produce no votes are
    appended to self.abnormal. Requires clustering() to have filled
    self.vec_dict; raises AttributeError when it is empty, TypeError when
    it is not a dict.

    Results land in self.user_item_dict and self.design_item_dict.
    """
    if self.vec_dict == {}:
        print('clustering not completed')
        raise AttributeError
    if type(self.vec_dict) != dict:
        raise TypeError

    def _tally(tags):
        # Count cluster hits for one tag list; returns the raw (k, 1)
        # vote vector. Tags known to have no vector, or absent from the
        # clustering result, are skipped. (The original duplicated this
        # loop verbatim for users and design groups.)
        votes = zeros((self.k, 1))
        for tag in tags:
            if tag not in self.no_match_tag:
                try:
                    votes[self.vec_dict[tag]] += 1
                except KeyError:
                    pass  # tag never made it into the clustering input
        return votes

    user_item_dict = {}
    for key in self.users_data.keys():
        votes = _tally(self.users_data[key].tags)
        if sum(votes) != 0:
            user_item_dict[key] = votes / sum(votes)
        else:
            self.abnormal.append(key)
    self.user_item_dict = user_item_dict

    design_item_dict = {}
    for key in self.design_group_tag.keys():
        votes = _tally(self.design_group_tag[key])
        if sum(votes) != 0:
            design_item_dict[key] = votes / sum(votes)
        else:
            self.abnormal.append(key)
    self.design_item_dict = design_item_dict
def most_similar(self, key1, topN):
    """Return the topN user keys whose vote vectors correlate best with key1's.

    Ranks every other user by Pearson correlation against key1, caches the
    shortlist in self.corr_dict[key1] and returns it. Returns None when a
    vote vector is missing (KeyError is swallowed, as in the original);
    an IndexError still propagates when fewer than topN candidates exist.
    """
    scores = {}
    try:
        for candidate in self.user_item_dict.keys():
            if candidate != key1:
                scores[candidate] = pearsonr(self.user_item_dict[key1],
                                             self.user_item_dict[candidate])[0]
        ranked = sorted(scores, key=scores.get, reverse=True)
        shortlist = [ranked[i] for i in range(topN)]
        self.corr_dict[key1] = shortlist
        return shortlist
    except KeyError:
        pass
def match(self, user_id):
    """Rank design groups sharing user_id's sig by vote-vector correlation.

    Returns group ids sorted by descending Pearson correlation against the
    user's vote vector, or None when a lookup fails (KeyError is swallowed,
    matching the original behaviour).
    """
    scores = {}
    try:
        for group_id, group_vector in self.design_item_dict.items():
            if self.users_data[user_id].sig_id == self.design_group_sig[group_id]:
                scores[group_id] = pearsonr(group_vector, self.user_item_dict[user_id])[0]
        return sorted(scores, key=scores.get, reverse=True)
    except KeyError:
        pass
def final_suggestion(self):
    """Assign each user that has a sig to a design group.

    Pipeline:
      1. Rank candidate groups per user via self.match().
      2. Pick the best-ranked group — or the second-best when the user
         already belongs to a design group (the first is presumably their
         own); users with no usable candidate go to self.no_match.
      3. Track per-group assignment counts, then give each unmatched user
         the least-loaded group sharing their sig.

    Results land in self.out (user -> group) and self.match_count
    (group -> number of assigned users); groups nobody picked are listed
    in self.no_match_group.
    """
    self.corr_dict = {}
    for key in self.users_data.keys():
        if self.users_data[key].sig_id != '':
            self.corr_dict[key] = self.match(key)
    for key in self.corr_dict.keys():
        if self.corr_dict[key] is None:
            self.no_match.append(key)
        elif len(self.corr_dict[key]) == 0:
            self.no_match.append(key)
        else:
            if self.users_data[key].design_group_id != '':
                # Skip the user's current group (ranked first) when a
                # second candidate exists.
                try:
                    self.out[key] = self.corr_dict[key][1]
                except IndexError:  # was a bare except: only a missing
                    self.no_match.append(key)  # second candidate is expected
            else:
                self.out[key] = self.corr_dict[key][0]
    # Groups nobody was assigned to.
    for key in self.design_group_sig.keys():
        if key not in self.out.values():
            self.no_match_group.append(key)
    # Initialise assignment counters, then tally the assignments so far.
    for key in self.design_item_dict.keys():
        self.match_count[key] = 0
    for item in self.no_match_group:
        self.match_count[item] = 0
    for assigned_group in self.out.values():
        self.match_count[assigned_group] += 1
    # Fallback: hand unmatched users the least-loaded group with their sig.
    for key in self.no_match:
        sig = self.users_data[key].sig_id
        pool = []
        for group_id, group_sig in self.design_group_sig.items():
            if group_sig == sig:
                pool.append([self.match_count[group_id], group_id])
        pool.sort()
        if len(pool) != 0:
            self.out[key] = pool[0][1]
            self.match_count[pool[0][1]] += 1
def load_clustering_result(self):
    """Load a pre-computed tag->cluster mapping from 'class.dat', if present.

    On success sets self.vec_dict and fixes self.k to the 1000 clusters the
    cached result was built with; on any load failure the method is a no-op
    so the caller can fall back to clustering() from scratch.

    Fixes vs. the original: the redundant infile.close() inside the `with`
    block is gone, and the bare `except:` (which also swallowed
    SystemExit/KeyboardInterrupt) is narrowed to `except Exception`.
    """
    try:
        with open('class.dat', 'rb') as infile:
            self.vec_dict = pickle.load(infile)
        # The cached clustering was produced with k = 1000.
        self.k = 1000
    except Exception:
        # Best-effort: a missing or corrupt cache leaves state untouched.
        pass
"repo_name": "archonren/similarity",
"path": "design_group.py",
"copies": "1",
"size": "8532",
"license": "apache-2.0",
"hash": -8498851844953210000,
"line_mean": 37.09375,
"line_max": 148,
"alpha_frac": 0.5094936709,
"autogenerated": false,
"ratio": 3.7819148936170213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4791408564517021,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ardevelop'
import os
import sys
import pwd
import grp
import time
import signal
import fcntl
import threading
import multiprocessing
import argparse
#region constants
# Message templates written to stderr by the daemon helpers.
ERROR_MESSAGE_PATTERN = "ERROR: %s\n"
WARNING_MESSAGE_PATTERN = "WARNING: %s\n"
# Service sub-commands accepted via the -s command-line flag.
START = "start"
STOP = "stop"
RESTART = "restart"
INSTALL = "install"
ACTIONS = [START, STOP, RESTART, INSTALL]
# SysV init-script template rendered by Daemon.install_for_linux();
# %(name)s / %(executable)s / %(pidfile)s / %(generated)s are substituted there.
INSTALL_SCRIPT = """
#!/bin/sh
#
# %(name)s: autogenerated by ardaemon on %(generated)s
#
# chkconfig: - 20 80
# description: Starts and stops daemon.
# Source function library.
. /etc/rc.d/init.d/functions
name="%(name)s"
executable="%(executable)s"
pidfile="%(pidfile)s"
start() {
$executable -s start
retval=$?
rh_status
return $retval
}
stop() {
$executable -s stop
retval=$?
rh_status
return $retval
}
restart() {
stop
start
}
reload() {
false
}
rh_status() {
status -p $pidfile $name
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
$1
;;
stop)
$1
;;
restart)
$1
;;
reload)
false
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart}"
exit 2
esac
exit $?
"""
#endregion
# setproctitle is optional: warn and fall back to a no-op when absent.
try:
    from setproctitle import setproctitle
except ImportError:
    sys.stderr.write(WARNING_MESSAGE_PATTERN % "No module \"setproctitle\"\n")
    def setproctitle(title):
        # No-op stand-in so callers can set the process title unconditionally.
        pass
class Daemon:
    """Double-fork Unix daemon controller driven by command-line flags.

    Intended use::

        with Daemon():
            ...  # daemon body

    ``__enter__`` parses the service flags (-s start/stop/restart/install
    plus per-setting overrides) and performs the requested action,
    daemonizing the current process for "start". Written for Python 2
    (``except E, ex`` syntax, the ``file()`` builtin, ``0660`` octals).
    """

    def __init__(self, name=None, pid_path="/var/run", title=None, user=None, group=None, parser=None, working_dir=None,
                 stdout=os.devnull, stdin=os.devnull, stderr=os.devnull):
        # Defaults derive from the script being run: name from its file
        # name, working directory from its directory.
        path, executable = os.path.split(os.path.abspath(sys.argv[0]))
        self.name = name = name or os.path.splitext(executable)[0]
        self.pid_path = pid_path = pid_path or path
        self.pid_file = os.path.join(pid_path, "%s.pid" % name)
        self.working_dir = working_dir or path
        self.title = title    # process title (effective only with setproctitle)
        self.user = user      # run-as user name
        self.group = group    # run-as group name
        self.stdin = stdin    # stream path (str) or file object
        self.stdout = stdout
        self.stderr = stderr
        self.parser = parser  # optional pre-built ArgumentParser to extend
        self.daemon = False   # becomes True once start() is invoked

    def __enter__(self):
        """Parse the service arguments and dispatch the requested command."""
        parser = self.parser or argparse.ArgumentParser()
        group = parser.add_argument_group('service')
        group.add_argument("-s", metavar="cmd", default=None, choices=ACTIONS, type=str, dest="_cmd", help="command")
        group.add_argument("-sn", metavar="name", default=None, type=str, dest="_name", help="name")
        group.add_argument("-su", metavar="user", default=None, type=str, dest="_user", help="run as user")
        group.add_argument("-sg", metavar="group", default=None, type=str, dest="_group", help="run as group")
        group.add_argument("-sp", metavar="path", default=None, type=str, dest="_path", help="pid file path")
        group.add_argument("-sw", metavar="path", default=None, type=str, dest="_wd", help="working directory")
        group.add_argument("-st", metavar="title", default=None, type=str, dest="_title", help="process title")
        group.add_argument("-si", metavar="python", default=None, type=str, dest="_python", help="python interpreter")
        group.add_argument("-stdout", metavar="path", default=None, type=str, dest="_stdout", help="output stream")
        group.add_argument("-stdin", metavar="path", default=None, type=str, dest="_stdin", help="input stream")
        group.add_argument("-stderr", metavar="path", default=None, type=str, dest="_stderr", help="error stream")
        self.args = args = parser.parse_args()
        # Command-line values override constructor arguments.
        self.name = args._name or self.name
        self.user = args._user or self.user
        self.group = args._group or self.group
        self.title = args._title or self.title
        self.working_dir = args._wd or self.working_dir
        self.stdout = args._stdout or self.stdout
        self.stdin = args._stdin or self.stdin
        self.stderr = args._stderr or self.stderr
        self.pid_file = os.path.join(args._path or self.pid_path, "%s.pid" % self.name)
        command = self.args._cmd
        if START == command:
            self.start()
        elif STOP == command:
            self.stop()
            sys.exit(0)
        elif RESTART == command:
            self.stop()
            self.start()
        elif INSTALL == command:
            self.install()
            sys.exit(0)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # NOTE(review): daemon_process is recorded in start() *before* the
        # double fork in demonize(), so the final daemon's pid differs from
        # it; confirm which process this pid-file cleanup is meant to run in.
        if self.daemon and self.daemon_process == os.getpid():
            self.pf_del()

    def error(self, msg):
        """Write msg to stderr and terminate the process with exit code 1."""
        sys.stderr.write(ERROR_MESSAGE_PATTERN % msg)
        sys.exit(1)

    def pf_del(self):
        """Remove the pid file, ignoring a missing file."""
        try:
            os.remove(self.pid_file)
        except OSError:
            pass

    def pf_get(self):
        """Return the pid stored in the pid file, or None if absent/garbled."""
        try:
            with open(self.pid_file, "r") as fp:
                return int(fp.read().strip())
        except (IOError, ValueError):
            return None

    def pf_set(self):
        """Write the current pid into the pid file (fatal on failure)."""
        try:
            pid = os.getpid()
            with open(self.pid_file, "w+") as fp:
                fp.write(str(pid))
        except (IOError, OSError), ex:
            self.error("Cannot create pid file to \"%s\" with error \"%s\"." % (self.pid_file, ex))

    def pf_init(self, uid, gid):
        """Create an empty pid file owned by uid:gid with mode 0660.

        Done while still privileged, before demonize() drops to the target
        user, so the unprivileged daemon can later write its pid into it.
        """
        try:
            with open(self.pid_file, "w+"):
                pass
            os.chmod(self.pid_file, 0660)
            os.chown(self.pid_file, uid, gid)
        except (IOError, OSError), ex:
            self.error("Cannot init pid file to \"%s\" with error \"%s\"." % (self.pid_file, ex))

    def demonize(self):
        """Classic double-fork daemonization.

        Resolves the target user/group, forks twice (parents exit), drops
        privileges, detaches from the controlling session, records the pid
        and redirects the standard streams.
        """
        try:
            if self.user:
                user = pwd.getpwnam(self.user)
            else:
                user = pwd.getpwuid(os.getuid())
        except KeyError:
            return self.error("User \"%s\" not found." % self.user)
        try:
            gid = grp.getgrnam(self.group).gr_gid if self.group else user.pw_gid
        except KeyError:
            return self.error(ERROR_MESSAGE_PATTERN % ("Group \"%s\" not found." % self.group))
        # Fork #1: the parent exits so the child is re-parented to init.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)
        except OSError:
            return self.error("Error occurred on fork #1.")
        self.pf_init(user.pw_uid, gid)
        # Drop group before user: setgid would be refused after setuid.
        os.setgid(gid)
        os.setuid(user.pw_uid)
        os.chdir(self.working_dir)
        os.setsid()
        os.umask(0)
        # Fork #2: guarantees the daemon can never reacquire a terminal.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)
        except OSError:
            return self.error("Error occurred on fork #2.")
        self.pf_set()
        if self.title:
            setproctitle(self.title)
        # Rebind standard streams; string values are treated as paths
        # (Python 2 ``file`` builtin), anything else is used as-is.
        # stderr is opened unbuffered (third argument 0).
        sys.stdin = file(self.stdin, 'r') if isinstance(self.stdin, str) else self.stdin
        sys.stdout = file(self.stdout, 'w+') if isinstance(self.stdout, str) else self.stdout
        sys.stderr = file(self.stderr, 'w+', 0) if isinstance(self.stderr, str) else self.stderr

    def start(self):
        """Daemonize this process unless a pid file says it already runs."""
        pid = self.pf_get()
        if pid:
            return self.error("Daemon is already running.")
        self.daemon = True
        self.daemon_process = os.getpid()
        self.demonize()

    def stop(self):
        """SIGTERM the recorded pid until it disappears, then drop the pid file.

        NOTE(review): the kill loop only exits via OSError, and relies on
        the English "No such process" message — locale-sensitive; confirm.
        """
        pid = self.pf_get()
        if pid:
            try:
                while 1:
                    os.kill(pid, signal.SIGTERM)
                    time.sleep(0.1)
            except OSError, ex:
                if str(ex).find("No such process") > 0:
                    self.pf_del()
                else:
                    return self.error("Error on stopping server with message \"%s\"." % ex)
        else:
            return self.error("Daemon pid file not found.")

    def install(self):
        """Install a system service wrapper (Linux/SysV init only)."""
        import platform
        system = platform.system()
        if "Linux" == system:
            self.install_for_linux()
        else:
            self.error("Not implemented install script for system \"%s\"" % system)

    def install_for_linux(self):
        """Render INSTALL_SCRIPT and write it to /etc/rc.d/init.d/<name>.

        The generated script re-invokes this executable with -s start/stop;
        the -s and -si flags (and their values) are stripped from the
        recorded command line so the init script can supply its own.
        """
        import datetime
        executable = self.args._python or "python"
        argv_iter = iter(sys.argv)
        try:
            while 1:
                val = argv_iter.next()
                if val in ("-s", "-si"):
                    argv_iter.next()  # also skip the flag's value
                else:
                    executable += " " + val
        except StopIteration:
            pass
        script = INSTALL_SCRIPT % {
            "name": self.name,
            "pidfile": self.pid_file,
            "executable": executable,
            "generated": datetime.datetime.now()
        }
        script_path = "/etc/rc.d/init.d/%s" % self.name
        if os.path.exists(script_path):
            self.error("Daemon already installed.")
        else:
            try:
                with open(script_path, "w+") as fp:
                    fp.write(script)
                os.chmod(script_path, 0755)
                print "Successfully install."
            except (IOError, OSError), ex:
                self.error("Installation Error. %s." % ex)
def add_watch_thread(parent_process_id, frequency=0.1):
    """Start a watcher thread that SIGTERMs this process when the given
    parent process disappears.

    The parent is probed every `frequency` seconds with signal 0
    (existence check only); when the probe raises OSError the parent is
    gone and the current process is sent SIGTERM.
    """
    def _watch(pid):
        while True:
            try:
                os.kill(pid, 0)  # signal 0: probe, delivers nothing
                time.sleep(frequency)
            except OSError:
                os.kill(os.getpid(), signal.SIGTERM)
    watcher = threading.Thread(target=_watch, args=(parent_process_id,))
    watcher.start()
def subprocess(target, title=None, args=None, kwargs=None):
    """Run `target` in a child process that self-terminates when this
    (parent) process dies; returns the started multiprocessing.Process.
    """
    parent_pid = os.getpid()
    def child(watched_pid, proc_title, fn, fn_args, fn_kwargs):
        # Runs in the child: set its title, arm the parent watcher,
        # then hand control to the target callable.
        if proc_title:
            setproctitle(proc_title)
        add_watch_thread(watched_pid)
        fn(*(fn_args or ()), **(fn_kwargs or {}))
    worker = multiprocessing.Process(target=child,
                                     args=(parent_pid, title, target, args, kwargs))
    worker.start()
    return worker
def subprocess_module(module_name, method_name, title=None, args=None, kwargs=None):
    """Run module_name.method_name in a watched child process.

    Fix: __import__('a.b.c') returns the top-level package 'a', so only the
    *remaining* path segments must be traversed with getattr. The original
    left a single-segment name in the traversal list, wrongly doing
    getattr(module, module_name) on the module itself.
    """
    def target(*call_args, **call_kwargs):
        module = __import__(module_name)
        # Descend from the top-level package to the leaf module, if dotted.
        for module_part in module_name.split('.')[1:]:
            module = getattr(module, module_part)
        getattr(module, method_name)(*call_args, **call_kwargs)
    subprocess(target, title, args, kwargs)
def get_process_id():
    """Return the current process id (thin wrapper over os.getpid)."""
    pid = os.getpid()
    return pid
def set_title(title):
    """Set the current process title (a no-op when setproctitle is absent)."""
    return setproctitle(title)
def infinite_loop():
    """Block forever, sleeping one second per iteration (keeps a daemon alive)."""
    while 1:
        time.sleep(1)
if "__main__" == __name__:
    # With any argument: enter the Daemon context manager (all flag parsing
    # and dispatch happens inside __enter__). Without arguments: re-invoke
    # this script with -h to show the generated help text.
    if len(sys.argv) > 1:
        with Daemon():
            pass
    else:
        executable = sys.argv[0]
        os.system("python %s -h" % executable)
"repo_name": "ardevelop/ardaemon",
"path": "ardaemon/__init__.py",
"copies": "1",
"size": "10763",
"license": "mit",
"hash": 5975705047077450000,
"line_mean": 26.8139534884,
"line_max": 120,
"alpha_frac": 0.5519836477,
"autogenerated": false,
"ratio": 3.7488679902473008,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9795452708529966,
"avg_score": 0.001079785883466856,
"num_lines": 387
} |
__author__ = 'ardevelop'
import struct
import zlib
import cStringIO
def read_ui8(c):
    """Decode a 1-byte little-endian unsigned integer from `c`."""
    return struct.unpack("<B", c)[0]


def read_ui16(c):
    """Decode a 2-byte little-endian unsigned integer from `c`."""
    return struct.unpack("<H", c)[0]


def read_ui32(c):
    """Decode a 4-byte little-endian unsigned integer from `c`."""
    return struct.unpack("<I", c)[0]
def from_stream(stream):
    """Parse the SWF header from a binary stream.

    Returns a dict with: compressed, version, size, xmin/xmax/ymin/ymax
    (stage rectangle in pixels), width, height, fps, frames.
    NOTE: written for Python 2 — indexing a str yields a one-char str,
    which the read_ui* helpers expect.
    """
    metadata = {}
    signature = "".join(struct.unpack("<3c", stream.read(3)))
    if signature not in ("FWS", "CWS"):
        raise ValueError("Invalid SWF signature: %s." % signature)
    metadata["compressed"] = signature.startswith("C")  # CWS = zlib body
    metadata["version"] = read_ui8(stream.read(1))
    metadata["size"] = read_ui32(stream.read(4))
    buf = stream.read(metadata["size"])
    if metadata["compressed"]:
        buf = zlib.decompress(buf)
    # The stage RECT is bit-packed: the top 5 bits give the per-field
    # width, then four nbits-wide values (xmin, xmax, ymin, ymax) in twips.
    nbits = read_ui8(buf[0]) >> 3
    current_byte, buf = read_ui8(buf[0]), buf[1:]
    bit_cursor = 5
    for item in "xmin", "xmax", "ymin", "ymax":
        value = 0
        # Read nbits bits MSB-first, refilling current_byte as needed.
        for value_bit in xrange(nbits - 1, -1, -1):
            if (current_byte << bit_cursor) & 0x80:
                value |= 1 << value_bit
            bit_cursor += 1
            if bit_cursor > 7:
                current_byte, buf = read_ui8(buf[0]), buf[1:]
                bit_cursor = 0
        metadata[item] = value / 20  # twips -> pixels (20 twips per pixel)
    metadata["width"] = metadata["xmax"] - metadata["xmin"]
    metadata["height"] = metadata["ymax"] - metadata["ymin"]
    # Frame rate is 8.8 fixed point; keep only the integer part.
    metadata["fps"] = read_ui16(buf[0:2]) >> 8
    metadata["frames"] = read_ui16(buf[2:4])
    return metadata
def metadata(swf=None, filename=None):
    """Return SWF header metadata from a filename, file-like object, or raw data.

    Exactly one of `swf` / `filename` should be given (filename wins when
    both are present). File-like inputs are rewound before parsing; string
    input is wrapped in an in-memory stream. Raises ValueError for
    unusable input.
    """
    if filename:
        # Fix: SWF is binary — open in "rb" so Windows text mode cannot
        # mangle \r\n byte sequences (the original used "r").
        with open(filename, "rb") as stream:
            return from_stream(stream)
    elif swf:
        if hasattr(swf, "read") and hasattr(swf, "seek"):
            swf.seek(0)
            return from_stream(swf)
        elif isinstance(swf, str):
            stream = cStringIO.StringIO(swf)
            try:
                return from_stream(stream)
            finally:
                stream.close()
    raise ValueError("Invalid input: %s." % type(swf))
if "__main__" == __name__:
    # CLI: take the SWF path from argv or prompt for it, then print the header.
    import sys
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    else:
        sys.stdout.write("Path to swf: ")
        filename = sys.stdin.readline()[:-1]  # strip the trailing newline
    print metadata(filename=filename)
"repo_name": "ardevelop/arswf",
"path": "arswf/__init__.py",
"copies": "1",
"size": "2189",
"license": "mit",
"hash": 5601663334607820000,
"line_mean": 26.7215189873,
"line_max": 66,
"alpha_frac": 0.5577889447,
"autogenerated": false,
"ratio": 3.5024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45601889447,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Areg'
import random
index = 0
def InsertionSort(InputArray):
    """Insertion-sort InputArray in place into DESCENDING order, then print
    each element on its own line.

    Fix: the bounds guard `i >= 0` is now checked *before* indexing. The
    original evaluated `InputArray[i] < key` first, so with i == -1 it read
    InputArray[-1] (the last element) via Python's negative indexing before
    noticing the loop should stop.
    """
    for j in range(1, len(InputArray)):
        key = InputArray[j]
        i = j - 1
        # Shift smaller elements right until key's descending slot is found.
        while (i >= 0) and (InputArray[i] < key):
            InputArray[i + 1] = InputArray[i]
            i = i - 1
        InputArray[i + 1] = key
    for x in InputArray:
        print(x)
def Merge(InputArray, p, q, r):
    """Merge the sorted runs InputArray[p..q] and InputArray[q+1..r]
    (inclusive bounds) in place into ascending order.

    Rewritten: the original copied the left run off by one (`p + i - 1`),
    wrote back over range(p, r) so slot r was never filled, and had no
    exhaustion guard, so it corrupted data (e.g. merging [3] and [1, 2]
    lost the 3).
    """
    left = InputArray[p:q + 1]
    right = InputArray[q + 1:r + 1]
    i = j = 0
    for k in range(p, r + 1):
        # Take from the left run while it has elements and its head is
        # <= the right head (<= keeps the merge stable); otherwise right.
        if j >= len(right) or (i < len(left) and left[i] <= right[j]):
            InputArray[k] = left[i]
            i += 1
        else:
            InputArray[k] = right[j]
            j += 1
def MergeSort(InputArray, p, r):
    """Recursively merge-sort InputArray[p..r] (inclusive bounds) in place."""
    if p >= r:
        return  # zero or one element: already sorted
    q = int((p + r) / 2)
    MergeSort(InputArray, p, q)
    MergeSort(InputArray, q + 1, r)
    Merge(InputArray, p, q, r)
def MyBubbleSort(InputArray):
    """Bubble-sort InputArray in place into ascending order.

    The original body was the same backward+forward neighbour-swap pass
    copy-pasted seven times, which only finishes for arrays short enough to
    settle within that fixed number of passes (and its range bounds never
    touched a 2-element array at all). This version repeats full adjacent
    passes until a sweep makes no swap, so any input ends up fully sorted.
    """
    dirty = True
    while dirty:
        dirty = False
        for i in range(len(InputArray) - 1):
            if InputArray[i] > InputArray[i + 1]:
                # Bubble the larger value toward the end.
                InputArray[i], InputArray[i + 1] = InputArray[i + 1], InputArray[i]
                dirty = True
#MergeSort(InputArray, 0, len(InputArray)-1)
#for i in range(0, 1000000, 1):
#InputArray = random.sample(range(1, 100), 20)
#MyBubbleSort(InputArray)
#for k in range(0, len(InputArray)-1, 1):
# if InputArray[k] > InputArray[k+1]:
#print "no !"
def MybinaryCalc(A=None, B=None):
    """Add two equal-length binary numbers given as bit lists (MSB first).

    Generalised from the original hard-coded demo (A=0001, B=0011): with no
    arguments it behaves exactly as before. Prints and returns the sum as a
    bit list one element longer than the inputs (leading carry included).
    Assumes len(A) == len(B) and bits are 0/1.
    """
    if A is None:
        A = [0, 0, 0, 1]
    if B is None:
        B = [0, 0, 1, 1]
    C = [0] * (len(A) + 1)
    # C[i+1] first holds the carry into position i (written by the previous
    # iteration), then is overwritten with that position's result bit;
    # C[i] receives the outgoing carry.
    for i in range(len(A) - 1, -1, -1):
        total = A[i] + B[i] + C[i + 1]  # 0..3
        C[i + 1] = total % 2
        if total >= 2:
            C[i] = 1
    print(C)
    return C
MybinaryCalc() | {
"repo_name": "Arnukk/DAA",
"path": "main.py",
"copies": "1",
"size": "6606",
"license": "mit",
"hash": 1309539648214479400,
"line_mean": 27.6017316017,
"line_max": 50,
"alpha_frac": 0.4831970936,
"autogenerated": false,
"ratio": 2.983739837398374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8953209253829679,
"avg_score": 0.002745535433739174,
"num_lines": 231
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.