text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'ASUS'
class GroupHelper:
    """Page-object helper for group CRUD on the addressbook application."""

    def __init__(self, app):
        self.app = app

    def open_groups_page(self):
        self.app.wd.find_element_by_link_text("groups").click()

    def create(self, group):
        """Full creation flow: open page, open form, fill, submit, return."""
        self.open_groups_page()
        self.init_group_creation()
        self.fill_group_form(group)
        self.submit_group_creation()
        self.return_to_groups_page()

    def init_group_creation(self):
        self.app.wd.find_element_by_name("new").click()

    def change_field_value(self, field_name, text):
        """Click/clear/type into the named input; no-op when text is None."""
        driver = self.app.wd
        if text is not None:
            driver.find_element_by_name(field_name).click()
            driver.find_element_by_name(field_name).clear()
            driver.find_element_by_name(field_name).send_keys(text)

    def fill_group_form(self, group):
        for field, value in (("group_name", group.name),
                             ("group_header", group.header),
                             ("group_footer", group.footer)):
            self.change_field_value(field, value)

    def submit_group_creation(self):
        self.app.wd.find_element_by_name("submit").click()

    def select_first_group(self):
        self.app.wd.find_element_by_name("selected[]").click()

    def delete_first_group(self):
        self.open_groups_page()
        self.select_first_group()
        # submit deletion
        self.app.wd.find_element_by_name("delete").click()
        self.return_to_groups_page()

    def modify_first_group(self, new_group_data):
        driver = self.app.wd
        self.open_groups_page()
        self.select_first_group()
        # open modification form
        driver.find_element_by_name("edit").click()
        # fill group form
        self.fill_group_form(new_group_data)
        # submit modification
        driver.find_element_by_name("update").click()
        self.return_to_groups_page()

    def return_to_groups_page(self):
        self.app.wd.find_element_by_link_text("group page").click()
"repo_name": "alen4ik/python_training",
"path": "fixture/group.py",
"copies": "1",
"size": "2048",
"license": "apache-2.0",
"hash": 8689464730519984000,
"line_mean": 29.5820895522,
"line_max": 63,
"alpha_frac": 0.59375,
"autogenerated": false,
"ratio": 3.4478114478114477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9537289703086795,
"avg_score": 0.0008543489449305198,
"num_lines": 67
} |
__author__ = 'Asus'
__author__ = 'Asus'
from IClassifier import IClassifier
from utilities import load_stf
from glove import Glove
from scipy.spatial.distance import cosine
from scipy.spatial.distance import euclidean
from nltk.corpus import wordnet as wn
class GloveWordnetRetrofitClassifier(IClassifier):
    """Answers multiple-choice similarity questions using retrofitted GloVe
    vectors whose vocabulary is keyed by WordNet sense strings
    (e.g. ``word%1:00:00::``)."""

    def __init__(self, filename, no_d):
        """Load vectors from `filename` with dimensionality `no_d`."""
        # load_stf returns a ready Glove instance; the fresh Glove() assigned
        # first is immediately replaced (kept for interface compatibility)
        self.GloveInstance = Glove()
        self.GloveInstance = load_stf(filename, no_d)

    def answerQuestion(self, wordAskedFor, question, possibilities):
        """Return ``(best_possibility, comment)``, picking the possibility with
        the highest cosine similarity between any sense vector of the asked
        word and any sense vector of the candidate. `comment` logs every
        pairwise similarity."""
        qWs = self.converter(wordAskedFor)
        pWs = []
        for i, p in enumerate(possibilities):
            pWs.append([])
            for pS in self.converter(p):
                pWs[i].append(pS)
        pVs = []
        qVs = []
        maxSim = 0
        correct = -1
        comment = ''
        for i, pW in enumerate(pWs):
            pVs.append([])
            for p in pW:
                try:
                    pVs[i].append(self.GloveInstance.word_vectors[self.GloveInstance.dictionary[p]])
                except KeyError:
                    # was a bare except; only out-of-vocabulary lookups are expected here
                    print(p)  # was Py2 `print p` (a SyntaxError on Python 3)
        for q in qWs:
            try:
                qVs.append(self.GloveInstance.word_vectors[self.GloveInstance.dictionary[q]])
            except KeyError:
                print(q)
        for j, qV in enumerate(qVs):
            for i, pVn in enumerate(pVs):
                for k, pV in enumerate(pVn):
                    a = 1 - cosine(qV, pV)
                    comment += '\n\t\t\tsim(' + qWs[j] + ',' + pWs[i][k] + ')=' + str(a)
                    if a > maxSim:
                        maxSim = a
                        correct = i
        print(correct)
        # NOTE(review): if every lookup misses, `correct` stays -1 and the
        # LAST possibility is returned — confirm that fallback is intended
        return (possibilities[correct], comment)

    def converter(self, word):
        """Map a surface word to its WordNet sense-key strings by scanning the
        sense index file; falls back to a synthetic ``word%0:00:00::`` key
        when no sense is found.

        NOTE(review): relies on private nltk internals (``wn._FILES[2]``) —
        fragile across nltk versions; confirm before upgrading nltk."""
        wnFO = wn.abspath(wn._FILES[2]).open()
        ret = []
        for l in wnFO:
            if l.split('%')[0].strip() == word.strip():
                ret.append(l.split()[0])
        if len(ret) == 0:
            ret = [word + '%0:00:00::']
        return ret
| {
"repo_name": "dudenzz/word_embedding",
"path": "SimilarityClassification/Classifiers/GloveWordnetRetrofitClassifier.py",
"copies": "1",
"size": "2075",
"license": "mit",
"hash": 7949070969346828000,
"line_mean": 32.0163934426,
"line_max": 100,
"alpha_frac": 0.4968674699,
"autogenerated": false,
"ratio": 3.6024305555555554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4599298025455555,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Asus'
from IClassifier import IClassifier
from utilities import load_stf
from glove import Glove
from scipy.spatial.distance import cosine
from scipy.spatial.distance import euclidean
import numpy as np
class GloveClassifier(IClassifier):
    """Answers similarity questions with GloVe vectors re-centred on the
    vocabulary centroid (a hubness-reduction technique)."""

    def __init__(self, k):
        self.GloveInstance = None
        # +1 because most_similar includes the query word itself — TODO confirm
        self.k = k + 1

    def average_knn_distance(self, word):
        """Mean similarity score of `word` to its k nearest neighbours."""
        total = 0
        for w in self.GloveInstance.most_similar(word, self.k):
            total += w[1]
        return total / self.k

    def calculate_centroid(self):
        """Cache the mean of all word vectors on self.centroid.
        Assumes 300-dimensional vectors — TODO confirm against the model."""
        total = np.zeros(300)
        for w in self.GloveInstance.word_vectors:
            total += w
        self.centroid = total / len(self.GloveInstance.word_vectors)

    def answerQuestion(self, wordAskedFor, question, possibilities):
        """Return ``(best_possibility, comment)`` by max cosine similarity of
        centroid-shifted vectors; `comment` logs every similarity."""
        qV = self.GloveInstance.word_vectors[self.GloveInstance.dictionary[wordAskedFor]] - self.centroid
        pVs = []
        maxSim = 0
        correct = -1
        comment = ''
        for p in possibilities:
            print('working')  # was Py2 `print 'working'` (SyntaxError on Python 3)
            pVs.append((0, self.GloveInstance.word_vectors[self.GloveInstance.dictionary[p]] - self.centroid))
        for i, pV in enumerate(pVs):
            a = 1 - cosine(qV, pV[1])
            comment += '\n\t\t\tsim(' + wordAskedFor + ',' + possibilities[i] + ')=' + str(a)
            if a > maxSim:
                maxSim = a
                correct = i
        # NOTE(review): if no similarity exceeds 0, possibilities[-1] is
        # returned silently — confirm intended
        return (possibilities[correct], comment)
| {
"repo_name": "dudenzz/word_embedding",
"path": "SimilarityClassification/Classifiers/HubnessNormalizedGloveClassifier.py",
"copies": "1",
"size": "1448",
"license": "mit",
"hash": -2464367086936393000,
"line_mean": 34.2,
"line_max": 107,
"alpha_frac": 0.6139502762,
"autogenerated": false,
"ratio": 3.447619047619048,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45615693238190475,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Asus'
import time
from glove import Glove
import numpy as np
import io
def calculate_size(filename):
    """Count the non-blank lines of `filename` (one embedding vector each).

    Prints a progress stamp with elapsed seconds every 100000 counted lines.

    :param filename: path to a UTF-8 text embeddings file
    :return: number of non-blank lines
    """
    start = time.time()  # time.clock() was removed in Python 3.8
    count = 0  # renamed from `max`, which shadowed the builtin
    with io.open(filename, 'r', encoding='utf-8') as savefile:
        for i, line in enumerate(savefile):
            if line.strip() == "":
                continue
            if count % 100000 == 0:
                print(count, 'stamp', time.time() - start)
            count = count + 1
    return count
def load_stf(filename, no_d):
    """Load a Stanford/GloVe-format text embedding file into a Glove instance.

    :param filename: path to a UTF-8 file of `word v1 v2 ... v<no_d>` lines
    :param no_d: vector dimensionality
    :return: a ``glove.Glove`` with word_vectors, zeroed word_biases and the
             word->row dictionary populated
    """
    start = time.time()  # time.clock() was removed in Python 3.8
    dct = {}
    size = calculate_size(filename)
    vectors = np.ndarray(shape=(size, no_d), dtype=float)
    # `row` is the next vector row; it differs from the enumerate index `i`
    # whenever blank lines are skipped
    row = 0
    with io.open(filename, 'r', encoding='utf-8') as savefile:
        for i, line in enumerate(savefile):
            if line.strip() == "":
                continue
            tokens = line.strip().split(' ')
            if row % 10000 == 0:
                print(row, size, 'stamp', time.time() - start)
            word = tokens[0]
            try:
                vectors[row] = tokens[1:]
            except ValueError:  # was a bare except; non-numeric/short rows land here
                print(tokens)  # was Py2 `print tokens` (SyntaxError on Python 3)
                if len(tokens) == no_d:  # was hardcoded 300; generalized to no_d
                    # line holds only numbers (no word token): store it under
                    # the empty word — TODO confirm this fallback is intended
                    word = ''
                    vectors[row] = tokens
                else:
                    raise
            # BUG FIX: was `dct[word] = i` (the raw file line number); the
            # vectors array is indexed by `row`, so every lookup after a blank
            # line pointed at the wrong vector.
            dct[word] = row
            row = row + 1
    # Infer word vectors dimensions.
    no_vectors = len(dct)
    print('stampnv', time.time() - start)
    # Set up the model instance.
    instance = Glove()
    print('stampinst', time.time() - start)
    # NOTE(review): no_components is set to the vocabulary size, not no_d —
    # looks suspicious but preserved; confirm against glove's API
    instance.no_components = size
    print('stampnoc', time.time() - start)
    instance.word_vectors = vectors
    print('stampwv', time.time() - start)
    instance.word_biases = np.zeros(no_vectors)
    print('stampwb', time.time() - start)
    instance.add_dictionary(dct)
    print('stampdict', time.time() - start)
    return instance
| {
"repo_name": "dudenzz/word_embedding",
"path": "SimilarityClassification/Utils/utilities.py",
"copies": "2",
"size": "1890",
"license": "mit",
"hash": -2085848145017212000,
"line_mean": 28.9836065574,
"line_max": 67,
"alpha_frac": 0.5142857143,
"autogenerated": false,
"ratio": 3.825910931174089,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5340196645474089,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Atash'
from selenium.webdriver.support.ui import Select
import os
from model.contact import Contact
from selenium.webdriver.common.by import By
import re
class ContatHelper:
    """Page-object helper for contact CRUD on the addressbook application.

    The class name keeps the original 'ContatHelper' spelling (and the
    'contact_cash' cache spelling) because external code refers to them.
    `app.wd` is the Selenium WebDriver instance.
    """

    def __init__(self, app):
        self.app = app

    def add_new_contact_page(self):
        """Open the 'add new' contact form."""
        wd = self.app.wd
        wd.find_element_by_link_text("add new").click()

    def change_field_value(self, field_name, text):
        """Click/clear/type into the named input; no-op when text is None."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def fill_contact_form(self, contact):
        """Fill every supported field of the contact form from `contact`.
        Fields whose value is None are left untouched (see change_field_value)."""
        wd = self.app.wd
        # add first name
        self.change_field_value("firstname", contact.firstname)
        # add middle name
        self.change_field_value("middlename", contact.middlename)
        # add last name
        self.change_field_value("lastname", contact.lastname)
        # add nickname
        self.change_field_value("nickname", contact.nickname)
        # add photo (disabled)
        #wd.find_element_by_name("photo").send_keys(os.getcwd() + contact.photo_path)
        # add title
        self.change_field_value("title", contact.title)
        # add company
        self.change_field_value("company", contact.company)
        # add company address
        self.change_field_value("address", contact.company_address)
        # add homephone
        self.change_field_value("home", contact.home)
        # add mobile number
        self.change_field_value("mobile", contact.mobile_phone_num)
        # add work number
        self.change_field_value("work", contact.work_phone_num)
        # add fax number
        self.change_field_value("fax", contact.fax_num)
        # add e-mail
        self.change_field_value("email", contact.email1)
        # add e-mail2
        self.change_field_value("email2", contact.email2)
        # add e-mail3
        self.change_field_value("email3", contact.email3)
        # add homepage
        self.change_field_value("homepage", contact.homepage)
        # select birthday
        self.select_birthday(contact.birthday_d, contact.birthday_m, contact.birthday_y)
        # select anniversary day
        self.select_anniversaryday(contact.anniversary_d, contact.anniversary_m, contact.anniversary_y)
        # add second address
        self.change_field_value("address2", contact.second_address)
        # add home phone 2
        self.change_field_value("phone2", contact.second_home)
        # add notes
        self.change_field_value("notes", contact.notes)

    def select_anniversaryday(self, day, month, year):
        """Set the anniversary date; each part is skipped when None.
        Day/month are dropdowns selected by visible text; year is typed."""
        wd = self.app.wd
        if day is not None:
            Ann_Dropdownlist_day = Select(wd.find_element_by_name("aday"))
            Ann_Dropdownlist_day.select_by_visible_text(day)
        if month is not None:
            # select anniversary month
            Ann_Dropdownlist_month = Select(wd.find_element_by_name("amonth"))
            Ann_Dropdownlist_month.select_by_visible_text(month)
        if year is not None:
            # select anniversary year (typed, not cleared first)
            wd.find_element_by_name("ayear").send_keys(year)

    def select_birthday(self, day, month, year):
        """Set the birthday; each part is skipped when None (same pattern as
        select_anniversaryday)."""
        wd = self.app.wd
        if day is not None:
            # select birthday day
            Birthday_Dropdownlist_day = Select(wd.find_element_by_name("bday"))
            Birthday_Dropdownlist_day.select_by_visible_text(day)
        if month is not None:
            # select birthday month
            Birthday_Dropdownlist_month = Select(wd.find_element_by_name("bmonth"))
            Birthday_Dropdownlist_month.select_by_visible_text(month)
        if year is not None:
            # select birthday year
            wd.find_element_by_name("byear").send_keys(year)

    def create(self, contact):
        """Create `contact` via the add-new form and invalidate the cache."""
        wd = self.app.wd
        self.add_new_contact_page()
        self.fill_contact_form(contact)
        # submit adding new contact (positional xpath: the form's submit input)
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
        self.return_home_page()
        self.contact_cash = None

    def select_contact_by_index(self, index):
        """Tick the checkbox of the index-th contact row (0-based)."""
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def delete_first_contact(self):
        self.delete_contact_by_index(0)

    def delete_contact_by_index(self, index):
        """Delete the index-th contact, accepting the confirmation alert."""
        wd = self.app.wd
        self.open_home_page()
        self.select_contact_by_index(index)
        # submit deleting
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        # NOTE(review): switch_to_alert() is deprecated (removed in Selenium 4);
        # kept as-is — confirm the pinned selenium version supports it
        wd.switch_to_alert().accept()
        self.return_home_page()
        self.contact_cash = None

    def modify_first_contact(self, new_contact):
        self.modify_contact_by_index(new_contact, 0)

    def modify_contact_by_index(self, new_contact, index):
        """Overwrite the index-th contact's fields with `new_contact`."""
        wd = self.app.wd
        self.open_home_page()
        self.select_contact_by_index(index)
        # click modify
        wd.find_elements_by_css_selector('img[alt="Edit"]')[index].click()
        self.fill_contact_form(new_contact)
        # submit update
        wd.find_element_by_name("update").click()
        self.return_home_page()
        self.contact_cash = None

    def return_home_page(self):
        wd = self.app.wd
        wd.find_element_by_link_text("home").click()

    def open_home_page(self):
        """Navigate home only if not already there (cheap URL check)."""
        wd = self.app.wd
        if not wd.current_url.endswith("/addressbook/"):
            wd.find_element_by_link_text("home").click()

    def count(self):
        """Number of contact rows on the home page."""
        wd = self.app.wd
        return len(wd.find_elements_by_name("selected[]"))

    # cached list of Contact objects parsed from the home page
    # (cleared to None by every mutating operation above)
    contact_cash = None

    def get_contact_list(self):
        """Return (a copy of) the contacts shown on the home page, parsing the
        table once and caching the result in contact_cash."""
        if self.contact_cash is None:
            wd = self.app.wd
            self.open_home_page()
            self.contact_cash = []
            for row in wd.find_elements_by_name("entry"):
                cells = row.find_elements(By.TAG_NAME, "td")
                # column layout: [checkbox, lastname, firstname, address,
                # e-mails, phones, ...] — derived from the indices used below
                last_name = cells[1].text
                first_name = cells[2].text
                all_phones = cells[5].text
                all_mails = cells[4].text
                address = cells[3].text
                id = row.find_element_by_name("selected[]").get_attribute("value")
                self.contact_cash.append(Contact(lastname=last_name, firstname=first_name, id = id,
                                                 company_address=address,
                                                 all_mails_from_home_page = all_mails,
                                                 all_phones_from_home_page=all_phones))
        return list(self.contact_cash)

    def open_modify_contact_by_index(self, index):
        """Open the edit form of the index-th contact via its row link (col 7)."""
        wd = self.app.wd
        self.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements(By.TAG_NAME, "td")[7]
        cell.find_element(By.TAG_NAME, "a").click()

    def open_contact_view_by_index(self, index):
        """Open the read-only view of the index-th contact (col 6 link)."""
        wd = self.app.wd
        self.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements(By.TAG_NAME, "td")[6]
        cell.find_element(By.TAG_NAME, "a").click()

    def get_contact_info_from_edit_page(self, index):
        """Read the index-th contact's fields from the edit form."""
        wd = self.app.wd
        self.open_modify_contact_by_index(index)
        firstname = wd.find_element_by_name("firstname").get_attribute("value")
        lastname = wd.find_element_by_name("lastname").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        address = wd.find_element_by_name("address").get_attribute("value")
        # e-mails
        email1 = wd.find_element_by_name("email").get_attribute("value")
        email2 = wd.find_element_by_name("email2").get_attribute("value")
        email3 = wd.find_element_by_name("email3").get_attribute("value")
        # phones
        homephone = wd.find_element_by_name("home").get_attribute("value")
        mobile_phone = wd.find_element_by_name("mobile").get_attribute("value")
        work_phone = wd.find_element_by_name("work").get_attribute("value")
        second_homephone = wd.find_element_by_name("phone2").get_attribute("value")
        return Contact(firstname=firstname, lastname=lastname, id=id,
                       company_address=address,
                       email1=email1, email2=email2, email3=email3,
                       home=homephone, mobile_phone_num=mobile_phone,
                       work_phone_num=work_phone, second_home=second_homephone)

    def get_contact_from_view_page(self, index):
        """Scrape the phone numbers from the view page's free text using the
        'H:'/'M:'/'W:'/'P:' prefixes the page renders."""
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        homephone = re.search("H: (.*)", text).group(1)
        mobile_phone = re.search("M: (.*)", text).group(1)
        work_phone = re.search("W: (.*)", text).group(1)
        second_homephone = re.search("P: (.*)", text).group(1)
        return Contact(home=homephone, mobile_phone_num=mobile_phone,
                       work_phone_num=work_phone, second_home=second_homephone)
"repo_name": "aalekperov/Task1",
"path": "fixture/contact.py",
"copies": "1",
"size": "9059",
"license": "apache-2.0",
"hash": 4447524462610200000,
"line_mean": 40.7511520737,
"line_max": 103,
"alpha_frac": 0.5984104206,
"autogenerated": false,
"ratio": 3.5567334118570866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46551438324570865,
"avg_score": null,
"num_lines": null
} |
__author__ = 'atash'
from sys import maxsize
class Contact:
    """Data model for an addressbook contact.

    Every field is optional; None means "not set" and is skipped by the form
    fillers. `all_phones_from_home_page` / `all_mails_from_home_page` hold the
    merged multi-line strings scraped from the home-page table.
    """

    def __init__(self, firstname=None, middlename=None, lastname=None, nickname=None,
                 photo_path=None, title=None,
                 company=None, company_address=None, home=None,
                 all_phones_from_home_page=None,
                 mobile_phone_num=None, work_phone_num=None, fax_num=None,
                 all_mails_from_home_page=None,
                 email1=None, email2=None, email3=None, homepage=None,
                 birthday_d=None, birthday_m=None, birthday_y=None,
                 anniversary_d=None, anniversary_m=None, anniversary_y=None,
                 second_address=None, second_home=None, notes=None, id=None):
        self.firstname = firstname
        self.middlename = middlename
        self.lastname = lastname
        self.nickname = nickname
        self.photo_path = photo_path
        self.title = title
        self.company = company
        self.company_address = company_address
        self.home = home
        self.mobile_phone_num = mobile_phone_num
        self.work_phone_num = work_phone_num
        self.fax_num = fax_num
        self.email1 = email1
        self.email2 = email2
        self.email3 = email3
        self.homepage = homepage
        self.birthday_d = birthday_d
        self.birthday_m = birthday_m
        self.birthday_y = birthday_y
        self.anniversary_d = anniversary_d
        self.anniversary_m = anniversary_m
        self.anniversary_y = anniversary_y
        self.second_address = second_address
        self.second_home = second_home
        self.notes = notes
        self.id = id
        self.all_phones_from_home_page = all_phones_from_home_page
        self.all_mails_from_home_page = all_mails_from_home_page

    def __repr__(self):
        # colon-separated dump of (almost) every field, for test diagnostics
        return "%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s" % (self.id, self.lastname, self.firstname, self.middlename, self.nickname,
                                                                                               self.title, self.company, self.company_address,
                                                                                               self.home, self.mobile_phone_num, self.work_phone_num, self.fax_num,
                                                                                               self.email1, self.email2, self.email3, self.homepage,
                                                                                               self.birthday_d, self.birthday_m, self.birthday_y,
                                                                                               self.anniversary_d, self.anniversary_m, self.anniversary_y,
                                                                                               self.second_address, self.second_home, self.notes)

    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3 — fine for list-based comparisons in tests
    def __eq__(self, other):
        # contacts match when ids agree (or either id is missing) and both
        # name fields agree
        return (self.id is None or other.id is None or self.id == other.id) \
               and self.lastname == other.lastname \
               and self.firstname == other.firstname

    def id_or_max(self):
        """Sort key: numeric id, or sys.maxsize when id is absent/empty."""
        if self.id:
            return int(self.id)
        else:
            return maxsize
"repo_name": "aalekperov/Task1",
"path": "model/contact.py",
"copies": "1",
"size": "3213",
"license": "apache-2.0",
"hash": 2958461026120925000,
"line_mean": 49.21875,
"line_max": 166,
"alpha_frac": 0.5004668534,
"autogenerated": false,
"ratio": 3.9764851485148514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4976952001914851,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Atash'
class SessionHelper:
    """Login/logout page-object helper for the addressbook application."""

    def __init__(self, app):
        self.app = app

    def login(self, username, password):
        driver = self.app.wd
        self.app.open_home_page()
        # fill both credential fields, re-locating the element for each action
        for field, value in (("user", username), ("pass", password)):
            driver.find_element_by_name(field).click()
            driver.find_element_by_name(field).clear()
            driver.find_element_by_name(field).send_keys(value)
        driver.find_element_by_css_selector('input[type="submit"]').click()

    def logout(self):
        self.app.wd.find_element_by_link_text("Logout").click()

    def is_logged_in(self):
        """True when a Logout link is present on the page."""
        return len(self.app.wd.find_elements_by_link_text("Logout")) > 0

    def is_logged_in_as(self, username):
        return self.get_logged_user() == username

    def get_logged_user(self):
        """Currently logged-in username, with the surrounding brackets
        stripped from the header element's text."""
        return self.app.wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]

    def ensure_logout(self):
        if self.is_logged_in():
            self.logout()

    def ensure_login(self, username, password):
        """Make sure `username` is logged in, switching accounts if needed."""
        if self.is_logged_in():
            if self.is_logged_in_as(username):
                return
            self.logout()
        self.login(username, password)
| {
"repo_name": "aalekperov/Task1",
"path": "fixture/session.py",
"copies": "1",
"size": "1425",
"license": "apache-2.0",
"hash": -1768377598208185000,
"line_mean": 29.3191489362,
"line_max": 73,
"alpha_frac": 0.5649122807,
"autogenerated": false,
"ratio": 3.3608490566037736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44257613373037735,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Atash'
from model.group import Group
class GroupHelper:
    """Page-object helper for group CRUD on the addressbook application,
    with a simple cache of the parsed group list (`group_cash` — spelling
    kept because external code refers to it)."""

    def __init__(self, app):
        self.app = app

    def open_groups_page(self):
        """Navigate to the groups page only if not already there."""
        wd = self.app.wd
        if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
            wd.find_element_by_link_text("groups").click()

    def change_field_value(self, field_name, text):
        """Click/clear/type into the named input; no-op when text is None."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def fill_group_form(self, group):
        wd = self.app.wd
        self.change_field_value("group_name", group.name)
        self.change_field_value("group_header", group.header)
        self.change_field_value("group_footer", group.footer)

    def create(self, group):
        """Create `group` and invalidate the cache."""
        wd = self.app.wd
        self.open_groups_page()
        # group creation
        wd.find_element_by_name("new").click()
        self.fill_group_form(group)
        # submit group creation
        wd.find_element_by_name("submit").click()
        # return to group page
        self.return_to_groups_page()
        self.group_cash = None

    def select_group_by_index(self, index):
        """Tick the checkbox of the index-th group row (0-based)."""
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def delete_first_group(self):
        self.delete_group_by_index(0)

    def delete_group_by_index(self, index):
        """Delete the index-th group and invalidate the cache."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_group_by_index(index)
        # delete group
        wd.find_element_by_name("delete").click()
        # return to group page
        self.return_to_groups_page()
        self.group_cash = None

    def modify_first_group(self, index, new_group):
        # NOTE(review): despite the name this forwards the given index, not 0
        self.modify_group_by_index(index, new_group)

    def modify_group_by_index(self, index, new_group):
        """Overwrite the index-th group with `new_group` and invalidate cache."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_group_by_index(index)
        # click edit button
        wd.find_element_by_name("edit").click()
        # modify group form
        self.fill_group_form(new_group)
        # submit modify
        wd.find_element_by_name("update").click()
        # return to group page
        self.return_to_groups_page()
        self.group_cash = None

    def return_to_groups_page(self):
        wd = self.app.wd
        wd.find_element_by_link_text("group page").click()

    def count(self):
        """Number of group rows on the groups page."""
        wd = self.app.wd
        self.open_groups_page()
        return len(wd.find_elements_by_name("selected[]"))

    # cached list of Group objects; cleared to None by every mutator above
    group_cash = None

    def get_group_list(self):
        """Return (a copy of) the groups on the page, parsing once and
        caching the result in group_cash."""
        if self.group_cash is None:
            wd = self.app.wd
            self.open_groups_page()
            self.group_cash = []
            for element in wd.find_elements_by_css_selector('span.group'):
                # NOTE(review): the span's whole text is used as the name —
                # may include surrounding whitespace; confirm against markup
                text = element.text
                id = element.find_element_by_name("selected[]").get_attribute("value")
                self.group_cash.append(Group(name = text, id = id))
        return list(self.group_cash)
"repo_name": "aalekperov/Task1",
"path": "fixture/group.py",
"copies": "1",
"size": "3094",
"license": "apache-2.0",
"hash": 7817131440786895000,
"line_mean": 31.9255319149,
"line_max": 100,
"alpha_frac": 0.5875888817,
"autogenerated": false,
"ratio": 3.5039637599093996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9576308068605643,
"avg_score": 0.0030489146007514015,
"num_lines": 94
} |
__author__ = 'ataylor'
from pandas import DataFrame
import numpy as np
import warnings
def create_sounding_df(data=None, index=None, columns=None, dtype=None, copy=False):
    """Build a sounding DataFrame indexed by pressure.

    Wraps the DataFrame constructor and guarantees the standard sounding
    columns (temperature, dew_point, wind_dir, wind_speed) exist, filling
    missing ones with NaN. When no 'pressure' column is supplied, a NaN
    pressure column is added and a warning is emitted instead of indexing.
    """
    df = DataFrame(data, index, columns, dtype, copy)
    if 'pressure' in df.columns:
        df.set_index('pressure', inplace=True)
    else:
        df['pressure'] = np.nan
        warnings.warn('warning no pressure data present')
    for required in ('temperature', 'dew_point', 'wind_dir', 'wind_speed'):
        if required not in df.columns:
            df[required] = np.nan
    return df
def line_generator(filename):
    """Yield [pressure, height, temperature, dew_point, wind_dir, wind_speed]
    for each data line of a University-of-Wyoming sounding text file.

    Only lines consisting of exactly 11 numeric fields are treated as data;
    dashed separators, blank lines and non-numeric header lines are skipped.
    (Removed the unused `good` flag from the original.)
    """
    with open(filename) as sound_file:
        for line in sound_file:
            # dashed table separators
            if line.startswith('-'):
                continue
            split = line.split()
            if len(split) == 0:
                continue
            try:
                vals = [float(x) for x in split]
            except ValueError:
                # header / non-numeric line
                continue
            if len(vals) != 11:
                continue
            # columns 0-3 are pres, height, T, Td; 6-7 are wind dir/speed
            yield [vals[0], vals[1], vals[2], vals[3], vals[6], vals[7]]
def from_wyo_text(filename):
    """Parse a Wyoming sounding text file into a sounding DataFrame."""
    column_names = ['pressure', 'height', 'temperature',
                    'dew_point', 'wind_dir', 'wind_speed']
    rows = list(line_generator(filename))
    return create_sounding_df(rows, columns=column_names)
| {
"repo_name": "aktaylor08/skewtpy",
"path": "skewtpy/profile.py",
"copies": "1",
"size": "1706",
"license": "apache-2.0",
"hash": 964827880890642000,
"line_mean": 27.9152542373,
"line_max": 93,
"alpha_frac": 0.5603751465,
"autogenerated": false,
"ratio": 3.8080357142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48684108607857146,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Atef Bellaaj'
__author__ = 'Bellaaj'
# Movie-review polarity classifier: Naive Bayes over bag-of-words features
# with English stopwords removed. Python 2 script (print statements and
# integer division for the 3/4 train/test split).
import collections
import nltk.metrics
import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews
# file ids of the negative / positive halves of the corpus
neg_ids = movie_reviews.fileids('neg')
pos_ids = movie_reviews.fileids('pos')
from nltk.corpus import stopwords
stopset = set(stopwords.words('english'))


def stopword_filtered_word_feats(words):
    # bag-of-words feature dict (word -> True), dropping stopwords
    return dict([(word, True) for word in words if word not in stopset])

# label every document's feature dict with its polarity
neg_feats = [(stopword_filtered_word_feats(movie_reviews.words(fileids=[f])), 'neg') for f in neg_ids]
pos_feats = [(stopword_filtered_word_feats(movie_reviews.words(fileids=[f])), 'pos') for f in pos_ids]
# 75/25 train/test split per class (relies on Py2 integer division)
neg_limit = len(neg_feats)*3/4
pos_limit = len(pos_feats)*3/4
trainfeats = neg_feats[:neg_limit] + pos_feats[:pos_limit]
testfeats = neg_feats[neg_limit:] + pos_feats[pos_limit:]
print 'train on %d instances, test.ipynb on %d instances' % (len(trainfeats), len(testfeats))
print neg_feats[1]
classifier = NaiveBayesClassifier.train(trainfeats)
# persist the trained classifier for reuse
import pickle
f = open('stop_word_classifier.pickle', 'wb')
pickle.dump(classifier, f)
f.close()
print 'accuracy:', nltk.classify.util.accuracy(classifier, testfeats)
classifier.show_most_informative_features()
# refsets: true labels by test index; testsets: predicted labels by index
refsets = collections.defaultdict(set)
testsets = collections.defaultdict(set)
for i, (feats, label) in enumerate(testfeats):
    refsets[label].add(i)
    observed = classifier.classify(feats)
    testsets[observed].add(i)
# per-class precision / recall / F-measure
print 'pos precision:', nltk.metrics.precision(refsets['pos'], testsets['pos'])
print 'pos recall:', nltk.metrics.recall(refsets['pos'], testsets['pos'])
print 'pos F-measure:', nltk.metrics.f_measure(refsets['pos'], testsets['pos'])
print 'neg precision:', nltk.metrics.precision(refsets['neg'], testsets['neg'])
print 'neg recall:', nltk.metrics.recall(refsets['neg'], testsets['neg'])
print 'neg F-measure:', nltk.metrics.f_measure(refsets['neg'], testsets['neg'])
| {
"repo_name": "HaythemSahbani/Web-mining-university-project",
"path": "src/3-trained-classifier/Polarity_stop_words.py",
"copies": "1",
"size": "1937",
"license": "mit",
"hash": -5206195233096113000,
"line_mean": 34.2181818182,
"line_max": 102,
"alpha_frac": 0.7320598864,
"autogenerated": false,
"ratio": 3.149593495934959,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9363980031770655,
"avg_score": 0.0035346701128608836,
"num_lines": 55
} |
__author__ = 'athira'
import cv2

# Demo script: load, display, and save an image with OpenCV.
# Load an image and read it
img1 = cv2.imread('cat.jpg')  # load the image (BGR colour)
img2 = cv2.imread('cat.jpg', 0)  # load the image in gray scale
cv2.imshow('image1', img1)
cv2.waitKey(0)
cv2.namedWindow('image2', cv2.WINDOW_NORMAL)  # There is a special case where you can already create a window and
# load image to it later. In that case, you can specify whether window is resizable or not.
# It is done with the function cv2.namedWindow().
# By default, the flag is cv2.WINDOW_AUTOSIZE.
# But if you specify flag to be cv2.WINDOW_NORMAL, you can resize window.
# It will be helpful when image is too large in dimension and adding track bar to windows.
cv2.imshow('image2', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Write an Image
cv2.imwrite('catgray.png', img2)
# sum up version: show, then branch on the pressed key
cv2.imshow('image3', img1)
k = cv2.waitKey(0)
if k == 10:  # NOTE(review): 10 is Enter/LF, but the original comment said
             # "ESC key" (which is 27) — confirm which key was intended
    cv2.destroyAllWindows()
elif k == ord('s'):  # wait for 's' key to save and exit
    cv2.imwrite('meow.png', img1)
    cv2.destroyAllWindows()
| {
"repo_name": "Farious/PersonTracker",
"path": "Source/readImage.py",
"copies": "1",
"size": "1053",
"license": "apache-2.0",
"hash": -7623888780767886000,
"line_mean": 30.9090909091,
"line_max": 113,
"alpha_frac": 0.7113010446,
"autogenerated": false,
"ratio": 2.908839779005525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4120140823605525,
"avg_score": null,
"num_lines": null
} |
import os
import json
def make_standalone(fig):
    """Wrap a tikzpicture body in a minimal standalone LaTeX document."""
    preamble = [
        '\\documentclass[border=3mm]{standalone}',
        '\\usepackage{pgfplots}',
        '\\pgfplotsset{compat=newest}',
        '\\pagestyle{empty}',
        '\\begin{document}',
        '\\begin{tikzpicture}',
    ]
    closing = '\\end{tikzpicture}\n\\end{document}'
    return '\n'.join(preamble) + '\n' + fig + closing
class PlotSet:
    """Accumulates pgfplots key=value options under a named style and can
    merge the style into a .tex snippet file.

    The file's first line is a '%content:' comment carrying a JSON map of
    style-name -> rendered style (newlines encoded as the self.nl marker),
    so repeated runs update styles in place instead of appending.
    """

    def __init__(self, name):
        self.ps_name = name
        self.nl = '&%&'  # stand-in for newline inside the one-line JSON store
        self.items = []

    def name(self):
        return self.ps_name

    def set_one(self, item):
        """Add a bare option, e.g. 'grid'."""
        self.items.append(str(item))

    def set_two(self, key, value):
        """Add a `key = value` option."""
        self.items.append(str(key) + ' = ' + str(value))

    def set_list(self, key, value):
        """Add `key = {{v1}, {v2}, }` from a list of values."""
        ns = '{'
        for item in value:
            ns += '{' + str(item) + '}, '
        ns += '}'
        self.items.append(str(key) + ' = ' + str(ns))

    def set_dict(self, key, dict):
        """Add `key = {k1=v1, k2=v2, }` from a mapping.
        NOTE(review): the parameter name shadows the builtin `dict`; kept for
        interface compatibility."""
        ns = '{'
        for k, v in dict.items():
            ns += str(k) + '=' + str(v) + ', '
        ns += '}'
        self.items.append(str(key) + ' = ' + str(ns))

    def __generate_json_str(self):
        # render one '\pgfplotsset{name/.style={...}}' block with self.nl
        # standing in for real newlines (so it survives the JSON round trip)
        res = '\\pgfplotsset{' + self.ps_name + '/.style={%' + self.nl
        for item in self.items:
            res += '\t' + str(item) + ',' + self.nl
        res += '}}'
        return res

    def output_to_fp(self, fp):
        """Merge this style into file `fp`, preserving styles already stored
        there, and rewrite the whole file."""
        content = ''
        tag = '%content:'
        if os.path.exists(fp):
            content = open(fp).read()
        pses = ''
        for line in content.splitlines():
            if not line.startswith(tag):
                continue
            pses = line[len(tag):]
        ps = {}
        if len(pses) != 0:
            print(pses)  # was Py2 `print pses` — a SyntaxError on Python 3
            ps = json.loads(pses)
        ps[self.ps_name] = self.__generate_json_str()
        # rewrite: JSON index line first, then each rendered style
        fh = open(fp, 'w')
        fh.write(tag + json.dumps(ps))
        fh.write('\n')
        for k, v in ps.items():
            s = v.replace(self.nl, '\n')
            fh.write(s)
            fh.write('\n')
        fh.close()
class PlotBase:
    """Base container for a pgfplots axis: collects plot strings, legends,
    extra styles and annotations, and renders them between \\begin{axis}
    and \\end{axis}."""

    def __init__(self):
        self.hs = '\\begin{axis}'
        self.plotset = None
        self.es = '\\end{axis}\n'
        self.legends = []
        self.plots = []
        self.styles = ''
        self.addplot_sufix = ''
        self.annotations = ''

    def config(self, ps):
        """Attach a PlotSet whose name is emitted as an axis option."""
        self.plotset = ps

    def append_style(self, style):
        self.styles += style.strip()

    def add_annotation(self, ann):
        self.annotations += ann + '\n'

    def plot_sufix(self, sufix):
        self.addplot_sufix = sufix

    def dump(self):
        """Render the full axis environment as a string."""
        out = self.hs
        # axis options: optional PlotSet name, then any appended styles
        option_parts = []
        if self.plotset is not None:
            option_parts.append(self.plotset.name())
        if self.styles:
            option_parts.append(self.styles)
        if option_parts:
            out += '[' + ', '.join(option_parts) + ']'
        out += ' \n'
        out += ''.join(self.plots)
        if self.annotations:
            out += self.annotations + '\n'
        if self.legends:
            # trailing comma is stripped exactly like the loop-built original
            out += '\\legend{' + ','.join(self.legends) + ','
            out = out.strip(',') + '}\n'
        out += self.es
        return out
class Plot(PlotBase):
    """Standard coordinate plot: maps each record of `data` to an (x, y)
    pair with the supplied accessor functions."""

    def __init__(self):
        PlotBase.__init__(self)

    def addplot(self, data, xfunc, yfunc, style='', legend=''):
        """Append one \\addplot block; empty `data` is a no-op."""
        if len(data) == 0:
            return
        parts = ['\\addplot ']
        if len(style) != 0:
            parts.append('[ ' + style + ' ]')
        parts.append(' plot coordinates {\n')
        for record in data:
            parts.append('(' + str(xfunc(record)) + ', ' + str(yfunc(record)) + ')\n')
        parts.append('};')
        self.plots.append(''.join(parts))
        if len(legend) != 0:
            self.legends.append(legend)
class CDFPlot(PlotBase):
    """Empirical CDF plot: converts a sample into cumulative-percentage
    coordinates starting at (0, 0)."""

    def __init__(self):
        PlotBase.__init__(self)

    def addplot(self, data, style='', legend=''):
        """@data - should be a data set (any comparable values); empty is a no-op."""
        if len(data) == 0:
            return
        delta = 100.0 / len(data)
        # value -> occurrence count (was Py2-only `md.has_key(d)`)
        counts = {}
        for d in data:
            counts[d] = counts.get(d, 0) + 1
        nd = [(0, 0)]
        cumulative = 0.0
        for value, cnt in sorted(counts.items()):
            cumulative += cnt * delta
            nd.append((value, cumulative))
        s = '\\addplot '
        if len(style) != 0:
            s += '[ ' + style + ' ]'
        s += ' plot coordinates {\n'
        for x, y in nd:
            s += '(' + str(x) + ', ' + str(y) + ')\n'
        s += '}'
        # BUG FIX: condition was `== 0`, i.e. the suffix was appended only
        # when it was empty (a no-op) and dropped whenever one was set
        if len(self.addplot_sufix) != 0:
            s += self.addplot_sufix
        s += ';'
        self.plots.append(s)
        if len(legend) != 0:
            self.legends.append(legend)
if __name__ == '__main__':
    # Demo: plot sin(x) for x = 0..999 (degrees) and write a standalone
    # LaTeX document to tt/xx.tex
    p = Plot()
    import math
    p.addplot(range(0, 1000), lambda x: x, lambda x: round(math.sin(x*math.pi/180), 2))
    s = p.dump()
    s = make_standalone(s)
    import os
    if not os.path.exists('tt'):
        os.mkdir('tt')
    # NOTE(review): the file handle from open() is never closed explicitly
    open('tt/xx.tex', 'w').write(s)
| {
"repo_name": "AthrunArthur/pypgf",
"path": "pgf.py",
"copies": "1",
"size": "5399",
"license": "mit",
"hash": -1757050749636263700,
"line_mean": 23.5409090909,
"line_max": 88,
"alpha_frac": 0.4517503241,
"autogenerated": false,
"ratio": 3.399874055415617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4351624379515617,
"avg_score": null,
"num_lines": null
} |
__author__ = 'at'
from app import AfricasTalkingGateway, AfricasTalkingGatewayException
from app import settings
from app import logging
from urllib import urlencode
import requests
# redis queue
from rq.decorators import job
from redis import Redis
redis_conn = Redis()
from app.lib.witty import ask_wit
class FetchUrl(object):
    """
    Builds the analytics query URL for a given metric and date range.

    Holds the request parameters and produces the final URL via form_url();
    the API key is kept alongside for use in request headers.
    """

    def __init__(self, base_url, metric, username, apikey, granularity, start_date, end_date):
        """
        :param base_url: URL prefix the encoded query string is appended to
        :param metric: metric name to request
        :param username: account username
        :param apikey: API key (returned by get_apikey; not part of the URL)
        :param granularity: aggregation granularity
        :param start_date: period start
        :param end_date: period end
        :return: url object
        """
        self.base_url = base_url
        self.metric = metric
        self.username = username
        self.apikey = apikey
        self.granularity = granularity
        self.start_date = start_date
        self.end_date = end_date

    def form_url(self):
        """Return base_url with the urlencoded query parameters appended."""
        params = {'granularity': self.granularity, 'startDate': self.start_date,
                  'endDate': self.end_date, 'metric': self.metric, 'username': self.username}
        return self.base_url + urlencode(params)

    def get_apikey(self):
        """Return the stored API key."""
        return self.apikey
class MakeRequests(object):
    """
    Small helper that performs an HTTP request with an ``apikey`` header.
    """

    def __init__(self, url, apikey, method='GET'):
        """
        :param url: full URL to request
        :param apikey: value sent in the ``apikey`` header
        :param method: HTTP method; only 'GET' is currently supported
        :return: None
        """
        self.method = method
        self.url = url
        self.apikey = apikey

    def send_(self):
        """
        Perform the request.

        :return: a ``requests`` response object for GET, or None for any
                 other (unsupported) method.
        """
        # BUG FIX: the original used ``self.method is 'GET'`` -- an identity
        # comparison that only works by accident of CPython string interning;
        # use equality instead.
        if self.method == 'GET':
            headers = {'apikey': self.apikey}
            return requests.get(self.url, headers=headers)
        return None
@job('high', connection=redis_conn, timeout=5)
def consume_call(from_, to):
    """Initiate a voice call via the AfricasTalking gateway (queued RQ job).

    Gateway failures are logged as warnings and not re-raised.
    """
    gateway = AfricasTalkingGateway(apiKey_=settings.api_key, username_=settings.username)
    try:
        gateway.call(from_, to)
    except AfricasTalkingGatewayException:
        logging.warning("call init failed")
@job('high', connection=redis_conn, timeout=5)
def get_witty_intent(text):
    """Resolve the intent of ``text`` via wit.ai (queued RQ job).

    :param text: free-form user text to classify
    :return: the intent returned by ask_wit, or None when the lookup fails
    """
    try:
        intent = ask_wit(text)
        # pull game info (game info for scores and results)
        return intent
    except Exception as e:
        # BUG FIX: the original logged "call init failed" (copy-pasted from
        # consume_call) and passed ``e`` as a %-format argument with no
        # placeholder; log an accurate message that includes the exception.
        logging.error("wit.ai intent lookup failed: %s", e)
# send_message job | {
"repo_name": "ianjuma/octopus",
"path": "app/api/_utils.py",
"copies": "2",
"size": "2423",
"license": "apache-2.0",
"hash": -7775597148978341000,
"line_mean": 24.5157894737,
"line_max": 97,
"alpha_frac": 0.5984316962,
"autogenerated": false,
"ratio": 3.8278041074249605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013574386993629559,
"num_lines": 95
} |
__author__ = 'AT'
from GPy.util.linalg import mdot
from mog_diag import MoG_Diag
from scipy.misc import logsumexp
from util import log_diag_gaussian
import numpy as np
from savigp import SAVIGP
class SAVIGP_Diag(SAVIGP):
    """
    Implementation of the SAVIGP model in the case that posterior is the mixture of diagonal Gaussians.
    """

    def __init__(self, X, Y, num_inducing, num_mog_comp, likelihood, kernels, n_samples, config_list,
                 latent_noise, is_exact_ell, inducing_on_Xs, n_threads=1, image=None, partition_size=3000):
        super(SAVIGP_Diag, self).__init__(X, Y, num_inducing, num_mog_comp, likelihood,
                                          kernels, n_samples, config_list, latent_noise, is_exact_ell,
                                          inducing_on_Xs, n_threads, image, partition_size)

    def _get_mog(self):
        """Posterior distribution: a mixture of diagonal Gaussians."""
        return MoG_Diag(self.num_mog_comp, self.num_latent_proc, self.num_inducing)

    def _dell_ds(self, k, j, cond_ll, A, sigma_kj, norm_samples):
        """Monte-Carlo estimate of the ELL gradient wrt the (raw-space)
        covariance of component ``k``, latent process ``j``."""
        s = self._average(cond_ll, (np.square(norm_samples) - 1) / sigma_kj[k, j], True)
        return (mdot(s, np.square(A[j])) * self.MoG.pi[k] / 2.)

    def update_N_z(self):
        """Recompute the pairwise terms log N(m_k | m_l, S_k + S_l) for all
        component pairs and log z_k = logsumexp_l(log N_kl + log pi_l),
        which the entropy bound and its gradients below rely on."""
        self.log_z = np.zeros((self.num_mog_comp))
        self.log_N_kl = np.zeros((self.num_mog_comp, self.num_mog_comp))
        for k in range(self.num_mog_comp):
            for l in range(self.num_mog_comp):
                for j in range(self.num_latent_proc):
                    # the diagonal Gaussian factorises over latent processes,
                    # hence the sum of per-process log densities
                    self.log_N_kl[k, l] += log_diag_gaussian(self.MoG.m[k, j], self.MoG.m[l, j],
                                                             logsumexp(
                                                                 [self.MoG.log_s[k, j, :], self.MoG.log_s[l, j, :]],
                                                                 axis=0))
            self.log_z[k] = logsumexp(self.log_N_kl[k, :] + np.log(self.MoG.pi))

    def _update(self):
        # keep the pairwise-Gaussian caches in sync before the base update
        self.update_N_z()
        SAVIGP._update(self)

    def mdot_Aj(self, Ajn, Kxnz):
        # element-wise square of the first row of Ajn; with a diagonal
        # posterior only this term survives.  NOTE(review): assumes Ajn is the
        # A matrix for a single data point -- confirm against the base class.
        return Ajn[0] * Ajn[0]

    def _d_ent_d_m(self):
        """Gradient of the entropy term of ELBO wrt the posterior means."""
        dent_dm = np.empty((self.num_mog_comp, self.num_latent_proc, self.num_inducing))
        for k in range(self.num_mog_comp):
            for j in range(self.num_latent_proc):
                dent_dm[k, j, :] = self._d_ent_d_m_kj(k, j)
        return dent_dm

    def _d_ent_d_pi(self):
        """Gradient of the entropy term of ELBO wrt the mixture weights."""
        pi = np.empty(self.num_mog_comp)
        for k in range(self.num_mog_comp):
            pi[k] = -self.log_z[k]
            for l in range(self.num_mog_comp):
                pi[k] -= self.MoG.pi[l] * (np.exp(self.log_N_kl[k, l] - self.log_z[l]))
        return pi

    def _d_ent_d_S_kj(self, k, j):
        """
        Calculates gradient of the entropy term of ELBO wrt to the posterior covariance for component ``k`` and latent
        process ``j``. The returned gradient will be in the raw space.
        """
        s_k = np.zeros(self.MoG.S_dim())
        for l in range(self.num_mog_comp):
            s_k += self.MoG.pi[k] * self.MoG.pi[l] * (np.exp(self.log_N_kl[l, k] - self.log_z[k]) +
                                                      np.exp(self.log_N_kl[l, k] - self.log_z[l])) * \
                   self.MoG.C_m_C(j, k, l)
        return 1. / 2 * s_k

    def _d_ent_d_S(self):
        r"""
        Calculated gradient of the entropy term of ELBO wrt to the posterior covariance.

        Returns
        -------
        ds : ndarray
         dent \\ ds. Gradients will be in the raw space. Dimensions : K * Q * ``self.MoG.S_dim()``
        """
        dent_ds = np.empty((self.num_mog_comp, self.num_latent_proc) + self.MoG.S_dim())
        for k in range(self.num_mog_comp):
            for j in range(self.num_latent_proc):
                dent_ds[k, j] = self._d_ent_d_S_kj(k, j)
        return dent_ds

    def _transformed_d_ent_d_S(self):
        # _d_ent_d_S_kj already produces raw-space gradients; just flatten
        return (self._d_ent_d_S()).flatten()

    def _l_ent(self):
        """Entropy lower-bound term: -sum_k pi_k * log z_k."""
        return -np.dot(self.MoG.pi, self.log_z)
| {
"repo_name": "adezfouli/savigp",
"path": "GP/savigp_diag.py",
"copies": "1",
"size": "3962",
"license": "apache-2.0",
"hash": -8126128708674861000,
"line_mean": 39.8453608247,
"line_max": 118,
"alpha_frac": 0.5254921757,
"autogenerated": false,
"ratio": 2.954511558538404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.891484616701965,
"avg_score": 0.013031513443750921,
"num_lines": 97
} |
__author__ = 'AT'
from GPy.util.linalg import mdot
import numpy as np
class MoG:
    r"""
    A generic class for representing posterior distribution. MoG stands for mixture of Gaussians, and this class is
    suitable when posterior is a mixture of Gaussians. The posterior distribution is as follows:

     q(u) = \sum_K \mult_Q N(u, m[k,j], s[k,j])
    """

    def __init__(self, num_comp, num_process, num_dim):
        """
        :param num_comp: number of components
        :param num_process: number of latent processes
        :param num_dim: dimensionality of each Gaussian.
        :return: None
        """
        self.num_comp = num_comp
        self.num_process = num_process
        self.num_dim = num_dim
        # m: posterior means; pi: mixture weights; parameters: flat raw vector
        self.m = []
        self.pi = []
        self.parameters = []

    def __str__(self):
        # NOTE(review): relies on self.s, which only subclasses define
        return 'm:' + str(self.m) + '\n' + 's:' + str(self.s) + '\n' + 'pi:' + str(self.pi)

    def update_parameters(self, params):
        """
        Sets parameters of the posterior distribution.

        Parameters
        ----------
        params : ndarray
         an array of size = self.get_m_size() + self.get_s_size() + self.num_comp, which will be used to update
         parameters
        """
        self.parameters = params
        # the flat vector is laid out as [means | covariances | weights]
        self.m_from_array(params[:self.get_m_size()])
        self.s_from_array(params[self.get_m_size():(self.get_m_size() + self.get_s_size())])
        self.pi_from_array(params[(self.get_m_size() + self.get_s_size()):])
        self._update()

    def pi_dim(self):
        """
        :return: number of components (dimensionality of the ``pi`` array)
        """
        return self.num_comp

    def m_dim(self):
        """
        :return: dimensionality of the mean of distributions
        """
        return self.num_comp, self.num_process, self.num_dim

    def _fixed_init(self):
        """
        Initializes posterior distributions using fixed numbers
        :return: None
        """
        # uniform(0.0, 0.0) deliberately yields all-zero means
        self.m = np.random.uniform(low=0.0, high=0.0, size=(self.num_comp, self.num_process, self.num_dim))
        self.pi_from_array(np.random.uniform(low=1.0, high=5.0, size=self.num_comp))

    def transform_S_grad(self, g):
        r"""
        transforms gradients of of ``s`` to be in the original space, i.e., space of the values the was used
        in ``updata_parameters``. Assume:

         g = df \\ dS, where S is the posterior covariance,

        then this function returns:

        :returns: df \\ dL, where L the representation of the parameter in the raw space.
        """
        raise NotImplementedError

    def _random_init(self):
        """
        Initialises posterior parameters randomly.
        """
        self.m = np.random.uniform(low=-15.1, high=15.1, size=(self.num_comp, self.num_process, self.num_dim))
        self.pi_from_array(np.random.uniform(low=1.0, high=10.0, size=self.num_comp))

    def random_init(self):
        """Randomly initialise parameters and refresh internal caches."""
        self._random_init()
        self._update()

    def pi_from_array(self, p):
        """
        Builds ``p`` (weight of each component) from an unconstrained array.

        Parameters
        ----------
        p : ndarray
         an array of size num_comp
        """
        # softmax: keeps weights positive and summing to one
        pis = np.exp(p)
        self.pi = pis / sum(pis)
        self.pi_untrans = p.copy()

    def dpi_dx(self):
        """Jacobian of the softmax: d pi / d (untransformed pi)."""
        pit = np.repeat(np.array([self.pi.T]), self.num_comp, 0)
        return pit * (-pit.T + np.eye(self.num_comp))

    def transform_pi_grad(self, p):
        """
        Returns gradient of the ``p`` array wrt to the untransformed parameters, i.e., the parameters that will be exposed
        to the optimiser.

        Parameters
        ----------
        p : ndarray
         input array to calculate its gradient
        """
        return mdot(p, self.dpi_dx())

    def get_m_size(self):
        """
        :return: total size of the array containing mean of the posterior for all components and processes
        """
        return self.num_comp * self.num_process * self.num_dim

    def update_mean(self, j, mj):
        """
        Update mean of the latent process ``j`` using ``mj`` for all components.

        :param j:
         the latent process to update
        :param mj:
         the mean used to update
        :return: None
        """
        for k in range(self.num_comp):
            self.m[k, j, :] = mj.copy()
        self._update()

    def update_covariance(self, j, Sj):
        """ updates covariance matrix j using Sj """
        raise NotImplementedError

    def num_parameters(self):
        """ returns number of free parameters of a model """
        raise NotImplementedError

    def get_s_size(self):
        """ returns the size of an array needed to represent the posterior covariance when flattened. For example,
        in the case of diagonal covariance the size will be K * Q * M (K : number of components; Q : number of latent processes;
        M : number of inducing points)"""
        raise NotImplementedError

    def S_dim(self):
        """ dimensionality of nonzero elements in the covariance matrix (e.g., M in the case of diagonal posterior
        and M^2 in the case of full posterior covariance) """
        raise NotImplementedError

    def m_from_array(self, ma):
        """ initializes the mean from ma"""
        raise NotImplementedError

    def get_sjk_size(self):
        """ returns number of elements needed to represent covariance matrix of each component and each latent process"""
        raise NotImplementedError

    def s_from_array(self, sa):
        """ initializes the covariance matrix from ``sa``. Note that ``sa`` is in the raw space, ie., it is coming directly
        form the optimiser"""
        raise NotImplementedError

    def log_pdf(self, j, k, l):
        """ :return N_j(m_k|m_l, S_l + S_k)"""
        raise NotImplementedError

    def tr_AinvS(self, L, k, j):
        """
        Assuming that ``L`` is the Cholesky decomposition of A

        :return trace(A^-1 s[k,j]) """
        raise NotImplementedError

    def tr_AS(self, A, k, j):
        """
        :return: trace (A s[k,j])
        """
        raise NotImplementedError

    def aSa(self, a, k, j):
        """ :return a s[k,j] a"""
        raise NotImplementedError

    def mmTS(self, k, j):
        """ :return m_kj m_kj^T + s_kj """
        raise NotImplementedError

    def dAinvS_dS(self, L, k, j):
        r"""
        Assuming ``L`` = chol (A), then this function calculates dA^{-1}s[k,j] \\ ds[k,j] and transforms the results to the
        raw space i.e., ready for exposing to the optimiser"""
        raise NotImplementedError

    def dAS_dS(self, L, k, j):
        """ :return dA^{-1}S dS """
        raise NotImplementedError

    def Sa(self, a, k, j):
        """ :return S_kj a """
        raise NotImplementedError

    def _update(self):
        """ updates internal variables of the class """
        pass

    def get_m_S_params(self):
        """
        Returns a tuple (m, s), which contains mean and covariance matrix of the posterior, which can be used for
        example by the optimize to evaluate the amount of change in posterior parameters.
        """
        raise NotImplementedError
| {
"repo_name": "adezfouli/savigp",
"path": "GP/mog.py",
"copies": "1",
"size": "7239",
"license": "apache-2.0",
"hash": 6457086204631298000,
"line_mean": 30.6113537118,
"line_max": 128,
"alpha_frac": 0.577980384,
"autogenerated": false,
"ratio": 3.8,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4877980384,
"avg_score": null,
"num_lines": null
} |
__author__ = 'AT'
from savigp import SAVIGP
from GPy.util.linalg import mdot
from mog_single_comp import MoG_SingleComponent
import numpy as np
class SAVIGP_SingleComponent(SAVIGP):
    """
    Implementation of SAVIGP in the case that the posterior covariance is full, and the mixture has only one component.
    """

    def __init__(self, X, Y, num_inducing, likelihood, kernels, n_samples,
                 config_list, latent_noise, is_exact_ell, inducing_on_Xs, n_threads=1, image=None, partition_size=3000):
        # num_mog_comp is fixed to 1 (single full-covariance component)
        super(SAVIGP_SingleComponent, self).__init__(X, Y, num_inducing, 1, likelihood,
                                                     kernels, n_samples, config_list, latent_noise,
                                                     is_exact_ell, inducing_on_Xs, n_threads, image, partition_size)

    def _dell_ds(self, k, j, cond_ll, A, sigma_kj, norm_samples):
        """Monte-Carlo estimate of the ELL gradient wrt the full posterior
        covariance of component ``k``, latent process ``j``."""
        return mdot(A[j].T * self._average(cond_ll, (norm_samples**2 - 1)/sigma_kj[k,j], True), A[j]) \
               * self.MoG.pi[k] / 2.

        # a bit less memory intensive
        # return np.einsum('i,ij,ki->jk', mdot(self.normal_samples[j,:]**2 - 1, cond_ll / sigma_kj[k,j])
        #                  , A[j], A[j].T) * self.MoG.pi[k] / n_sample / 2.

        # a bit faster but high memory
        # return mdot(self.normal_samples[j,:]**2 - 1, cond_ll / sigma_kj[k,j]
        #             , np.einsum('ij,ki->ijk', A[j], A[j].T)) * self.MoG.pi[k] / n_sample / 2.

    def init_mog(self, init_m):
        """Initialise the posterior: means from ``init_m`` and, additionally,
        each latent process' covariance from the prior Kzz."""
        super(SAVIGP_SingleComponent, self).init_mog(init_m)
        for j in range(self.num_latent_proc):
            self.MoG.update_covariance(j, self.Kzz[j])

    def _update(self):
        # refresh the entropy cache before the base update
        self.update_N_z()
        SAVIGP._update(self)

    def mdot_Aj(self, Ajn, Kxnz):
        # full covariance: the outer product Ajn^T Ajn is required
        return mdot(Ajn.T, Ajn)

    def _get_mog(self):
        """Posterior distribution: one full-covariance Gaussian."""
        return MoG_SingleComponent(self.num_latent_proc, self.num_inducing)

    def _d_ent_d_m(self):
        # with a single component the entropy does not depend on the mean
        return np.zeros((self.num_mog_comp, self.num_latent_proc, self.num_inducing))

    def _d_ent_d_pi(self):
        """Gradient of the entropy term wrt the (single) mixture weight."""
        return -self.log_z[0] - 1

    def _l_ent(self):
        """Entropy lower-bound term: -sum_k pi_k * log z_k."""
        return -np.dot(self.MoG.pi, self.log_z)

    def update_N_z(self):
        """Cache log z_0 as the sum over latent processes of the posterior's
        self log-density log_pdf(j, 0, 0)."""
        self.log_z = np.zeros((self.num_mog_comp))
        for j in range(self.num_latent_proc):
            self.log_z[0] += self.MoG.log_pdf(j, 0, 0)

    def _transformed_d_ent_d_S(self):
        r"""
        In the case of posterior distribution with a single component, the gradients of the entropy term wrt to the
        Cholesky decomposition of the posterior covariance (L) is an identity matrix, i.e., \n
         dEntropy \\ dL = I

        Therefore this function returns a flatten identity matrix.
        """
        return self.MoG.transform_eye_grad()
| {
"repo_name": "adezfouli/savigp",
"path": "GP/savigp_single_comp.py",
"copies": "1",
"size": "2840",
"license": "apache-2.0",
"hash": 5281746222936914000,
"line_mean": 39.5714285714,
"line_max": 125,
"alpha_frac": 0.5654929577,
"autogenerated": false,
"ratio": 3.1243124312431245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9179709312556048,
"avg_score": 0.0020192152774153615,
"num_lines": 70
} |
__author__ = 'AT'
from scipy.linalg import cho_solve, solve_triangular
from mog import MoG
from GPy.util.linalg import mdot
import math
import numpy as np
from util import chol_grad, pddet, jitchol, tr_AB
class MoG_SingleComponent(MoG):
    """
    Implementation of posterior distribution when it is a single component Gaussian distribution with full
    covariance matrix.

    Attributes
    ----------
    L : ndarray
     Cholesky decomposition of the covariance matrix
    """

    def __init__(self, num_process, num_dim):
        MoG.__init__(self, 1, num_process, num_dim)
        # NOTE(review): invC_klj is allocated but not visibly used in this class
        self.invC_klj = np.empty((self.num_comp, self.num_comp, self.num_process, self.num_dim, self.num_dim))
        self.m = []
        self.pi = []
        # L_flatten: lower triangle of L with log-diagonal (the raw space the
        # optimiser sees); s = L L^T is rebuilt from it in _update()
        self.L_flatten = np.empty((self.num_comp, self.num_process, self.get_sjk_size()))
        self.s = np.empty((self.num_comp, self.num_process, self.num_dim, self.num_dim))
        self.L = np.empty((self.num_comp, self.num_process, self.num_dim, self.num_dim))
        self.log_det = np.empty((self.num_comp, self.num_comp, self.num_process))
        self._fixed_init()
        self._update()
        self.num_free_params = self.parameters.shape[0]

    def log_pdf(self, j, k, l):
        """log N_j(m_k | m_l, S_k + S_l); with a single component (k = l = 0)
        only the normalising constant remains.

        NOTE(review): the log(2.0) term and pddet of the Cholesky factor
        encode log det(2 S) -- confirm against the entropy derivation before
        changing.
        """
        return -((self.s[0, j, :, :].shape[0])/2) * (math.log(2 * math.pi) + math.log(2.0)) - \
               0.5 * pddet(self.L[0,j,:])

    def aSa(self, a, k, j):
        """diag(a s[k,j] a^T)"""
        return np.diagonal(mdot(a, self.s[k,j,:,:], a.T))

    def mmTS(self, k, j):
        """m_kj m_kj^T + s_kj"""
        return mdot(self.m[k,j,:,np.newaxis], self.m[k,j,:,np.newaxis].T) + self.s[k,j]

    def Sa(self, a, k, j):
        """s[k,j] a"""
        return mdot(self.s[k,j], a)

    def transform_eye_grad(self):
        """
        In the case of posterior distribution with one component, gradients of the
        entropy term wrt to the posterior covariance is identity. This function returns flatten lower-triangular terms
        of the identity matrices for all processes.
        """
        grad = np.empty((self.num_comp, self.num_process, self.get_sjk_size()))
        meye = np.eye((self.num_dim))[np.tril_indices_from(self.L[0,0])]
        for k in range(self.num_comp):
            for j in range(self.num_process):
                grad[k,j] = meye
        return grad.flatten()

    def get_parameters(self):
        """Flat raw parameter vector: [means | L_flatten | untransformed pi]."""
        return np.hstack([self.m.flatten(), self.L_flatten.flatten(), self.pi_untrans])

    def update_covariance(self, j, Sj):
        """Set the covariance of latent process ``j`` for all components,
        shifting the diagonal first if needed to make it positive definite."""
        Sj = Sj.copy()
        mm = min(Sj[np.diag_indices_from(Sj)])
        if mm < 0:
            # shift diagonal so the smallest entry becomes positive
            Sj[np.diag_indices_from(Sj)] = Sj[np.diag_indices_from(Sj)] - 1.1 * mm
        for k in range(self.num_comp):
            self.s[k,j] = Sj.copy()
            self.L[k,j] = jitchol(Sj,10)
            # store the Cholesky factor in raw space (log on the diagonal)
            tmp = self.L[k,j].copy()
            tmp[np.diag_indices_from(tmp)] = np.log(tmp[np.diag_indices_from(tmp)])
            self.L_flatten[k,j] = tmp[np.tril_indices_from(tmp)]
        self._update()

    def num_parameters(self):
        return self.num_free_params

    def _fixed_init(self):
        MoG._fixed_init(self)
        # uniform(1.0, 1.0) deliberately yields a constant initial factor
        for k in range(self.num_comp):
            for j in range(self.num_process):
                self.L_flatten[k,j,:] = np.random.uniform(low=1.0, high=1.0, size=self.get_sjk_size())

    def _random_init(self):
        MoG._random_init(self)
        for k in range(self.num_comp):
            for j in range(self.num_process):
                self.L_flatten[k,j,:] = np.random.uniform(low=1.1, high=5.0, size=self.get_sjk_size())

    def get_sjk_size(self):
        # number of lower-triangular elements of an num_dim x num_dim matrix
        return self.num_dim * (self.num_dim + 1) / 2

    def get_s_size(self):
        return self.num_comp * self.num_process * self.get_sjk_size()

    def S_dim(self):
        return self.num_dim, self.num_dim

    def m_from_array(self, ma):
        self.m = ma.reshape((self.num_comp, self.num_process, self.num_dim))

    def s_from_array(self, sa):
        # raw-space factors; s itself is rebuilt lazily in _update()
        self.L_flatten = sa.reshape((self.num_comp, self.num_process, self.get_sjk_size()))

    def get_m_S_params(self):
        return self.m, self.L_flatten

    def tr_AinvS(self, L, k, j):
        """trace(A^-1 s[k,j]) via a triangular solve with ``L`` = chol(A)."""
        a = solve_triangular(L, self.L[k, j, :], lower=True)
        return tr_AB(a.T, a)

    def tr_AS(self, A, k, j):
        """trace(A s[k,j])"""
        return tr_AB(A, self.s[k, j, :])

    def dAinvS_dS(self, L, k, j):
        """d trace(A^-1 s[k,j]) / dL_flatten, returned in the raw space
        (diagonal entries rescaled by L's diagonal for the log parametrisation)."""
        tmp = 2 * cho_solve((L, True), self.L[k,j])
        tmp[np.diag_indices_from(tmp)] *= self.L[k,j][np.diag_indices_from(tmp)]
        return tmp[np.tril_indices_from(self.L[k,j])]

    def dAS_dS(self, S, k, j):
        """d trace(S s[k,j]) / dL_flatten, in the raw space."""
        tmp = 2 * mdot(S, self.L[k,j])
        tmp[np.diag_indices_from(tmp)] *= self.L[k,j][np.diag_indices_from(tmp)]
        return tmp[np.tril_indices_from(self.L[k,j])]

    def transform_S_grad(self, g):
        r"""
        Assume:
         g = df \\ dS

        then this function returns:

        :returns df \\ dL, where L is the Cholesky decomposition of S
        """
        grad = np.empty((self.num_comp, self.num_process, self.get_sjk_size()))
        for k in range(self.num_comp):
            for j in range(self.num_process):
                tmp = chol_grad(self.L[k,j], g[k,j])
                # chain rule for the log-diagonal parametrisation
                tmp[np.diag_indices_from(tmp)] *= self.L[k,j][ np.diag_indices_from(tmp)]
                grad[k,j] = tmp[np.tril_indices_from(self.L[k,j])]
        return grad.flatten()

    def _update(self):
        # rebuild L (exp on the diagonal) and s = L L^T from the raw factors
        self.parameters = self.get_parameters()
        for k in range(self.num_comp):
            for j in range(self.num_process):
                temp = np.zeros((self.num_dim, self.num_dim))
                temp[np.tril_indices_from(temp)] = self.L_flatten[k,j,:].copy()
                temp[np.diag_indices_from(temp)] = np.exp(temp[np.diag_indices_from(temp)])
                self.L[k,j,:,:] = temp
                self.s[k,j] = mdot(self.L[k,j,:,:], self.L[k,j,:,:].T)
| {
"repo_name": "adezfouli/savigp",
"path": "GP/mog_single_comp.py",
"copies": "1",
"size": "6040",
"license": "apache-2.0",
"hash": 1154992935167493600,
"line_mean": 37.2278481013,
"line_max": 118,
"alpha_frac": 0.5700331126,
"autogenerated": false,
"ratio": 2.965144820814924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.900143502036071,
"avg_score": 0.006748582610842907,
"num_lines": 158
} |
__author__ = 'AT'
from util import inv_chol
from GPy.util.linalg import mdot
from mog import MoG
import numpy as np
class MoG_Diag(MoG):
    """
    Implementation of a posterior distribution where the covariance matrix is a mixture of diagonal Gaussians.
    The class has two important internal fields as follows:

    Attributes
    ----------
    log_s : ndarray
     Logarithm of the diagonal of covariance matrix

    invC_klj_Sk : ndarray
     (s[k,j] + s[l,j])^-1 * s[k,j]
    """

    def __init__(self, num_comp, num_process, num_dim):
        MoG.__init__(self, num_comp, num_process, num_dim)
        self.invC_klj_Sk = np.empty((self.num_comp, self.num_comp, self.num_process, self.num_dim))
        self.s = []
        self._fixed_init()
        self._update()
        self.num_free_params = self.parameters.shape[0]

    def get_parameters(self):
        """Flat raw parameter vector: [means | log_s | untransformed pi]."""
        return np.hstack([self.m.flatten(), self.log_s.flatten(), self.pi_untrans])

    def num_parameters(self):
        return self.num_free_params

    def _fixed_init(self):
        MoG._fixed_init(self)
        # uniform(0.5, 0.5) deliberately yields a constant initial variance
        self.s = np.random.uniform(low=0.5, high=0.5, size=(self.num_comp, self.num_process, self.num_dim))
        self.log_s = np.log(self.s)

    def _random_init(self):
        MoG._random_init(self)
        self.s = np.random.uniform(low=1.0, high=3.0, size=(self.num_comp, self.num_process, self.num_dim))
        self.log_s = np.log(self.s)

    def update_covariance(self, j, Sj):
        """Set the (diagonal of the) covariance of latent process ``j`` for
        all components, shifting it first if any entry is negative."""
        for k in range(self.num_comp):
            self.s[k,j,:] = np.diagonal(Sj).copy()
            if min(self.s[k,j,:]) < 0:
                # shift so the smallest entry becomes positive
                self.s[k,j,:] = self.s[k,j,:] - 2 * min(self.s[k,j,:])
        self.log_s = np.log(self.s)
        self._update()

    def transform_S_grad(self, g):
        r"""
        Assume:
         g = df \\ dS

        then this function returns:

        :returns df \\ d log(s)

        therefore transforming the gradient to the raw space (log(s) space).
        """
        return g.flatten() * self.s.flatten()

    def get_s_size(self):
        return self.num_comp * self.num_process * self.num_dim

    def get_sjk_size(self):
        return self.num_dim

    def S_dim(self):
        return self.num_dim,

    def m_from_array(self, ma):
        self.m = ma.reshape((self.num_comp, self.num_process, self.num_dim))

    def s_from_array(self, sa):
        # ``sa`` is in log space (raw optimiser space)
        self.s = np.exp(sa).reshape((self.num_comp, self.num_process, self.num_dim))
        self.log_s = sa.reshape((self.num_comp, self.num_process, self.num_dim))

    def tr_AinvS(self, L, k, j):
        """trace(A^-1 s[k,j]) with ``L`` = chol(A); only the diagonal matters."""
        return np.dot(np.diagonal(inv_chol(L)), self.s[k,j,:])

    def tr_AS(self, A, k, j):
        """trace(A s[k,j])"""
        return np.dot(np.diagonal(A), self.s[k,j,:])

    def C_m(self, j, k, l):
        """
        Returns (m[k,j] - m[l,j]) / (s[l,j] + s[k,j])
        """
        return (self.m[k, j, :] - self.m[l, j, :]) / (self.s[l, j, :] + self.s[k, j, :])

    def C_m_C(self, j, k, l):
        """
        Returns (1 / (s[k,j] + s[l,j]) - (m[k,j] - m[l,j]) ** 2 / (s[k,j] + s[l,j])) * s[k,j]

        None that the last multiplication by s[k,j] is because this function is used to calculate
        gradients, and this multiplication brings the gradients to the raw space (log(s) space)
        """
        return (self.invC_klj_Sk[k, l, j] -
                np.square(self.invC_klj_Sk[k, l, j] * (self.m[k, j, :] - self.m[l, j, :])) / self.s[k,j])

    def aSa(self, a, k, j):
        """diag(a s[k,j] a^T)"""
        return np.diagonal(mdot(a, np.diag(self.s[k,j,:]), a.T))

    def mmTS(self, k, j):
        """m_kj m_kj^T + s_kj"""
        return mdot(self.m[k,j, np.newaxis].T, self.m[k,j, np.newaxis]) + np.diag(self.s[k,j])

    def dAinvS_dS(self, L, k, j):
        """d trace(A^-1 s[k,j]) / d log(s[k,j]) (raw space)."""
        return np.diagonal(inv_chol(L)) * self.s[k,j,:].flatten()

    def dAS_dS(self, S, k, j):
        """d trace(S s[k,j]) / d log(s[k,j]) (raw space)."""
        return np.diagonal(S) * self.s[k,j,:].flatten()

    def Sa(self, a, k, j):
        """s[k,j] a"""
        return mdot(np.diag(self.s[k,j]), a)

    def _update(self):
        # refresh the flat parameter vector and the pairwise ratio cache
        self.parameters = self.get_parameters()
        for k in range(self.num_comp):
            for l in range(self.num_comp):
                for j in range(self.num_process):
                    self.invC_klj_Sk[k,l,j] = self._s_k_skl(k,l,j)

    def _s_k_skl(self, k, l, j):
        """
        calculates s[k,j] / (s[k,j] + s[k,l]) in a hopefully numerical stable manner.
        """
        # factor out the larger log-variance before exponentiating
        a = np.maximum(self.log_s[k, j, :], self.log_s[l, j, :])
        return np.exp((-a + self.log_s[k, j, :])) / (np.exp((-a + self.log_s[l, j, :])) + np.exp((-a + self.log_s[k, j, :])))

    def get_m_S_params(self):
        return self.m, self.log_s
| {
"repo_name": "adezfouli/savigp",
"path": "GP/mog_diag.py",
"copies": "1",
"size": "4598",
"license": "apache-2.0",
"hash": 2010812814273499600,
"line_mean": 31.8428571429,
"line_max": 126,
"alpha_frac": 0.5428447151,
"autogenerated": false,
"ratio": 2.769879518072289,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3812724233172289,
"avg_score": null,
"num_lines": null
} |
__author__ = 'AT'
import math
from GPy.util.linalg import mdot
from numpy.ma import argsort, sort
from scipy.linalg import inv, det
from scipy.misc import logsumexp
from scipy.special import erfinv
from scipy.special._ufuncs import gammaln
import numpy as np
from util import cross_ent_normal
class Likelihood:
    """
    A generic class which provides likelihood for the model.

    Abstract methods raise NotImplementedError (rather than a bare Exception),
    consistent with the MoG base class' abstract methods.
    """

    def __init__(self):
        pass

    def ll_F_Y(self, F, Y):
        r"""
        Method which calculates p(Y|F), and dp(Y|F)\\dlambda, for multiple latent function values (``F``), and multiple
        output points (``Y``).

        Parameters
        ----------
        F : ndarray
         dim(F) = S * N * Q,
         where S is the number of samples, N number of datapoints, and Q number of latent processes

        Y : ndarray
         dim(Y) = N * dim(O),
         where dim(O) is the output dimensionality.

        Returns
        -------
        P : ndarray
         dim(P) = N * S
         P[n,s] = p(Y[n,:]| F[s,n,:])

        dP : ndarray
         dim(dP) = N * S
         dP[n,s] = p(Y[n,:]| F[s,n,:])\\dlambda,
         where lambda is the likelihood parameters
        """
        raise NotImplementedError("not implemented yet")

    def get_num_params(self):
        """
        :returns number of likelihood parameters to optimize
        """
        raise NotImplementedError("not implemented yet")

    def set_params(self, p):
        """
        sets likelihood parameters.

        Parameters
        ----------
        p : ndarray
         array containing values.
        """
        raise NotImplementedError("not implemented yet")

    def get_params(self):
        """
        returns likelihood parameters.

        Returns
        -------
        p : ndarray
         parameters of the likelihood.
        """
        raise NotImplementedError("not implemented yet")

    def map_Y_to_f(self, Y):
        """
        This functions is used by the model to initialise latent processes.

        Parameters
        ----------
        Y : ndarray
         input matrix. dim(Y) = N * dim(out).

        Returns
        -------
        f : array_like
         initial value of ``f`` given ``Y``. dim(f) = Q, where Q is the number of latent processes.
        """
        # default: column-wise mean of the outputs
        return np.mean(Y, axis=0)

    def output_dim(self):
        """
        This function returns dimensionality of the output. It is used by the model to create matrices for prediction.
        """
        raise NotImplementedError("not implemented yet")

    def nlpd_dim(self):
        """
        This function returns number of NLPD. This can be useful in the case that for example the likelihood
        returns NLPD for each output separately.
        """
        return 1

    def ell(self, mu, sigma, Y):
        """
        The method returns exact expected log likelihood. It is not generally used by the model, but it is used
        by the grad checker to have access to the exact objective function.

        Returns
        -------
        ell : float
         ell = log \integral p(Y|f)N(f|mu, sigma)
        """
        raise NotImplementedError("not implemented yet")

    def predict(self, mu, sigma, Ys, model=None):
        """
        Makes predictions about mean, and variance for the output and calculates NLPD based on Ys.

        Parameters
        ----------
        mu : ndarray
         dim(mu) = N * Q, where Q is the number of latent processes.

        sigma : ndarray
         dim(sigma) = N * Q

        Ys : ndarray
         dim(Ys) = N * output dimension

        Returns
        -------
        mean : array_like
         mean = \integral y * P(y|f)N(f|mu, sigma) df dy

        var :
         variance of the prediction

        NLPD:
         NLPD = -log \integral p(Ys|f)N(f|mu, sigma) df
        """
        raise NotImplementedError("not implemented yet")
class MultivariateGaussian(Likelihood):
    """
    Implementation of a multi-variate Gaussian likelihood

     log P(y|f) = -0.5 * log det (sigma) - size(sigma)/2 * log (2 * pi) - 0.5 * (f-y)T sigma^-1 (f-y)
    """

    def __init__(self, sigma):
        Likelihood.__init__(self)
        # noise covariance, its inverse, and the constant part of the log-pdf
        self.sigma = sigma
        self.sigma_inv = inv(self.sigma)
        self.const = -1.0 / 2 * np.log(det(self.sigma)) - float(len(self.sigma)) / 2 * np.log(2 * math.pi)

    def ll_F_Y(self, F, Y):
        """Gaussian log-density of Y around F; the parameter gradient is not
        implemented (None)."""
        # quadratic form 0.5 (F-Y)^T sigma^-1 (F-Y), summed over the output axis
        c = 1.0 / 2 * (mdot((F-Y), self.sigma_inv) * (F-Y)).sum(axis=2)
        return (self.const + -c), None

    def get_sigma(self):
        """Return the noise covariance matrix."""
        return self.sigma

    def get_params(self):
        """Likelihood parameters: the flattened noise covariance."""
        return self.sigma.flatten()

    def get_num_params(self):
        return self.sigma.flatten().shape[0]

    def ell(self, mu, sigma, Y):
        """Exact expected log-likelihood via Gaussian cross-entropy."""
        return cross_ent_normal(mu, np.diag(sigma), Y, self.sigma)

    def output_dim(self):
        return self.sigma.shape[0]
class UnivariateGaussian(Likelihood):
    """
    Implementation of the a univariate likelihood

     log p(y|f) = -0.5 * log(sigma) - 0.5 log (2pi) - 0.5 * (f-y)^2 / sigma

    where ``sigma`` is the noise variance (optimised in log space).
    """

    def __init__(self, sigma):
        Likelihood.__init__(self)
        self.set_params(np.log([sigma]))

    def ll_F_Y(self, F, Y):
        """Log-density of Y around F, and its gradient wrt log(sigma)."""
        c = 1.0 / 2 * np.square(F - Y) / self.sigma
        # grad wrt log(sigma): d/dlog(s)[-0.5 log s - 0.5 (f-y)^2/s] = -0.5 + c
        return (self.const + -c)[:, :, 0], (self.const_grad * self.sigma + c)[:, :, 0]

    def set_params(self, p):
        """Set parameters; ``p`` holds [log(sigma)]."""
        self.sigma = math.exp(p[0])
        # cached constants of the log-pdf and its derivative
        self.const = -1.0 / 2 * np.log(self.sigma) - 1.0 / 2 * np.log(2 * math.pi)
        self.const_grad = -1.0 / 2 / self.sigma

    def get_sigma(self):
        return np.array([[self.sigma]])

    def get_params(self):
        return np.array(np.log([self.sigma]))

    def get_num_params(self):
        return 1

    def predict(self, mu, sigma, Ys, model=None):
        """Predictive mean/variance and, when ``Ys`` is given, the log
        predictive density.

        BUG FIX (two defects in the original LPD):
        - it computed -(square(0.5*(Ys-mu))/var + log(2*pi*var)), i.e. a 0.25
          factor on the squared error and a missing 0.5 on the log term; the
          Gaussian log-density is -0.5*((Ys-mu)^2/var + log(2*pi*var));
        - it unconditionally evaluated lpd[:, np.newaxis] even when Ys was
          None (lpd = None), raising a TypeError; None is now returned.
        """
        var = sigma + self.sigma
        if Ys is not None:
            lpd = (-0.5 * (np.square(Ys - mu) / var + np.log(2. * math.pi * var)))[:, 0]
            return mu, var, lpd[:, np.newaxis]
        return mu, var, None

    def ell(self, mu, sigma, Y):
        """Exact expected log-likelihood via Gaussian cross-entropy."""
        return cross_ent_normal(mu, np.diag(sigma), Y, np.array([[self.sigma]]))

    def output_dim(self):
        return 1
class LogGaussianCox(Likelihood):
    """
    Log Gaussian Cox process likelihood:

     p(y|f) = lambda^y exp(-lambda) / y!   with   log(lambda) = f + offset
    """

    def __init__(self, offset):
        Likelihood.__init__(self)
        self.offset = offset

    def ll_F_Y(self, F, Y):
        """Poisson log-likelihood of counts Y, and its gradient wrt the offset."""
        log_rate = F + self.offset
        ll = Y * log_rate - np.exp(log_rate) - gammaln(Y + 1)
        grad = Y - np.exp(log_rate)
        return ll[:, :, 0], grad[:, :, 0]

    def set_params(self, p):
        """Set the single likelihood parameter: the log-rate offset."""
        self.offset = p[0]

    def get_params(self):
        return np.array([self.offset])

    def get_num_params(self):
        return 1

    def predict(self, mu, sigma, Ys, model=None):
        """Log-normal mean and variance of the rate; NLPD is not computed."""
        rate_mean = np.exp(mu + sigma / 2) * np.exp(self.offset)
        rate_var = (np.exp(sigma) - 1) * np.exp(2 * mu + sigma) * np.exp(2 * self.offset)
        return rate_mean, rate_var, None

    def output_dim(self):
        return 1
class LogisticLL(object, Likelihood):
    """
    Logistic likelihood

     p(y|f) = 1 / (1 + exp(-f))

    The output is assumed to be either 1 or -1

    NOTE(review): the (object, Likelihood) base list is a Python-2 idiom for
    making the class new-style; it is not valid under Python 3.
    """

    def __init__(self):
        Likelihood.__init__(self)
        # fixed Monte-Carlo sample bank used by predict()
        self.n_samples = 20000
        self.normal_samples = np.random.normal(0, 1, self.n_samples).reshape((1, self.n_samples))

    def ll_F_Y(self, F, Y):
        """Log-likelihood of labels Y in {-1, 1}; no parameter gradient (None).

        NOTE(review): this computes -log(1 + exp(+F*Y)) whereas the standard
        sigmoid convention is -log(1 + exp(-Y*F)); the sign convention for Y
        is flipped here, but predict() below uses it consistently.
        """
        return -np.log(1 + np.exp(F * Y))[:, :, 0], None

    def set_params(self, p):
        """Logistic likelihood has no free parameters; reject non-empty input."""
        if p.shape[0] != 0:
            raise Exception("Logistic function does not have free parameters")

    def predict(self, mu, sigma, Ys, model=None):
        """Monte-Carlo estimate of the class probability; returns
        (mean, mean*(1-mean), lpd).

        BUG FIX: the original unconditionally evaluated lpd[:, 0] even when
        ``Ys`` was None (so lpd was None), raising a TypeError; None is now
        returned for the LPD in that case.
        """
        f = self.normal_samples * np.sqrt(sigma) + mu
        mean = np.exp(self.ll_F_Y(f.T[:, :, np.newaxis], np.array([[1]]))[0]).mean(axis=0)[:, np.newaxis]
        if Ys is not None:
            # maps Ys=-1 to (1 - mean) and Ys=1 to mean
            lpd = np.log((-Ys + 1) / 2 + Ys * mean)
            return mean, mean * (1 - mean), lpd[:, 0][:, np.newaxis]
        return mean, mean * (1 - mean), None

    def get_params(self):
        return np.array([])

    def get_num_params(self):
        return 0

    def output_dim(self):
        return 1
class SoftmaxLL(Likelihood):
    """
    Softmax likelihood:

     p(y=c|f) = exp(f_c) / (exp(f_1) + ... + exp(f_N))

    output is supposed to be in the form of for example [1 0 0] for class1, and [0 1 0] for class2 etc.
    """

    def __init__(self, dim):
        Likelihood.__init__(self)
        # number of classes, and a fixed Monte-Carlo sample bank (one row per
        # class) used by predict()
        self.dim = dim
        self.n_samples = 20000
        self.normal_samples = np.random.normal(0, 1, self.n_samples * dim) \
            .reshape((self.dim, self.n_samples))

    def ll_F_Y(self, F, Y):
        """log p(Y|F) = f_y - logsumexp(f); no parameter gradient (None)."""
        # (F * Y).sum(2) selects the latent value of the true class (Y one-hot)
        return -logsumexp(F - (F * Y).sum(2)[:, :, np.newaxis], 2), None

    def predict(self, mu, sigma, Ys, model=None):
        """Monte-Carlo estimate of class probabilities; variance is None.

        BUG FIX: the original unconditionally evaluated lpd[:, np.newaxis]
        even when ``Ys`` was None (so lpd was None), raising a TypeError;
        None is now returned for the LPD in that case.
        """
        F = np.empty((self.n_samples, mu.shape[0], self.dim))
        for j in range(self.dim):
            # per-class latent samples: f_j ~ N(mu_j, sigma_j)
            F[:, :, j] = np.outer(self.normal_samples[j, :], np.sqrt(sigma[:, j])) + mu[:, j]
        expF = np.exp(F)
        mean = (expF / expF.sum(2)[:, :, np.newaxis]).mean(axis=0)
        if Ys is not None:
            lpd = np.log((Ys * mean).sum(axis=1))
            return mean, None, lpd[:, np.newaxis]
        return mean, None, None

    def set_params(self, p):
        """Softmax likelihood has no free parameters; reject non-empty input."""
        if p.shape[0] != 0:
            raise Exception("Softmax function does not have free parameters")

    def get_params(self):
        return np.array([])

    def get_num_params(self):
        return 0

    def output_dim(self):
        return self.dim
class WarpLL(Likelihood):
    """
    Implementation of a Warp likelihood.

    The log likelihood for warped Gaussian processes and its derivatives.

    p(y|f) = dt(y)/dy p(t(y)|f)

    where t(y) = nnwarp(y)

    The likelihood parameters are

    hyp.lik = [a, b, c, log(sqrt(sn2))]

    where a, b, c are parameter vectors of the warping t(y).

    Inherits only from ``Likelihood``: the extra ``object`` base used by the
    original declaration can break the MRO once ``Likelihood`` is a new-style
    class, and is inconsistent with the other likelihoods in this file.
    """

    def __init__(self, ea, eb, c, log_s):
        Likelihood.__init__(self)
        self.set_params(np.hstack((ea, eb, c, [log_s])))

    def warp(self, Y):
        """Apply the warp t(Y) and return (t, dt/dY)."""
        ea = np.exp(self.params[0, :])
        eb = np.exp(self.params[1, :])
        c = self.params[2, :]
        tanhcb = np.tanh(np.add.outer(Y, c) * eb)
        t = (tanhcb * ea).sum(axis=2) + Y
        # derivative of the warp w.r.t. Y; always >= 1, so t is monotone
        w = ((1. - np.square(tanhcb)) * ea * eb).sum(axis=2) + 1
        return t, w

    def warpinv(self, z, t0, N):
        """Invert the warp with N Newton iterations starting from t0 (in place)."""
        for n in range(N):
            t1, dt1 = self.warp(t0)
            t0 -= (t1 - z) / dt1
        return t0

    def ll_F_Y(self, F, Y):
        t, w = self.warp(Y)
        sq = 1.0 / 2 * np.square(F - t) / self.sigma
        return (self.const + -sq + np.log(w))[:, :, 0], \
               (self.const_grad * self.sigma + sq)[:, :, 0]

    def set_params(self, p):
        # last entry is the log of the Gaussian noise variance
        self.sigma = np.exp(p[-1])
        self.const = -1.0 / 2 * np.log(self.sigma) - 1.0 / 2 * np.log(2 * math.pi)
        self.const_grad = -1.0 / 2 / self.sigma
        if p.shape[0] > 1:
            # remaining entries are the warp parameters [a; b; c] -- 3 rows of n.
            # // keeps integer semantics under Python 3 as well (reshape needs an int).
            n = (p.shape[0] - 1) // 3
            self.params = p[:-1].reshape(3, n)

    def predict(self, mu, sigma, Ys, model=None):
        """Predictive mean/variance in the original (unwarped) output space.

        The warp is inverted numerically at Gaussian quantiles (for the
        variance) and Gauss-Hermite nodes (for the mean). Returns
        (mean, variance, lpd); lpd is None when Ys is None (previously that
        case crashed on ``lpd[:, 0]``).
        """
        # calculating var: invert the warp at 9 equally spaced quantiles
        s = sigma + self.sigma
        alpha = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8000, 0.9000])
        q = np.outer(np.sqrt(2 * s), erfinv(2 * alpha - 1)) + mu
        z = self.warp(model.Y)[0]
        I = argsort(z, axis=0)
        sortz = sort(z, axis=0)
        sortt = model.Y[I]
        quant = self.warpinv(q, self._get_initial_points(q, sortz, sortt), 100)
        var = np.square((quant[:, 8] - (quant[:, 0])) / 4)
        # calculating mu via 10-point Gauss-Hermite quadrature
        H = np.array([7.6e-07, 0.0013436, 0.0338744, 0.2401386, 0.6108626, 0.6108626, 0.2401386, 0.0338744, 0.0013436, 7.6e-07])
        quard = np.array([-3.4361591, -2.5327317, -1.7566836, -1.0366108, -0.3429013, 0.3429013, 1.0366108, 1.7566836, 2.5327317, 3.4361591])
        mu_quad = np.outer(np.sqrt(2 * s), quard) + mu
        mean = self.warpinv(mu_quad, self._get_initial_points(mu_quad, sortz, sortt), 100)
        mean = mdot(mean, H[:, np.newaxis]) / np.sqrt(math.pi)
        if Ys is None:
            return mean, var[:, np.newaxis], None
        ts, w = self.warp(Ys)
        lpd = -0.5 * np.log(2 * math.pi * s) - 0.5 * np.square(ts - mu) / s + np.log(w)
        return mean, var[:, np.newaxis], lpd[:, 0][:, np.newaxis]

    def output_dim(self):
        return 1

    def _get_initial_points(self, q, sortz, sortt):
        """Initial guesses for warpinv: bracket each q between sorted warped data."""
        t0 = np.empty(q.shape)
        for j in range(q.shape[0]):
            for k in range(q.shape[1]):
                if q[j, k] > sortz[-1]:
                    t0[j, k] = sortt[-1]
                elif q[j, k] < sortz[0]:
                    t0[j, k] = sortt[0]
                else:
                    I = np.argmax(sortz > q[j, k])
                    I = np.array([I - 1, I])
                    t0[j, k] = sortt[I].mean()
        return t0

    def test(self):
        """
        It's a function for testing this class against the Matlab code from which this class is adapted
        """
        # NOTE(review): sets up fixture values but asserts nothing -- kept as-is.
        mu = np.array([[1.13395340993645e-06, 5.65190424705805e-06, 5.78826209038103e-06, 2.83243484612040e-06, -7.38434570563690e-07]]).T
        sigma = np.array([[0.299216202282485, 0.243742513817980, 0.295996476326654, 0.230752860541760, 0.281672812756221
                           ]]).T
        Ys = np.array([[-0.200000000000000, -0.150000000000000, -0.100000000000000, -0.150000000000000, -0.250000000000000]]).T
        self.set_params(np.array([-2.0485, 1.7991, 1.5814, 2.7421, 0.9426, 1.7804, 0.1856, 0.7024, -0.7421, -0.0712]))
        self.sigma = 0.8672

    def get_params(self):
        return np.array(np.log([self.sigma]))

    def get_num_params(self):
        return 1
class CogLL(Likelihood):
    """
    Implementation of a Gaussian process network likelihood.

    y ~ N (W * F, sigma)

    where dim(W) = P * Q, and dim(F) = Q * 1.
    W and F are made of latent processes: the first P * Q latent functions
    form W (reshaped row-major) and the remaining Q form F.
    """
    def __init__(self, sigma_y, P, Q):
        """
        :param sigma_y: input noise
        :param P: output dimensionality
        :param Q: number of latent functions in the network
        :return: None
        """
        Likelihood.__init__(self)
        self.P = P
        self.Q = Q
        # total latent functions: P*Q for the mixing matrix W plus Q for F
        self.f_num = (P + 1) * Q
        self.set_params(np.array([np.log(sigma_y)]))
        # Monte-Carlo samples used by predict()
        self.n_samples = 20000
        self.normal_samples = np.random.normal(0, 1, self.n_samples * self.f_num) \
            .reshape((self.f_num, self.n_samples))

    def ll_F_Y(self, F, Y):
        """Gaussian log-likelihood log N(Y | W*f, sigma) and a sigma_y gradient term."""
        # split the latent draws into the mixing matrix W and the outputs f
        W = F[:, :, :self.P * self.Q].reshape(F.shape[0], F.shape[1], self.P, self.Q)
        f = F[:, :, self.P * self.Q:]
        # Wf[i, j, l] = sum_k W[i, j, l, k] * f[i, j, k]
        Wf = np.einsum('ijlk,ijk->ijl', W, f)
        c = 1.0 / 2 * (mdot((Y - Wf), self.sigma_inv) * (Y - Wf)).sum(axis=2)
        return (self.const + -c), (self.const_grad * self.sigma_y + c)

    def get_params(self):
        """Free parameters: just the log of the noise variance."""
        return np.array([np.log(self.sigma_y)])

    def get_num_params(self):
        return 1

    def ell(self, mu, sigma, Y):
        # closed-form expected log-likelihood under a diagonal Gaussian posterior
        return cross_ent_normal(mu, np.diag(sigma), Y, self.sigma)

    def output_dim(self):
        return self.P

    def map_Y_to_f(self, Y):
        # initialise every latent function at the overall mean of Y
        return np.mean(Y) * np.ones(self.f_num)

    def set_params(self, p):
        """Set log(sigma_y) = p[0] and refresh the cached derived quantities."""
        self.sigma_y = math.exp(p[0])
        self.sigma = self.sigma_y * np.eye(self.P)
        self.sigma_inv = inv(self.sigma)
        self.const = -1.0 / 2 * np.log(det(self.sigma)) - float(len(self.sigma)) / 2 * np.log(2 * math.pi)
        self.const_grad = -float(self.P) / 2. / self.sigma_y

    def predict(self, mu, sigma, Ys, model=None):
        """Monte-Carlo predictive mean of W*f; lpd is None when Ys is None."""
        F = np.empty((self.n_samples, mu.shape[0], self.f_num))
        for j in range(self.f_num):
            F[:, :, j] = np.outer(self.normal_samples[j, :], np.sqrt(sigma[:, j])) + mu[:, j]
        W = F[:, :, :self.P * self.Q].reshape(F.shape[0], F.shape[1], self.P, self.Q)
        f = F[:, :, self.P * self.Q:]
        Wf = np.einsum('ijlk,ijk->ijl', W, f)
        lpd = None
        if Ys is not None:
            lpd = self._calc_nlpd(Ys, Wf)
        return Wf.mean(axis=0), None, lpd

    def _calc_nlpd(self, Ys, Wf):
        """Log predictive density: column 0 joint, columns 1..P per output."""
        lpd = np.empty((Ys.shape[0], Ys.shape[1] + 1))
        c = 1.0 / 2 * (mdot((Ys - Wf), self.sigma_inv) * (Ys - Wf)).sum(axis=2)
        lpd[:, 0] = np.log(np.exp(self.const + -c).mean(axis=0))
        for i in range(Ys.shape[1]):
            c = 1.0 / 2 * (np.square((Ys[:, i] - Wf[:, :, i])) * self.sigma_inv[i,i])
            const = -1.0 / 2 * np.log((self.sigma[i,i])) - 1. / 2 * np.log(2 * math.pi)
            lpd[:, i+1] = np.log(np.exp(const + -c).mean(axis=0))
        return lpd

    def nlpd_dim(self):
        # joint density plus one column per output dimension
        return self.P + 1
| {
"repo_name": "adezfouli/savigp",
"path": "GP/likelihood.py",
"copies": "1",
"size": "16432",
"license": "apache-2.0",
"hash": 3025340559836770000,
"line_mean": 29.2058823529,
"line_max": 141,
"alpha_frac": 0.5311587147,
"autogenerated": false,
"ratio": 3.089302500470013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9103500023756028,
"avg_score": 0.0033922382827970394,
"num_lines": 544
} |
__author__ = 'AT'
from GPy.util.linalg import mdot
import math
from numpy.linalg import inv
from savigp_single_comp import SAVIGP_SingleComponent
from savigp import SAVIGP, Configuration
import numpy as np
class SAVIGP_Reparam(SAVIGP_SingleComponent):
    """Single-component SAVIGP in a reparameterised posterior.

    Cross-entropy terms and their gradients are expressed directly through
    Kzz (rather than its inverse), which changes the gradient projections
    implemented below relative to the parent class.
    """

    def init_mog(self, init_m):
        # NOTE(review): super(SAVIGP_SingleComponent, self) skips
        # SAVIGP_SingleComponent's own init_mog and dispatches to its base --
        # confirm this bypass is intentional.
        super(SAVIGP_SingleComponent, self).init_mog(init_m)
        for j in range(self.num_latent_proc):
            # NOTE(review): + 0.001 adds jitter to *every* entry of Kzz, not
            # only the diagonal -- verify this is the intended regularisation.
            self.MoG.update_covariance(j, inv(self.Kzz[j] + 0.001))

    def _proj_m_grad(self, j, dl_dm):
        # identity projection in this parameterisation
        return dl_dm

    def mdot_Aj(self, Ajn, Kxnz):
        # Ajn is unused here; the projection only needs Kxnz
        return mdot(Kxnz.T, Kxnz)

    def _b_n(self, n, j, Aj, Kzx):
        """
        calculating [b_k(n)]j for latent process j (eq 19) for all k

        returns: a
        """
        return mdot(Kzx[n, :], self.MoG.m[:, j, :].T)

    def _sigma_n(self, n, j, Kj, Aj, Kzx):
        """
        calculating [sigma_k(n)]j,j for latent process j (eq 20) for all k
        """
        # clamp tiny negative variances caused by numerical error
        if Kj[n] < 0:
            Kj[n] = 0
        return Kj[n] + self.MoG.aSa(Kzx[n, :], j)

    def _dcorss_dm(self):
        """
        calculating d corss / dm
        """
        dcdm = np.empty((self.num_mog_comp, self.num_latent_proc, self.num_inducing))
        for j in range(self.num_latent_proc):
            # -Kzz m_k, weighted by the mixture weights pi_k
            dcdm[:, j, :] = -mdot(self.Kzz[j, :, :], self.MoG.m[:, j, :].T).T * self.MoG.pi[:, np.newaxis]
        return dcdm

    def _dcross_ds(self):
        """
        calculating L_corss by s_k for all k's
        """
        dc_ds = np.empty((self.num_mog_comp, self.num_latent_proc, self.MoG.get_sjk_size()))
        for j in range(self.num_latent_proc):
            dc_ds[:, j] = -1. / 2 * np.array(
                [self.MoG.dAS_dS(self.Kzz[j, :, :], k, j) * self.MoG.pi[k] for k in range(self.num_mog_comp)])
        return dc_ds

    def transform_dcorss_dS(self):
        # flattened gradient of the cross term w.r.t. covariance parameters
        return self._dcross_ds().flatten()

    def _cross_dcorss_dpi(self, N):
        """
        calculating L_corss by pi_k, and also calculates the cross term

        :returns d cross / d pi, cross
        """
        cross = 0
        d_pi = np.zeros(self.num_mog_comp)
        for j in range(self.num_latent_proc):
            for k in range(self.num_mog_comp):
                d_pi[k] += \
                    N * math.log(2 * math.pi) + \
                    self.log_detZ[j] + \
                    mdot(self.MoG.m[k, j, :].T, self.Kzz[j, :, :], self.MoG.m[k, j, :].T) + \
                    self.MoG.tr_AS(self.Kzz[j, :, :], k, j)
        for k in range(self.num_mog_comp):
            cross += self.MoG.pi[k] * d_pi[k]
        # the -1/2 factor of the Gaussian cross-entropy is applied at the end
        d_pi *= -1. / 2
        cross *= -1. / 2
        return cross, d_pi

    def _dcross_K(self, j):
        """Gradient of the cross term w.r.t. the kernel matrix Kzz[j]."""
        dc_dK = np.zeros((self.num_inducing, self.num_inducing))
        for k in range(self.num_mog_comp):
            dc_dK += -0.5 * self.MoG.pi[k] * (self.invZ[j]
                                              + mdot(self.MoG.m[k, j, :, np.newaxis], self.MoG.m[k, j, :, np.newaxis].T) +
                                              self.MoG.s[k, j, :, :]
                                              )
        return dc_dK

    def _dent_dhyper(self):
        """Gradient of the entropy term w.r.t. the kernel hyper-parameters."""
        dc_dh = np.empty((self.num_latent_proc, self.num_hyper_params))
        for j in range(self.num_latent_proc):
            self.kernels_latent[j].update_gradients_full(self.invZ[j], self.Z[j])
            dc_dh[j] = self.kernels[j].gradient.copy()
        return dc_dh

    def _l_ent(self):
        """Entropy contribution to the lower bound."""
        ent = -np.dot(self.MoG.pi, self.log_z)
        for j in range(self.num_latent_proc):
            ent += self.log_detZ[j]
        return ent

    def _dsigma_n_dhyp(self, j, k, A, Kxnz, n, xn):
        # chain rule over the three kernel-dependent factors of sigma_n
        return self.dKx_dhyper(j, xn) \
               - self.dA_dhyper_n_mult_x(xn, j, A[j, n], -Kxnz.T) \
               - self.dKzxn_dhyper_mult_x(j, xn, A[j, n]) + \
               2 * self.dKzxn_dhyper_mult_x(j, xn, self.MoG.Sa(Kxnz, k, j))

    def _db_n_dhyp(self, j, k, A, n, xn):
        return self.dKzxn_dhyper_mult_x(j, xn, self.MoG.m[k, j])

    def calculate_dhyper(self):
        """Whether hyper-parameter gradients are required by the configuration."""
        return Configuration.HYPER in self.config_list

    def dKzxn_dhyper_mult_x(self, j, x_n, x):
        """d(Kz,xn)/dhyper multiplied by vector x, via GPy's gradient machinery."""
        self.kernels[j].update_gradients_full(x[:, np.newaxis], self.Z[j], x_n)
        return self.kernels[j].gradient.copy()

    def dKx_dhyper(self, j, x_n):
        """d(K(xn, xn))/dhyper via GPy's gradient machinery."""
        self.kernels[j].update_gradients_full(np.array([[1]]), x_n)
        return self.kernels[j].gradient.copy()
| {
"repo_name": "adezfouli/savigp",
"path": "GP/savigp_reparam.py",
"copies": "1",
"size": "4389",
"license": "apache-2.0",
"hash": 2554066154695920600,
"line_mean": 34.3951612903,
"line_max": 122,
"alpha_frac": 0.5165185692,
"autogenerated": false,
"ratio": 2.758642363293526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3775160932493526,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aus'
import facebook
import re
# mesaave group id
mesa_ave_id = "621422037868632"
# tarifamesaalicantemadridalicante group id
tarifa_mesa_alc_mad_alc_id = "256075921093789"
# https://www.facebook.com/groups/512002085573921/
ave_alc_mad_alc_id = "512002085573921"

# SECURITY(review): Facebook access token hard-coded in source; it should be
# revoked and loaded from the environment or a config file instead.
access_token = "CAACEdEose0cBAFI6ll267OFJhCyc2kjbZCtBzoL681Ieo6LC9F5K5o9EhlF1c7C0EdKFDuJfzQ4jUvUEZCtHKUaeZA3oHywcsr0x7y6TB02bsf3ynzZBRlupwDogefp21LVstadh3w9NAXOSND1sORN2GEGjrQAFtfz2sQ1aEb07kNnFQvrY8HKpnOqeIenCwOLQKu0fiwZDZD"
version = "2.4"

graph = facebook.GraphAPI(access_token, version)
graph.timeout = 100

# Fetch all posts from the mesaave group's feed.
connection_name = 'feed'
feed = graph.get_connections(mesa_ave_id, connection_name)
feed_data = feed['data']

# Example of accessing post 0 of the feed:
# post0 = feed_data[0]
# print post0['message']
def mad_alc_filter_search(mensaje):
    """Filter for Madrid -> Alicante trips.

    Returns True when the message contains "mad" followed (with at least one
    character in between) by "al", case-insensitively -- e.g. "Madrid a
    Alicante". Note "al" may also match words like "alguien".
    """
    pattern_trip = "(mad.{1,}al)"
    # `is not None` instead of `!= None`; the intermediate flag was redundant
    return re.search(pattern_trip, mensaje, re.IGNORECASE) is not None
def alc_mad_filter_search(mensaje):
    """Filter for Alicante -> Madrid trips.

    Returns True when the message contains "al" followed (with at least one
    character in between) by "mad", case-insensitively -- e.g. "Alicante a
    Madrid". Note "al" may also match words like "alguien".
    """
    # Removed the unused token lists (alicante/separador/madrid) and the
    # commented-out experiments; they had no effect on the result.
    pattern_trip = "(al.{1,}mad)"
    return re.search(pattern_trip, mensaje, re.IGNORECASE) is not None
def date_filter_search(mensaje, dia=17):
    """Filter for messages mentioning a given day of the month.

    The day matches only when surrounded by single spaces (" 17 "), which
    avoids matching longer numbers but misses e.g. "17," or end-of-message.

    :param mensaje: message text to search.
    :param dia: day of the month to look for; defaults to 17, the value the
        original script hard-coded.
    """
    # NOTE(review): the month is intentionally not checked -- people often
    # omit it in their posts.
    pattern_day = "( " + str(dia) + " )"
    return re.search(pattern_day, mensaje, re.IGNORECASE) is not None
# Show every feed message that matches both the route filter and the date
# filter. (Python 2 print statements preserved as-is.)
feed_len = len(feed_data)
print "Feed lenght is " + str(feed_len)
for i in range(0, (feed_len)):
    mensaje = feed_data[i]['message']
    if (alc_mad_filter_search(mensaje) == True) and (date_filter_search(mensaje) == True):
        print "-----------------------------------"
        print "ID: " + feed_data[i]['id']
        print "Created time :" + feed_data[i]['created_time']
        print "MENSAJE = " + mensaje
        print "MATCHER = TRUE"
        print "-----------------------------------"
    # else:
    #     print "MENSAJE = " + mensaje
    #     print "MATCHER = FALSE"
    #     print "-------------------------------------------"
| {
"repo_name": "albertoaus/facebook-utils",
"path": "version1.py",
"copies": "1",
"size": "2878",
"license": "apache-2.0",
"hash": -3419460071526091300,
"line_mean": 28.3673469388,
"line_max": 224,
"alpha_frac": 0.6296038916,
"autogenerated": false,
"ratio": 2.6847014925373136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.873481380206844,
"avg_score": 0.015898316413774864,
"num_lines": 98
} |
import numpy as np
from tensorflow.contrib.learn import TensorFlowDNNRegressor
import evolutionLearning as e
from trainingFunctions import addThem
# Initializes the Neural Network to evolve.
NN = TensorFlowDNNRegressor(hidden_units=[2], steps=5000)

# Evolves the neural network to learn the function.
# Params:
#     NN: NN - neural network to evolve.
#     numSpecies: 10 - number of species to train at a time.
#     numGenerations: 10 - number of generations to evolve through.
#     func: addThem - function to train the neural network on.
#     sizeData: 1000000 - number of data examples each species will generate
#                         and train on.
#     low: 1 - smallest number to generate in the data.
#     high: 500 - largest number to generate in the data.
# NOTE(review): the actual call below uses 20 species, 2 generations,
# 100000 examples and high=1000 -- the comment above is out of date.
NN = e.evolve(NN, 20, 2, addThem, 100000, 1, 1000)

# Enters loop to predict inputs from user.
# Typing anything non-numeric (e.g. "exit") leaves the loop.
print("\nEnter exit to leave loop.")
while True:
    first = input("Number 1... ")
    try:
        # succeeds if user typed a number
        first = int(first)
    except:
        # exit loop
        break
    second = input("Number 2... ")
    try:
        # succeeds if user typed a number
        second = int(second)
    except:
        # exit loop
        break
    # Calculates prediction from NN
    result = NN.predict(np.array([[first, second]]))
    print("I think %d + %d = %d"
          % (first, second, int(np.rint(result[0][0]))))
"repo_name": "derrowap/MA490-MachineLearning-FinalProject",
"path": "evolveAddThem.py",
"copies": "1",
"size": "1499",
"license": "mit",
"hash": 5071125132117359000,
"line_mean": 30.914893617,
"line_max": 78,
"alpha_frac": 0.6797865243,
"autogenerated": false,
"ratio": 3.3237250554323725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9330381536231307,
"avg_score": 0.03462600870021305,
"num_lines": 47
} |
import numpy as np
import pandas as pd
from tensorflow.contrib import skflow
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import train_test_split
# import threading
from trainingFunctions import addThem
# class myThread(threading.Thread):
# def __init__(self, size):
# threading.Thread.__init__(self)
# self.size = size
# self.x = np.zeros((size, 2))
# self.y = np.zeros(size)
# def run(self):
# for i in range(self.size):
# a = float(np.random.randint(1, 500))
# b = float(np.random.randint(1, 500))
# self.x[i] = [a, b]
# self.y[i] = addThem(a, b)
def train(ID, size=1000000):
    """Train (or continue training) a DNN regressor on the addThem function.

    Generates `size` random (a, b) pairs with a, b in [1, 500), fits the
    network on 80% of them, saves the model and returns the test-set error.

    :param ID: integer suffix of the on-disk model to restore/save.
    :param size: number of training examples to generate; defaults to the
        1000000 the original hard-coded.
    :return: test error, i.e. 1 - accuracy of the rounded predictions.
    """
    x = np.zeros((size, 2))
    y = np.zeros(size)
    for i in range(size):
        a = float(np.random.randint(1, 500))
        b = float(np.random.randint(1, 500))
        x[i] = [a, b]
        y[i] = addThem(a, b)
    x_train, x_test, y_train, y_test = train_test_split(x, y,
        test_size=0.2, random_state=0)
    # Restore a previously saved model if one exists; otherwise start fresh.
    # (Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate.)
    try:
        NN = skflow.TensorFlowEstimator.restore('/home/derrowap/models/addThem' + str(ID))
    except Exception:
        NN = skflow.TensorFlowDNNRegressor(hidden_units=[2], steps=5000)
    # Train the NN with training data
    NN.fit(x_train, y_train)
    # Test error: fraction of test examples whose rounded prediction is wrong.
    pred = NN.predict(x_test)
    pred = np.reshape(pred, -1)
    pred = np.rint(pred)
    error_test = 1 - accuracy_score(y_test, pred)
    NN.save('/home/derrowap/models/addThem' + str(ID))
    return error_test
# NN = train(-1)
# Restore the previously trained model from disk (hard-coded path/ID).
NN = skflow.TensorFlowEstimator.restore('/home/derrowap/models/addThem1')

# Enters loop to predict inputs from user.
# Typing anything non-numeric (e.g. "exit") leaves the loop.
print("\nEnter exit to leave loop.")
while True:
    first = input("Number 1... ")
    try:
        # succeeds if user typed a number
        first = int(first)
    except:
        # exit loop
        break
    second = input("Number 2... ")
    try:
        # succeeds if user typed a number
        second = int(second)
    except:
        # exit loop
        break
    # Calculates prediction from NN
    result = NN.predict(np.array([[first, second]]))
    print("I think %d + %d = %d"
          % (first, second, int(np.rint(result[0][0]))))
"repo_name": "derrowap/MA490-MachineLearning-FinalProject",
"path": "nnAddThem2.py",
"copies": "1",
"size": "3048",
"license": "mit",
"hash": 5727477442769801000,
"line_mean": 25.0598290598,
"line_max": 82,
"alpha_frac": 0.6617454068,
"autogenerated": false,
"ratio": 2.83271375464684,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.399445916144684,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from tensorflow.contrib.learn import TensorFlowDNNRegressor
from trainingFunctions import fib
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import normalize
# Training data: x = 1..70, y = fib(1)..fib(70).
x = np.zeros(70)
y = np.zeros(70, dtype='int64')
for i in range(70):
    x[i] = i + 1
    y[i] = fib(i + 1)
# Scale inputs to (0, 1] by dividing by the max value (70).
# NOTE(review): sklearn's normalize expects a 2-D array; passing 1-D x
# relies on older sklearn behaviour -- confirm with the installed version.
x = normalize(x, norm='max')

numSteps = 10000
NN = TensorFlowDNNRegressor(hidden_units=[100, 100, 100], steps=numSteps)
NN.fit(x, y)

# Training error: fraction of fib values not predicted exactly after rounding.
pred = NN.predict(x)
pred = np.reshape(pred, -1)
pred = np.rint(pred)
error = 1 - accuracy_score(y, pred)
print('Steps %d, error %f' % (numSteps, error))

# Interactive loop; any non-numeric input exits.
print("\nEnter exit to leave loop.")
while True:
    first = input("Number 1... ")
    try:
        # divide by 70 to match the training-time max-normalisation
        first = int(first) / 70
    except:
        break
    result = NN.predict(np.array([first]))
    # NOTE(review): `first` has already been scaled by 1/70 here, so the
    # index printed by %d is the scaled value, not the entered one.
    print("I think fib_%d = %d"
          % (first, int(np.rint(result[0][0]))))
"repo_name": "derrowap/MA490-MachineLearning-FinalProject",
"path": "skFib.py",
"copies": "1",
"size": "1045",
"license": "mit",
"hash": 8778740807808165000,
"line_mean": 25.15,
"line_max": 78,
"alpha_frac": 0.6449760766,
"autogenerated": false,
"ratio": 3.011527377521614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9045901195680383,
"avg_score": 0.022120451688246313,
"num_lines": 40
} |
import numpy as np
from tensorflow.contrib.learn import TensorFlowDNNRegressor
from trainingFunctions import multiply
from sklearn.metrics import accuracy_score
# Training data: the full multiplication table for 1..top.
top = 20
x = np.zeros((top ** 2, 2))
y = np.zeros(top ** 2)
count = 0
for i in range(1, top+1):
    for j in range(1, top+1):
        x[count] = [i, j]
        y[count] = multiply(i, j)
        count += 1

numSteps = 500000
NN = TensorFlowDNNRegressor(hidden_units=[400], steps=numSteps)
NN.fit(x, y)

# Error: fraction of products not predicted exactly after rounding.
# NOTE(review): this is evaluated on the training set only -- no held-out data.
pred = NN.predict(x)
pred = np.reshape(pred, -1)
pred = np.rint(pred)
error = 1 - accuracy_score(y, pred)
print('Steps %d, error %f' % (numSteps, error))

# Interactive loop; any non-numeric input exits.
print("\nEnter exit to leave loop.")
while True:
    first = input("Number 1... ")
    try:
        first = int(first)
    except:
        break
    second = input("Number 2... ")
    try:
        second = int(second)
    except:
        break
    result = NN.predict(np.array([[first, second]]))
    print("I think %d * %d = %d"
          % (first, second, int(np.rint(result[0][0]))))
"repo_name": "derrowap/MA490-MachineLearning-FinalProject",
"path": "skMultiply.py",
"copies": "1",
"size": "1141",
"license": "mit",
"hash": 1625173426123785000,
"line_mean": 23.8260869565,
"line_max": 78,
"alpha_frac": 0.6275197195,
"autogenerated": false,
"ratio": 2.9407216494845363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4068241368984536,
"avg_score": null,
"num_lines": null
} |
import tensorflow as tf
import numpy as np

from trainingFunctions import addThem

# ============================================================================
# START TENSORFLOW INTERACTIVE SESSION
# ============================================================================
"""TensorFlow uses a highly efficient C++ backend.
This connection to the backend is called a session.

An interactive session allows you to interleave operations which build a
computation graph with ones that run the graph.
Non-interactive sessions should have the entire computation graph built before
starting a session.
"""
sess = tf.InteractiveSession()

# ============================================================================
# BUILD A FEED-FORWARD NEURAL NETWORK
# ============================================================================
# This builds a feed-forward model with a single hidden layer.

# Placeholders to create nodes for input images and target output classes.
#
# x: 2D tensor of floating point numbers.
#    Dimension 1 is the batch size (any number of example data points).
#    Dimension 2 is an array of two numbers.
# y_: 2D tensor of floating point numbers.
#    Dimension 1 is the batch size (any number of example data points).
#    Dimension 2 is a single value representing the target output.
x = tf.placeholder(tf.float32, shape=[None, 2])
y_ = tf.placeholder(tf.float32, shape=[None, 1])

# Defines weights W and biases b in our model.
#
# Variables are used for model parameters to feed values to.
#
# W: 2x1 matrix of floating points numbers.
#    Each value represents a connection weight from one input to the output.
# b: Bias variable for the output.
W_hidden = tf.Variable(tf.truncated_normal([2, 2], stddev=0.1))
b_hidden = tf.Variable(tf.truncated_normal([2], stddev=0.1))

# Applies ReLU function to get activation for each hidden node.
h_out = tf.nn.relu(tf.matmul(x, W_hidden) + b_hidden)

# NOTE(review): keep_prob is fed as 0.2 during training below -- dropping 80%
# of a 2-unit hidden layer is unusually aggressive; confirm intended.
keep_prob = tf.placeholder(tf.float32)
h_drop = tf.nn.dropout(h_out, keep_prob)

# Weights and biases from hidden layer to the output layer.
W_out = tf.Variable(tf.truncated_normal([2, 1], stddev=0.1))
b_out = tf.Variable(tf.truncated_normal([1], stddev=0.1))

# Sums activations of output nodes to get final answer.
# NOTE(review): reduce_sum with no axis collapses the whole batch to a single
# scalar, which is then broadcast against the (batch, 1) targets in the MSE --
# likely a per-example reduction was intended; confirm before relying on this.
y = tf.reduce_sum(tf.nn.relu(tf.matmul(h_drop, W_out) + b_out))

# MSE is our cost function to reduce when training.
mse = tf.reduce_mean(tf.square(y - y_))

# Adam optimizer that trains the model.
train_step = tf.train.AdamOptimizer(0.1).minimize(mse)

# Initializes all variables.
#
# Takes the input values (in this case tensors full of zeros) that have been
# specified, and assigns them to each Variable object.
sess.run(tf.initialize_all_variables())

# Mini-batch training loop: fresh random (a, b) pairs each iteration.
batchSize = 100
batchInput = [None] * batchSize
batchTarget = [None] * batchSize
for i in range(10000):
    for j in range(batchSize):
        a = np.random.randint(1, 10)
        b = np.random.randint(1, 10)
        batchInput[j] = [a, b]
        batchTarget[j] = [addThem(a, b)]
    if i % 1000 == 0:
        print("Iteration %d, MSE = %f" % (i, mse.eval(feed_dict={
            x: batchInput, y_: batchTarget, keep_prob: 1.0})))
    train_step.run(feed_dict={x: batchInput, y_: batchTarget, keep_prob: 0.2})

# Calculates accuracy by using a new set of data.
for j in range(batchSize):
    a = np.random.randint(1, 10)
    b = np.random.randint(1, 10)
    batchInput[j] = [a, b]
    batchTarget[j] = [addThem(a, b)]

print("MSE for multiply function, summing over ReLU:")
print(mse.eval(feed_dict={x: batchInput, y_: batchTarget, keep_prob: 1.0}))

sess.close()
"repo_name": "derrowap/MA490-MachineLearning-FinalProject",
"path": "nnAddThem.py",
"copies": "1",
"size": "3710",
"license": "mit",
"hash": 8153697782086737000,
"line_mean": 35.0291262136,
"line_max": 78,
"alpha_frac": 0.6514824798,
"autogenerated": false,
"ratio": 3.438368860055607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9560679416006943,
"avg_score": 0.005834384769732859,
"num_lines": 103
} |
import tensorflow as tf
import numpy as np

from trainingFunctions import multiply

# ============================================================================
# START TENSORFLOW INTERACTIVE SESSION
# ============================================================================
"""TensorFlow uses a highly efficient C++ backend.
This connection to the backend is called a session.

An interactive session allows you to interleave operations which build a
computation graph with ones that run the graph.
Non-interactive sessions should have the entire computation graph built before
starting a session.
"""
sess = tf.InteractiveSession()

# ============================================================================
# BUILD A FEED-FORWARD NEURAL NETWORK
# ============================================================================
# This builds a feed-forward model with a single hidden layer.

# Placeholders to create nodes for input images and target output classes.
#
# x: 2D tensor of floating point numbers.
#    Dimension 1 is the batch size (any number of example data points).
#    Dimension 2 is an array of two numbers.
# y_: 2D tensor of floating point numbers.
#    Dimension 1 is the batch size (any number of example data points).
#    Dimension 2 is a single value representing the target output.
x = tf.placeholder(tf.float32, shape=[None, 2])
y_ = tf.placeholder(tf.float32, shape=[None, 1])

# Defines weights W and biases b in our model.
#
# Variables are used for model parameters to feed values to.
#
# W: 2x1 matrix of floating points numbers.
#    Each value represents a connection weight from one input to the output.
# b: Bias variable for the output.
W_hidden = tf.Variable(tf.truncated_normal([2, 10], stddev=0.1))
b_hidden = tf.Variable(tf.truncated_normal([10], stddev=0.1))

# Applies ReLU function to get activation for each hidden node.
h_out = tf.nn.relu(tf.matmul(x, W_hidden) + b_hidden)

keep_prob = tf.placeholder(tf.float32)
h_drop = tf.nn.dropout(h_out, keep_prob)

# Weights and biases from hidden layer to the output layer.
W_out = tf.Variable(tf.truncated_normal([10, 1], stddev=0.1))
b_out = tf.Variable(tf.truncated_normal([1], stddev=0.1))

# Sums activations of output nodes to get final answer.
# NOTE(review): reduce_sum with no axis collapses the whole batch to a single
# scalar, which is then broadcast against the (batch, 1) targets in the MSE --
# likely a per-example reduction was intended; confirm before relying on this.
y = tf.reduce_sum(tf.nn.relu(tf.matmul(h_drop, W_out) + b_out))

# MSE is our cost function to reduce when training.
mse = tf.reduce_mean(tf.square(y - y_))

# Adam optimizer that trains the model.
train_step = tf.train.AdamOptimizer(0.1).minimize(mse)

# Initializes all variables.
#
# Takes the input values (in this case tensors full of zeros) that have been
# specified, and assigns them to each Variable object.
sess.run(tf.initialize_all_variables())

# Mini-batch training loop: fresh random (a, b) pairs each iteration.
for i in range(10000):
    batchSize = 100
    batchInput = [None] * batchSize
    batchTarget = [None] * batchSize
    for j in range(batchSize):
        a = np.random.randint(1, 10)
        b = np.random.randint(1, 10)
        batchInput[j] = [a, b]
        batchTarget[j] = [multiply(a, b)]
    if i % 1000 == 0:
        print("Iteration %d, MSE = %f" % (i, mse.eval(feed_dict={
            x: batchInput, y_: batchTarget, keep_prob: 1.0})))
    train_step.run(feed_dict={x: batchInput, y_: batchTarget, keep_prob: 0.2})

# Calculates accuracy by using a new set of data.
# NOTE(review): this reuses the last training batch -- it is not new data.
print("MSE for multiply function, summing over ReLU:")
print(mse.eval(feed_dict={x: batchInput, y_: batchTarget, keep_prob: 1.0}))

sess.close()
"repo_name": "derrowap/MA490-MachineLearning-FinalProject",
"path": "nnMultiply2.py",
"copies": "1",
"size": "3572",
"license": "mit",
"hash": 8430598625097149000,
"line_mean": 35.8350515464,
"line_max": 78,
"alpha_frac": 0.6522956327,
"autogenerated": false,
"ratio": 3.5366336633663367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9658983873031786,
"avg_score": 0.00598908460691026,
"num_lines": 97
} |
import tensorflow as tf
import numpy as np
from trainingFunctions import multiply
def one_hot(index, size=100):
    """Defines a one-hot encoding array for specified index.

    Args:
        index: The index in the array to assign as a 1.
        size: Length of the encoding array. Defaults to 100, which covers
            the products of single digits (up to 9 * 9 = 81) this script
            trains on.

    Returns:
        An array filled with zeros, except a 1 at the specified index.
    """
    output = np.zeros(size)
    output[index] = 1
    return output
# ============================================================================
# START TENSORFLOW INTERACTIVE SESSION
# ============================================================================
"""TensorFlow uses a highly efficient C++ backend.
This connection to the backend is called a session.

An interactive session allows you to interleave operations which build a
computation graph with ones that run the graph.
Non-interactive sessions should have the entire computation graph built before
starting a session.
"""
sess = tf.InteractiveSession()

# ============================================================================
# BUILD A SOFTMAX REGRESSION MODEL
# ============================================================================
# This builds a softmax regression model with a single linear layer.
# The 100 output classes cover products of single digits (max 9 * 9 = 81).

# Placeholders to create nodes for input images and target output classes.
#
# x: 2D tensor of floating point numbers.
#    Dimension 1 is the batch size (any number of example data points).
#    Dimension 2 is an array of two numbers.
# y_: 2D tensor of floating point numbers.
#    Dimension 1 is the batch size (any number of example data points).
#    Dimension 2 is a single value representing the target output.
x = tf.placeholder(tf.float32, shape=[None, 2])
y_ = tf.placeholder(tf.float32, shape=[None, 100])

# Defines weights W and biases b in our model.
#
# Variables are used for model parameters to feed values to.
#
# W: 2x1 matrix of floating points numbers.
#    Each value represents a connection weight from one input to the output.
# b: Bias variable for the output.
W = tf.Variable(tf.zeros([2, 100]))
b = tf.Variable(tf.zeros([100]))

# Initializes all variables.
#
# Takes the input values (in this case tensors full of zeros) that have been
# specified, and assigns them to each Variable object.
sess.run(tf.initialize_all_variables())

# Defines softmax regression model.
y = tf.nn.softmax(tf.matmul(x, W) + b)

# Defines cross entropy cost function to minimize.
#
# tf.reduce_sum sums across all classes.
# tf.reduce_mean takes the average over these sums.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y),
    reduction_indices=[1]))

# ============================================================================
# TRAIN THE MODEL
# ============================================================================
# Defines steepest gradient descent optimization algorithm.
#
# Step length of 0.5 and minimizes cross_entropy cost function.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Trains gradient descent 1000 times using 50 training examples each loop.
# NOTE(review): the loop actually runs 10000 iterations with batches of 10 --
# the comment above is out of date.
for i in range(10000):
    batchSize = 10
    batchInput = [None] * batchSize
    batchTarget = [None] * batchSize
    for j in range(batchSize):
        batchInput[j] = [np.random.randint(1, 10), np.random.randint(1, 10)]
        batchTarget[j] = one_hot(multiply(batchInput[j][0], batchInput[j][1]))
    train_step.run(feed_dict={x: batchInput, y_: batchTarget})

# ============================================================================
# TEST THE MODEL
# ============================================================================
# Create boolean array indicating if model predicts them correctly.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))

# Defines accuracy as a percent by converting booleans to 0's and 1's.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Calculates accuracy by using a new set of examples.
batchSize = 50
batchInput = [None] * batchSize
batchTarget = [None] * batchSize
for j in range(batchSize):
    batchInput[j] = [np.random.randint(1, 10), np.random.randint(1, 10)]
    batchTarget[j] = one_hot(multiply(batchInput[j][0], batchInput[j][1]))

# Calculates accuracy by using a new set of data.
print("Accuracy from softmax regression model with a single linear layer:")
print(accuracy.eval(feed_dict={x: batchInput, y_: batchTarget}))

# close interactive session
sess.close()
"repo_name": "derrowap/MA490-MachineLearning-FinalProject",
"path": "nnMultiply.py",
"copies": "1",
"size": "5066",
"license": "mit",
"hash": -8538565542291408000,
"line_mean": 37.0977443609,
"line_max": 78,
"alpha_frac": 0.645282274,
"autogenerated": false,
"ratio": 3.951638065522621,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009248341441877205,
"num_lines": 133
} |
import tensorflow as tf
import numpy as np
from trainingFunctions import adder
# ============================================================================
# START TENSORFLOW INTERACTIVE SESSION
# ============================================================================
"""TensorFlow uses a highly efficient C++ backend.
This connection to the backend is called a session.
An interactive session allows you to interleave operations which build a
computation graph with ones that run the graph.
Non-interactive sessions should have the entire computation graph built before
starting a session.
"""
# InteractiveSession installs itself as the default session, so .eval()/.run()
# below work without an explicit session argument.
sess = tf.InteractiveSession()
# ============================================================================
# BUILD A FEED-FORWARD NEURAL NETWORK
# ============================================================================
# This builds a feed-forward model with a single hidden layer.
def weight_variable(shape):
    """Create a weight Variable of the given shape, initialized with noise.

    Truncated-normal initialization (stddev 0.1) breaks symmetry between
    units and avoids starting at exactly-zero gradients.

    Args:
        shape: shape of the weight Variable to create.
    Returns:
        A tf.Variable of that shape holding the noisy initial weights.
    """
    noise = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(noise)
def bias_variable(shape):
    """Define a Variable with the specified shape as biases.

    All bias variables are initialized to 0.1 (slightly positive, so ReLU
    units start active).

    Args:
        shape: Shape of the bias Variable to define.
    Returns:
        Variable of specified shape containing biases initialized to 0.1.
    """
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# Placeholders for the input values and target outputs.
#
# x:  2D float tensor; dim 1 = batch size, dim 2 = the single input number.
# y_: 2D float tensor; dim 1 = batch size, dim 2 = the single target value.
x = tf.placeholder(tf.float32, shape=[None, 1])
y_ = tf.placeholder(tf.float32, shape=[None, 1])
# Hidden-layer parameters.
#
# W_hidden: 1x2 weight matrix -- one input feeding two hidden units.
#           (An earlier comment said 2x1; the [1, 2] shape below is correct.)
# b_hidden: hidden-layer bias.
W_hidden = tf.Variable(tf.truncated_normal([1, 2], stddev=0.1))
b_hidden = tf.Variable(tf.truncated_normal([1], stddev=0.1))
# ReLU activation for each hidden node.
h_out = tf.nn.relu(tf.matmul(x, W_hidden) + b_hidden)
# Dropout on the hidden layer; keep_prob is fed per run (0.5 train, 1.0 eval).
keep_prob = tf.placeholder(tf.float32)
h_drop = tf.nn.dropout(h_out, keep_prob)
# Weights and biases from the hidden layer to the output layer.
W_out = tf.Variable(tf.truncated_normal([2, 4], stddev=0.1))
b_out = tf.Variable(tf.truncated_normal([1], stddev=0.1))
# Sum the output activations to a single scalar prediction.
y = tf.reduce_sum(tf.nn.relu(tf.matmul(h_drop, W_out) + b_out))
# Mean-squared-error cost (the cross-entropy variant below was abandoned).
# cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y),
#                                reduction_indices=[1]))
mse = tf.reduce_mean(tf.square(y - y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(mse)
# Assign every Variable its declared initial value.
sess.run(tf.initialize_all_variables())
# generate data
batchSize = 1000
batchInput = np.zeros((batchSize, 1))
batchTarget = np.zeros((batchSize, 1))
k = 0
for i in range(2000):
    # NOTE(review): only the first 37 of the 1000 batch rows are refilled each
    # iteration (k cycles through 0..99); the remaining rows stay zero.
    # Looks unintended -- confirm before relying on this script.
    for j in range(37):
        batchInput[j] = [k]
        batchTarget[j] = [adder(k)]
        k += 1
        if (k == 100):
            k = 0
    print("Iteration %d, MSE = %f" % (i, mse.eval(feed_dict={
        x: batchInput, y_: batchTarget, keep_prob: 1.0})))
    print(y.eval(feed_dict={x: [[1]], keep_prob: 1.0}))
    train_step.run(feed_dict={x: batchInput, y_: batchTarget, keep_prob: 0.5})
# Evaluate on the full 0..99 input range.
batchTestSize = 100
batchTest = np.zeros((batchTestSize, 1))
batchTestTarget = np.zeros((batchTestSize, 1))
for j in range(batchTestSize):
    batchTest[j] = [j]
    batchTestTarget[j] = [adder(j)]
# Report test MSE and the raw predictions.
print("MSE for adder function, summing over ReLU:")
print(mse.eval(feed_dict={x: batchTest, y_: batchTestTarget, keep_prob: 1.0}))
print(y.eval(feed_dict={x: batchTest, y_: batchTestTarget, keep_prob: 1.0}))
sess.close() | {
"repo_name": "derrowap/MA490-MachineLearning-FinalProject",
"path": "adderNN_v3.py",
"copies": "1",
"size": "4879",
"license": "mit",
"hash": -2907102527761652000,
"line_mean": 31.7516778523,
"line_max": 78,
"alpha_frac": 0.6495183439,
"autogenerated": false,
"ratio": 3.6302083333333335,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47797266772333336,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from tensorflow.contrib.learn import TensorFlowDNNClassifier
from trainingFunctions import evenParity
from sklearn.metrics import accuracy_score

# Training data: every 16-bit integer as a 0/1 bit vector, labeled with its
# even-parity bit.
x = np.zeros((65536, 16), dtype='int32')
y = np.zeros(65536, dtype='int32')
for i in range(65536):
    temp = bin(i)[2:].zfill(16)
    x[i] = [int(j) for j in temp]
    y[i] = evenParity(i)
# Single hidden layer of 16 units, trained on the full truth table.
numSteps = 1000000
NN = TensorFlowDNNClassifier(hidden_units=[16], steps=numSteps,
                             n_classes=2)
NN.fit(x, y)
# Training-set error: round predictions to the nearest class and compare.
pred = NN.predict(x)
pred = np.reshape(pred, -1)
pred = np.rint(pred)
error = 1 - accuracy_score(y, pred)
print('Steps %d, error %f' % (numSteps, error))
# Interactive loop: any non-integer input (e.g. "exit") breaks out.
print("\nEnter exit to leave loop.")
while True:
    first = input("Number 1... ")
    try:
        first = int(first)
    except:  # NOTE(review): bare except -- any bad input exits the loop
        break
    result = NN.predict(np.array([[int(j) for j in bin(first)[2:].zfill(16)]]))
    print("I think evenParity(%d) = %d"
          % (first, int(result)))
print("True answer of evenParity(%d) = %d" % (first, evenParity(first))) | {
"repo_name": "derrowap/MA490-MachineLearning-FinalProject",
"path": "skParity.py",
"copies": "1",
"size": "1154",
"license": "mit",
"hash": 7088790843754486000,
"line_mean": 27.875,
"line_max": 78,
"alpha_frac": 0.6447140381,
"autogenerated": false,
"ratio": 2.906801007556675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8916954267779551,
"avg_score": 0.026912155575424885,
"num_lines": 40
} |
import os
import sys
import tensorflow as tf
# We have five actions (output classes).
n_classes = 5
# Image height and width come from the command line.
height = int(sys.argv[1])
width = int(sys.argv[2])
# Flattened input length.
input_size = height * width
# After two 2x2 max-pools each spatial dimension shrinks by 4; 64 is the
# channel count of the second conv layer.
reshape_param = (int(height / 4)) * (int(width / 4)) * 64
# Graph inputs: flattened image batch and action targets.
x = tf.placeholder(tf.float32, [None, input_size], name="input")
y = tf.placeholder(tf.float32)
# Perform 2D convolution
def conv2d(x, W):
    """Single-stride 2-D convolution with SAME (zero) padding."""
    stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=stride, padding='SAME')
# Perform 2D pooling
def maxpool2d(x):
    """2x2 max-pool with a 2 px step -- halves each spatial dimension."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Build the network architecture
def _build_network(x):
    """Build the CNN graph: two conv+pool stages, one FC layer, linear output.

    Args:
        x: flattened image batch of shape [None, height * width].
    Returns:
        Logits tensor of shape [None, n_classes], exposed under the graph
        name "output" so it can be fetched from Java.
    """
    # Convolution / FC layer weights.
    weights = {'W_conv1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
               'W_conv2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
               'W_fc': tf.Variable(tf.random_normal([reshape_param, 1024])),
               'out': tf.Variable(tf.random_normal([1024, n_classes]))}
    # Matching biases.
    biases = {'B_conv1': tf.Variable(tf.random_normal([32])),
              'B_conv2': tf.Variable(tf.random_normal([64])),
              'B_fc': tf.Variable(tf.random_normal([1024])),
              'out': tf.Variable(tf.random_normal([n_classes]))}
    # Restore the 2-D image layout.
    x = tf.reshape(x, shape=[-1, height, width, 1])
    # BUG FIX: the biases were previously added to the *kernels*
    # (conv2d(x, W + b)); a bias belongs on the convolution output instead.
    conv1 = conv2d(x, weights['W_conv1']) + biases['B_conv1']
    conv1 = maxpool2d(conv1)
    conv2 = conv2d(conv1, weights['W_conv2']) + biases['B_conv2']
    conv2 = maxpool2d(conv2)
    # Flatten and run the fully-connected layer.
    fc = tf.reshape(conv2, [-1, reshape_param])
    fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['B_fc'])
    # Make the output accessible in Java.
    output = tf.matmul(fc, weights['out']) + biases['out']
    output = tf.identity(output, name="output")
    return output
# Construct the gradient-descent training op, accessible from Java
def buildoptimizer(output):
    """Squared-error loss against a fed "actual" tensor plus an SGD step op."""
    actual = tf.placeholder(shape=[1, 5], dtype=tf.float32, name="actual")
    squared_err = tf.square(actual - output)
    loss = tf.reduce_sum(squared_err)
    sgd = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    return sgd.minimize(loss)
if __name__ == '__main__':
    # Build the network and its training op, then export the whole graph so
    # it can be loaded from Java.
    model = _build_network(x)
    optimizer = buildoptimizer(model)  # registers the training ops in the graph
    builder = tf.saved_model.builder.SavedModelBuilder(os.getcwd() + "/cnn")
    # Initialize variables and write the SavedModel to disk.
    with tf.Session() as s:
        s.run(tf.global_variables_initializer())
        builder.add_meta_graph_and_variables(s, [tf.saved_model.tag_constants.TRAINING])
        builder.save(True)
| {
"repo_name": "austinpgraham/SmartKart",
"path": "src/cnn_python/construct.py",
"copies": "1",
"size": "2722",
"license": "apache-2.0",
"hash": -6953789711111321000,
"line_mean": 31.7951807229,
"line_max": 82,
"alpha_frac": 0.690668626,
"autogenerated": false,
"ratio": 2.8926673751328376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40833360011328373,
"avg_score": null,
"num_lines": null
} |
__author__ = 'austin'
def module_check_r(module):
    """Offer to install a missing dependency (Debian-based systems only).

    Shows a yes/no dialog; on "yes" installs *module* via apt-get (for nmap,
    which needs the binary plus its python binding) or pip3 (everything
    else).  On "no", shows an error and exits the program.
    """
    ri = tk.messagebox.askyesno("error",
                                """%s was not found on your system if you're an admin and would like to install it
press yes""" % module)  # FIX: corrected "your an" -> "you're an" in the dialog text
    if ri:  # askyesno returns a bool; no need to compare against True
        if module == 'nmap':
            os.system('gksu apt-get install nmap python-nmap')
        else:
            os.system('gksu pip3 install %s' % module)
    else:
        tk.messagebox.showerror('missing dependencies',
                                'netscanner is closing due to a missing dependency')
        exit(0)
import tkinter as tk
import tkinter.messagebox
try:
from netaddr import *
except ImportError:
module_check_r('netaddr')
from netaddr import *
try:
import nmap
except:
module_check_r('nmap')
import nmap
try:
from scapy.all import *
except ImportError:
module_check_r('scapy-python3')
from scapy.all import *
class Scanners:
    """Network scanners usable from the main system or independently.

    Currently still being built; some scan functionality remains in the
    Main class.
    """

    @staticmethod
    def nma_scan(network_address):
        """Run an async nmap fast scan, pickling each host result to results.p."""
        def callback_result(host, scan_result):
            # Append-mode pickle: one {host: result} record per scanned host.
            with open('results.p', 'ab') as f:
                pickle.dump({host: scan_result}, f)

        nma = nmap.PortScannerAsync()
        Scanners.delete_content('results.p')
        scan_list = list(IPNetwork(network_address))
        start_time = time.time()  # FOR TESTING ... start time
        nma.scan(hosts=network_address, arguments='-T5 -F', callback=callback_result)
        while nma.still_scanning():
            nma.wait(1)
        end_time = time.time()
        print("{} addresses scanned in {} seconds".format(len(scan_list), end_time - start_time))

    @staticmethod
    def concurrent_scapy(network_address):
        """ICMP-ping every usable host in *network_address*; return a status dict."""
        addresses = IPNetwork(network_address)
        results = {}
        ports_to_scan = [22, 23, 25, 80, 443]  # reserved for future port probing (unused)
        for host in addresses:
            # BUG FIX: `host is not addresses.network or addresses.broadcast`
            # parsed as `(...) or broadcast`, which is always truthy; compare
            # against both special addresses explicitly.
            if host != addresses.network and host != addresses.broadcast:
                resp = sr1(IP(dst=str(host)) / ICMP(), timeout=2, verbose=0)
                # BUG FIX: `results[host: '...']` was a slice expression
                # (TypeError at runtime); assign into the dict instead.  The
                # Python-2 `"<type 'NoneType'>"` string test is replaced by a
                # direct `is None` check.
                if resp is None:
                    results[host] = 'is down or not responding'
                elif int(resp.getlayer(ICMP).type) == 3 and int(resp.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]:
                    results[host] = 'is blocking icmp'
                else:
                    results[host] = 'is up'
        # BUG FIX: the results were previously computed and silently dropped.
        return results

    @staticmethod
    def delete_content(name):
        """Truncate *name* -- clears the pickle when a new scan starts."""
        with open(name, "w"):
            pass
# Module-level demo: ping-sweep a tiny /30 network when the module runs.
Scanners.concurrent_scapy('192.168.0.0/30')
| {
"repo_name": "austin395/NetworkTools",
"path": "scanners.py",
"copies": "1",
"size": "2881",
"license": "mit",
"hash": 4801153201996782000,
"line_mean": 32.1149425287,
"line_max": 112,
"alpha_frac": 0.589378688,
"autogenerated": false,
"ratio": 3.8516042780748663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4940982966074866,
"avg_score": null,
"num_lines": null
} |
__author__ = 'austin'
import dataset
import os
import time
import sys
def make_new_database(new_database_name, media_folder):
    """Walk *media_folder* and index every video file into a SQLite dataset DB.

    Creates (or reuses) <new_database_name>.vdb with one table of the same
    name; files already present (by title) are skipped.  Exits the process
    when done.
    """
    video_exts = (".mp4", ".avi", ".flv", ".wmv", ".mpg", ".mov", ".mkv", ".mpeg")
    start_time = time.time()  # FOR TESTING ... start time
    db = dataset.connect('sqlite:///{}.vdb'.format(new_database_name))
    table = db[new_database_name]
    for root, dirs, files in os.walk(media_folder):
        for entry in files:
            if not entry.endswith(video_exts):
                continue
            # Title = filename without its 4-character extension suffix.
            if table.find_one(title=entry[:-4]) is not None:
                continue
            table.insert(dict(title=entry[:-4],
                              location=(root + '/' + entry),
                              genre='none',
                              length='none',
                              ispresent=True))
            print(root + '/' + entry)
    end_time = time.time()
    print("{} files in {} seconds".format(len(db[new_database_name]), end_time - start_time))
    exit(0)  # TODO need to specify different exit codes on different errors, so we can handle these errors
def main():
    """CLI entry point: argv[1] = database name, argv[2] = folder to index."""
    db_name, media_root = sys.argv[1], sys.argv[2]
    make_new_database(db_name, media_root)
if __name__ == "__main__":
main() | {
"repo_name": "redeuxx/vidDB",
"path": "make_new_database.py",
"copies": "1",
"size": "1373",
"license": "mit",
"hash": -8589384122682406000,
"line_mean": 31.7142857143,
"line_max": 107,
"alpha_frac": 0.506190823,
"autogenerated": false,
"ratio": 3.922857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9908895552383217,
"avg_score": 0.004030482694785295,
"num_lines": 42
} |
__author__ = 'austin'
import subprocess
import time
import os
# Warn (rather than abort) when not run as root: the iptables helpers need
# root, so in this mode only log parsing runs -- useful for testing.
if os.getuid() != 0:
    print("""not root: iptables functions require root privledge those functions will not be executed.
this mode is for testing only""")
class LiKnockServer:
    """Port-knocking server.

    Tails the iptables log and listens for per-user knock sequences signalling
    a client requesting access.  When a user completes their sequence from a
    single source IP, iptables rules are altered to let that client in.
    """

    def __init__(self):
        # Per-user knock sequences (demo values -- not for production use).
        self.user_pass = {10: [5643, 5644, 5645, 7895, 8634], 11: [3456, 3564, 3764]}
        # Index of the next expected knock per user.
        self.user_position = {10: 0, 11: 0}
        # Timestamp of each user's most recent valid knock (None = no attempt).
        self.last_knock_time = {10: None, 11: None}
        # Source IP of each user's in-progress attempt.
        self.client_ip = {10: None, 11: None}
        self.average_time = []
        if os.getuid() == 0:
            self.start_iptables()
        else:
            print('not root: no iptables settings will change')
        # Follow the firewall log: one line per logged UDP packet.
        self.log_watch = subprocess.Popen(['tail', '-f', '/var/log/firewall.log'],
                                          stdout=subprocess.PIPE)
        self.main()

    def main(self):
        """Loop forever, parsing log lines and tracking knock sequences."""
        while True:
            x = self.log_watch.stdout.readline().decode('UTF-8')  # todo clear log before reading to prevent accepting an old auth
            start = time.time()
            for items in self.last_knock_time:
                # Expire stale attempts after 30 s (sized for internet-distance
                # connections; tune from testing).
                if self.last_knock_time[items] is not None:
                    if start - self.last_knock_time[items] > 30:
                        # BUG FIX: this was reset to 0, which is "not None",
                        # so the timeout branch re-fired on every later line.
                        self.last_knock_time[items] = None
                        self.user_position[items] = 0
                        print('attempt timed out')
            port_location = x.find('DPT=')
            if port_location > 0:
                # Extract destination port, user id (packet LEN - 20) and the
                # source IP from the log line.
                port_end = x[port_location:].find(' ')
                port = int(x[port_location + 4:port_location + port_end])
                user_location = x.find('LEN=')
                user_end = x[user_location:].find(" ")
                user = int(x[user_location + 4:user_location + user_end]) - 20  # todo find second LEN=, the first is 20 larger
                if user in self.user_pass:
                    if port == self.user_pass[user][self.user_position[user]]:
                        src_location = x.find('SRC=')
                        src_end = x[src_location:].find(' ')
                        packet_src = x[src_location + 4:src_location + src_end]
                        if self.user_position[user] == 0:
                            # First knock of a sequence: remember start time and IP.
                            first = time.time()
                            self.client_ip[user] = packet_src
                        if self.client_ip[user] == packet_src:
                            self.last_knock_time[user] = time.time()
                            self.user_position[user] += 1
                        if self.user_position[user] == len(self.user_pass[user]):
                            # Sequence complete: reset state and open the gates.
                            self.last_knock_time[user] = None
                            self.user_position[user] = 0
                            last = time.time()
                            print('opening ports for {}, auth took {}'.format(self.client_ip[user], ((last - first))))
                            # BUG FIX: `os.getuid == 0` compared the function
                            # object itself (always False), so the gates were
                            # never opened.
                            if os.getuid() == 0:
                                self.open_the_gates(22, packet_src)

    @staticmethod
    def open_the_gates(port, address):  # todo add logging for separate process to handle timeouts using netstat
        """Insert iptables rules allowing *address* to reach TCP *port*."""
        # BUG FIX: subprocess argument lists must contain strings; `port` is
        # an int, so it is converted explicitly.
        subprocess.Popen(['sudo', 'iptables', '-I', 'INPUT', '-i', 'eth0', '-p', 'tcp', '-s', address,
                          '--dport', str(port), '-m', 'state', '--state', 'NEW,ESTABLISHED', '-j', 'ACCEPT'])
        subprocess.Popen(['sudo', 'iptables', '-I', 'OUTPUT', '-o', 'eth0', '-p', 'tcp',
                          '--sport', str(port), '-m', 'state', '--state', 'ESTABLISHED', '-j', 'ACCEPT'])

    @staticmethod
    def start_iptables():
        """Flush iptables and log all UDP traffic for knock detection."""
        # NOTE(review): assumes kern.warn is routed to /var/log/firewall.log;
        # verify the syslog configuration matches.
        subprocess.Popen(['sudo', 'iptables', '-F'])  # flush the current ruleset
        # these create an implicit deny
        # subprocess.Popen(['sudo', 'iptables', '-P', 'INPUT', 'DROP'])
        # subprocess.Popen(['sudo', 'iptables', '-P', 'FORWARD', 'DROP'])
        # subprocess.Popen(['sudo', 'iptables', '-P', 'OUTPUT', 'DROP'])
        subprocess.Popen(['sudo', 'iptables', '-A', 'INPUT', '-p', 'udp', '-j', 'LOG',
                          '--log-prefix', "'iptables: '", '--log-level', '4'])  # log all UDP traffic to kern.warn
def main():
    # Start the server; the constructor never returns (it enters the read loop).
    LiKnockServer()

if __name__ == '__main__':
    main()
| {
"repo_name": "severedsec/LiKnox-OS",
"path": "LiKnock/LiKnock-server.py",
"copies": "1",
"size": "5036",
"license": "mit",
"hash": -5749194262497820000,
"line_mean": 46.0654205607,
"line_max": 130,
"alpha_frac": 0.525019857,
"autogenerated": false,
"ratio": 4.077732793522268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5102752650522268,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Austin'
import os,os.path
import xml.etree.ElementTree as ET
#get the path to the pmd commits
#gwtcwd = get current working directory
#print os.getcwd()
def parse(logpath):
    """Parse PMD XML reports (commit<i>.xml) found in *logpath*.

    Writes a per-commit violation summary to CommitResult.txt inside
    *logpath* and returns a list of {file_name: violation_count} dicts,
    one per commit report found.
    """
    LoR = []
    pmd_folder = logpath
    print(pmd_folder)
    i = 0
    # Portability fix: paths were joined with a hard-coded "\\", which only
    # works on Windows; os.path.join works everywhere.  (The unused `rootdir`
    # computation was removed.)
    completeName = os.path.join(pmd_folder, "CommitResult.txt")
    with open(completeName, "w") as output:
        # Original behavior preserved: the counter ticks once per directory
        # entry, and commit files are probed by index, not by entry name.
        for _entry in os.listdir(pmd_folder):
            i = i + 1
            commit_name = "commit" + str(i) + ".xml"
            commit_path = os.path.join(pmd_folder, commit_name)
            if not os.path.isfile(commit_path):
                continue
            Result = dict()
            currentfile = ""
            num_viol = 0
            output.write(commit_name + ": \n")
            with open(commit_path) as f:
                for line in f:
                    if '<file name=' in line:
                        # Report paths are Windows-style; keep the basename,
                        # dropping the trailing '">' plus newline.
                        temp = line.split("\\")
                        if currentfile == "":
                            currentfile = temp[-1][:-3]
                            Result[currentfile] = num_viol
                        else:
                            Result[currentfile] = Result.get(currentfile, 0) + num_viol
                            num_viol = 0
                            currentfile = temp[-1][:-3]
                    if '</violation>' in line:
                        num_viol = num_viol + 1
            # BUG FIX: the last file's violations were never recorded, because
            # counts were only flushed when the *next* <file> line appeared.
            if currentfile != "":
                Result[currentfile] = Result.get(currentfile, 0) + num_viol
            for key in Result.keys():
                output.write("\t" + key + " : " + str(Result[key]) + "\n")
            LoR.append(Result)
    return LoR
| {
"repo_name": "UBC-Victorious-410/project",
"path": "tools/pmd_parser.py",
"copies": "1",
"size": "1800",
"license": "mit",
"hash": -5457313715963984000,
"line_mean": 29,
"line_max": 77,
"alpha_frac": 0.4372222222,
"autogenerated": false,
"ratio": 4.137931034482759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.507515325668276,
"avg_score": null,
"num_lines": null
} |
__author__ = 'austin'
# will be capable of exporting scans to different file types
import tkinter as tk
import tkinter.filedialog
import pickle
import socket
# need to add support for clean csv xml txt
# the dictionary nesting could be cleaned up a little bit when exporting as human readable data
class Export:
    """Export scan results (accumulated in results.p) to a user-chosen file.

    Supported formats: json (.json) and pickle (.p).
    """

    @staticmethod
    def ask_save_as():
        """Prompt for a destination file, load results.p, export, and exit."""
        save_as = [('json', '.json'), ('pickle', '.p')]
        file = tk.filedialog.asksaveasfile(filetypes=save_as)
        if file is None:  # user closed the dialog without choosing a file
            exit(0)
        # rsplit handles filenames that themselves contain dots; only the
        # final extension decides the format.
        _, ext = file.name.rsplit('.', 1)
        # results.p holds a sequence of pickled {host: result} records;
        # merge them into one dict.
        data = {}
        with open('results.p', 'rb') as f:
            while True:
                try:
                    data.update(pickle.load(f))
                except EOFError:
                    break
        # (An unused sorted-by-IP copy of the data was computed here; removed.)
        if ext == 'json':
            Export.to_json(data, file)
        elif ext == 'p':
            Export.to_pickle(data, file)
        file.close()
        exit(0)

    @staticmethod
    def to_json(data, file):
        """Write *data* to *file* as pretty-printed, key-sorted JSON."""
        import json
        json.dump(data, file, indent=4, sort_keys=True)

    @staticmethod
    def to_pickle(data, file):
        """Write *data* to *file* as a single pickle record."""
        pickle.dump(data, file)
def main():
    # Entry point: open the save dialog and export the pickled scan results.
    Export.ask_save_as()
if __name__ == "__main__":
main() | {
"repo_name": "austin395/NetworkTools",
"path": "export.py",
"copies": "1",
"size": "1384",
"license": "mit",
"hash": -8240794785297480000,
"line_mean": 26.1568627451,
"line_max": 95,
"alpha_frac": 0.5628612717,
"autogenerated": false,
"ratio": 3.8232044198895028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4886065691589503,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Autio'
################################################
# Shit Crimson - 7DRL 2014 - By ArchBang #
# 9/3/2014 - 16/3/2014 #
################################################
# Based on Roguebasin.com's roguelike tutorial #
# #
#
#
#
#TODO labyrinth
###############################################################################
# INITIALISATION
import os
import libtcodpy as libtcod
import math
import textwrap
import Vallat
import winsound
forget = False
# size of window
SCREEN_WIDTH = 130
SCREEN_HEIGHT = 80
# dungeon generation parameters
ROOM_MAX_SIZE = 14
ROOM_MIN_SIZE = 6
MAX_ROOMS = 35
MAX_ROOM_MONSTERS = 3
MAX_ROOM_ITEMS = 2
# size of map
MAP_WIDTH = 100
MAP_HEIGHT = 55
LIMIT_FPS = 20
# GUI settings: bottom panel with HP bar and message log
BAR_WIDTH = 26
PANEL_HEIGHT = 16
PANEL_Y = SCREEN_HEIGHT - PANEL_HEIGHT - 1
MSG_X = BAR_WIDTH + 2
MSG_WIDTH = SCREEN_WIDTH - BAR_WIDTH - 2
MSG_HEIGHT = PANEL_HEIGHT - 1
# Default FOV algorithm
FOV_ALGO = 2
FOV_LIGHT_WALLS = True
TORCH_RADIUS = 20  # every x turns diminish torch
TORCH_COUNTER = 0
# Inventory settings
INVENTORY_LIMIT = 30
INVENTORY_WIDTH = 50
# Item stats
HEAL_AMOUNT = 5  # how much a potion heals by
LIGHTNING_RANGE = 5
LIGHTNING_DAMAGE = 20
# Randomize the wall palette once per run.
c_roll = libtcod.random_get_int(0, 0, 2)
if c_roll == 1:
    color_dark_wall = libtcod.Color(0, 0, 100)  # darker purple
    color_light_wall = libtcod.Color(50, 50, 150)  # purplish
else:
    color_dark_wall = libtcod.Color(120, 150, 130)  # dark green
    color_light_wall = libtcod.Color(200, 220, 180)  # lighter green
# Ground palette is randomized independently of the walls.
c_roll = libtcod.random_get_int(0, 0, 2)
if c_roll == 1:
    color_dark_ground = libtcod.Color(25, 50, 150)  # darkish purple/blue
else:
    color_dark_ground = libtcod.Color(90, 20, 20)
color_light_ground = libtcod.Color(100, 85, 75)  # light brown
highlight_color = libtcod.Color(220, 140, libtcod.random_get_int(0, 150, 220))  # pale pink
# description panel position / move counter
dCount = 250
descriptionX = SCREEN_WIDTH - 37
descriptionY = 0
# initial image coordinates (only a sliver of the portraits are shown at any given time)
imgX = libtcod.random_get_int(0, 12, 52)
imgY = libtcod.random_get_int(0, 0, 32)
# turns until the next random disease hit (see ailment())
ailmentCounter = libtcod.random_get_int(0, 80, 222)
SOUNDS = True
SCounter = 10
###############################################################################
# WORDS
#################### Read in texts from the preset files #####################
def readCSVtoArray(filePath):
    # Read one phrase per line from a word file, stripping double quotes and
    # the trailing two characters (assumed CRLF, since the file is opened in
    # binary mode).
    # NOTE(review): under Python 3 the lines are bytes and the str-based
    # replace() would fail; this script targets Python 2.
    outputArray = []
    with open(filePath, 'rb') as source:
        for line in source:
            line = line.replace('"', '')
            outputArray.append(line[:-2])
    return outputArray
# Load the flavor-text pools (Finnish word lists, Windows-style paths).
toimiVallat = readCSVtoArray("sanat\\toimiVallat.csv")
ilkiVallat = readCSVtoArray("sanat\\ilkiVallat.csv")
ikkunat = readCSVtoArray("sanat\\ikkunat.csv")
ovet = readCSVtoArray("sanat\\ovet.csv")
esineet = readCSVtoArray("sanat\\esineet.csv")
muotovajaat = readCSVtoArray("sanat\\muodoton.csv")
sisusvajaat = readCSVtoArray("sanat\\lihaton.csv")
voimavajaat = readCSVtoArray("sanat\\voimaton.csv")
itse = readCSVtoArray("sanat\\itse.csv")
muistot = readCSVtoArray("sanat\\muistot.csv")
kaunat = readCSVtoArray("sanat\\kaunat.csv")
hajut = readCSVtoArray("sanat\\hajut.csv")
kivut = readCSVtoArray("sanat\\kivut.csv")
###############################################################################
# CLASSES
class Tile:
    # A tile on the map and its properties.
    def __init__(self, blocked, block_sight=None, highlight=False):
        self.blocked = blocked
        # Tiles start unexplored; flipped once the player has seen them.
        self.explored = False
        # BUG FIX: the highlight argument was previously ignored (the
        # attribute was always forced to False).
        self.highlight = highlight
        # If a tile is blocked, it also blocks sight by default.
        if block_sight is None:
            block_sight = blocked
        self.block_sight = block_sight
class Object:
    # Generic map object (player, monster, item, ...), always represented by
    # a single character on screen.  Optional components (fighter/ai/item)
    # add behavior; each component gets a back-reference via .owner.
    def __init__(self, x, y, char, name, color, blocks = False, fighter = None, ai = None, item = None, description = None, img = None):
        self.x = x
        self.y = y
        self.char = char
        self.name = name
        self.color = color
        self.blocks = blocks  # whether this object blocks movement
        # portrait to show on mouseover
        self.img = img
        self.fighter = fighter
        if self.fighter:  # tell the fighter component its owner
            self.fighter.owner = self
        self.ai = ai
        if self.ai:  # tell the AI component its owner
            self.ai.owner = self
        self.item = item
        if self.item:  # tell the item component its owner
            self.item.owner = self
        # NOTE(review): the description/img constructor arguments are ignored;
        # both are (re)derived from the name below.
        self.description = setDescription(name)
        self.img = setImage(name)

    def move_towards(self, target_x, target_y):
        # get the distance and vector from self to target
        dx = target_x - self.x
        dy = target_y - self.y
        distance = math.sqrt(dx ** 2 + dy ** 2)
        # keep the movement steady in a grid by normalising, rounding and
        # converting to int
        dx = int(round(dx/distance))
        dy = int(round(dy/distance))
        self.move(dx, dy)

    def move(self, dx, dy):
        # move by the given amount, refusing blocked or negative-coordinate tiles
        try:
            if not is_blocked(self.x + dx, self.y + dy) and (self.x + dx) >= 0 and (self.y +dy) >= 0:
                self.x += dx
                self.y += dy
        except:
            # NOTE(review): bare except -- presumably guards IndexError from
            # off-map moves, but it hides any other failure too.
            print 'No tile exists to move to'

    def distance_to(self, other):
        # return the distance to another object
        dx = other.x - self.x
        dy = other.y - self.y
        return math.sqrt(dx ** 2 + dy ** 2)

    def draw(self):
        # draw this object's character at its position, but only when the
        # tile is inside the player's field of view
        if libtcod.map_is_in_fov(fov_map, self.x, self.y):
            libtcod.console_set_default_foreground(con, self.color)
            libtcod.console_put_char(con, self.x, self.y, self.char, libtcod.BKGND_NONE)

    def clear(self):
        # erase the character that represents this object
        libtcod.console_put_char(con, self.x, self.y, ' ', libtcod.BKGND_NONE)

    def send_to_back(self):
        # move this object to the start of the objects list so it is drawn
        # first, i.e. below player and monsters - meant for corpses and items
        global objects
        objects.remove(self)
        objects.insert(0, self)
class Item:
    # Component that makes an Object pick-up-able and usable.
    def __init__(self, use_function=None):
        # Callback invoked when the item is used; may return 'cancelled'.
        self.use_function = use_function

    def pick_up(self):
        # Move the owner from the map into the player's inventory.
        if len(inventory) >= INVENTORY_LIMIT:
            message('Your inventory is full, cannot pick up ' + self.owner.name + '.', libtcod.red)
        else:
            inventory.append(self.owner)
            objects.remove(self.owner)
            message('You picked up a ' + self.owner.name + '!', libtcod.green)

    def use(self):
        # Invoke the item's 'use_function' (if defined) and consume the item
        # unless the use was cancelled.
        if self.use_function is None:
            message('The ' + self.owner.name + ' cannot be used.')
        else:
            # BUG FIX: the function *object* was compared to 'cancelled'
            # (always unequal), so items were consumed even when their use was
            # cancelled.  Check the call's return value instead, matching the
            # roguebasin tutorial this file is based on.
            if self.use_function() != 'cancelled':
                inventory.remove(self.owner)  # destroy after usage
# define object dropping
class Rect:
    """Axis-aligned rectangle on the map, stored by its two corner points."""

    def __init__(self, x, y, w, h):
        self.x1 = x
        self.y1 = y
        self.x2 = x + w
        self.y2 = y + h

    def centre(self):
        """Return the (x, y) centre point of the rectangle."""
        return ((self.x1 + self.x2) / 2, (self.y1 + self.y2) / 2)

    def intersect(self, other):
        """True if this rectangle overlaps *other* (touching edges count)."""
        overlaps_x = self.x1 <= other.x2 and self.x2 >= other.x1
        overlaps_y = self.y1 <= other.y2 and self.y2 >= other.y1
        return overlaps_x and overlaps_y
class Fighter:
    # combat-related properties and methods (monster, player, NPC)
    def __init__(self, hp, defense, power, death_function = None, name=""):
        self.max_hp = hp
        self.hp = hp
        self.defense = defense
        self.power = power
        self.death_function = death_function  # called with the owner when hp drops to 0
        self.name = name

    def take_damage(self, damage):
        # apply damage (non-positive amounts are ignored) and trigger the
        # death callback when hp reaches 0 or below
        if damage > 0:
            self.hp -= damage
            if self.hp <= 0:
                function = self.death_function
                if function is not None:
                    function(self.owner)

    def attack(self, target):
        # simple damage formula: attacker power minus defender defense
        damage = self.power - target.fighter.defense
        # flavor verbs and message color differ for the player ("You") vs monsters
        if self.name == "You":
            attacks = ["grin at the", "plead mercy from the", "embrace the", "kneel before the", "grow tearful at the", "explain your actions to the", "shore your memories against the", "amass some strength against the", "ask for forgiveness from the"]
            msgColor = libtcod.azure
        else:
            attacks = ["punishes", "seeps into", "compels", "diminishes", "empties itself towards", "weakens", "accuses", "billows at"]
            msgColor = libtcod.desaturated_red
        if damage > 0:
            # pick a random flavor verb for the combat message
            message(self.owner.name.capitalize() + " " + attacks[libtcod.random_get_int(0, 0, len(attacks)-1)] + " " + target.name.lower() + ' (' + str(damage) + ' points).', msgColor)
            target.fighter.take_damage(damage)
        else:
            print 'No damage'

    def heal(self, amount):
        # restore hp, capped at max_hp
        self.hp += amount
        if self.hp > self.max_hp:
            self.hp = self.max_hp
def ailment(player):
    # Periodic disease damage: counts down each call; when the counter runs
    # out it is re-randomized (50-124), a flavor message is shown and the
    # player takes 1-4 damage.
    global ailmentCounter
    ailments = ["The disease gnaws within.", "Clusters of pain cosset you.", "A sudden pain makes you hack and cough.", "A bolt of pain runs down your body."]
    ailmentCounter -= 1
    if ailmentCounter < 0:
        ailmentCounter = libtcod.random_get_int(0, 50, 124)
        message(ailments[libtcod.random_get_int(0, 0, len(ailments)-1)], libtcod.lighter_crimson)
        player.fighter.take_damage(libtcod.random_get_int(0, 1, 4))
def player_death(player):
    # Game over: flip the global state and turn the player into a corpse glyph.
    global game_state
    message('It is all over.', libtcod.brass)
    game_state = 'dead'
    player.char = '%'
    player.color = libtcod.dark_red
def monster_death(monster):
    # Transform a monster into a corpse: random flavor message, then strip
    # its combat/AI components so it no longer acts or blocks movement.
    deaths = [" withers away.", " no longer feels alien to you.", " has found its place.", " is clear to you."]
    roll = libtcod.random_get_int(0, 0, len(deaths)-1)
    message(monster.name.capitalize() + deaths[roll], libtcod.light_gray)
    monster.send_to_back()  # draw this object first, i.e. under everything else
    monster.char = '%'
    monster.color = libtcod.dark_red
    monster.blocks = False
    monster.fighter = None
    monster.ai = None
    monster.name = 'remains of ' + monster.name
class BasicMonster:
    # AI for the basic monster type.
    def take_turn(self):
        # If you can see the basic monster, it can also see you.
        monster = self.owner
        if libtcod.map_is_in_fov(fov_map, monster.x, monster.y):
            # Move towards the player while far away...
            if monster.distance_to(player) >= 2:
                roll = libtcod.random_get_int(0, 0, 10)
                if roll < 9:
                    # ~90%: chase the player directly
                    monster.move_towards(player.x, player.y)
                else:
                    # ~10%: wander to a random nearby tile instead
                    potential_move_x = monster.x + libtcod.random_get_int(0, -1, 2)
                    potential_move_y = monster.y + libtcod.random_get_int(0, -1, 2)
                    if not (monster.x == potential_move_x and monster.y == potential_move_y):
                        monster.move_towards(potential_move_x, potential_move_y)
            # ...attack when adjacent (and the player is still alive).
            elif player.fighter.hp > 0:
                monster.fighter.attack(player)
###############################################################################
# FUNCTIONS
def setDescription(name):
    # Choose a random flavor description for an object, keyed by its name.
    # The word pools (itse, kivut, muistot, ...) are loaded from the sanat/
    # CSV files at import time.
    description = ""
    if name == "You":
        roll = libtcod.random_get_int(0, 0, len(itse)-1)
        description = itse[roll]
    elif name == "Pain":
        roll = libtcod.random_get_int(0, 0, len(kivut)-1)
        description = kivut[roll]
    elif name == "Memory":
        roll = libtcod.random_get_int(0, 0, len(muistot)-1)
        description = muistot[roll]
    elif name == "Vapour":
        roll = libtcod.random_get_int(0, 0, len(hajut)-1)
        description = hajut[roll]
    elif name == "Regret":
        roll = libtcod.random_get_int(0, 0, len(kaunat)-1)
        description = kaunat[roll]
    elif name == "Bucket":
        description = "A bucket of what was once in you. But the disease is still in there."
    else:
        # anything unrecognized gets a random generic item description
        roll = libtcod.random_get_int(0, 0, len(esineet)-1)
        description = esineet[roll]
    return description
def setImage(name):
    """Return the mouseover portrait image path for an object name."""
    if name == "Pain":
        # Pain has three possible portraits, chosen at random.
        roll = libtcod.random_get_int(0, 0, 3)
        if roll == 1:
            return "kuvat\\portrait1.png"
        if roll == 2:
            return "kuvat\\portrait5.png"
        return "kuvat\\portrait8.png"
    # Everything else maps statically; unknown names share the generic portrait.
    portraits = {
        "You": "kuvat\\portrait2.png",
        "Memory": "kuvat\\portrait3.png",
        "Vapour": "kuvat\\portrait4.png",
        "Regret": "kuvat\\portrait6.png",
        "Memento": "kuvat\\portrait7.png",
        "Bucket": "kuvat\\bucket3.png",
    }
    return portraits.get(name, "kuvat\\portrait7.png")
### WORLD GENERATION ##########################################################
def create_room(room):
    """Carve the interior of *room* out of the global map as open floor.

    Only the interior is opened; the rectangle's outermost ring of tiles
    stays blocked so adjacent rooms keep a wall between them.
    """
    global map
    for col in range(room.x1 + 1, room.x2):
        for row in range(room.y1 + 1, room.y2):
            tile = map[col][row]
            tile.blocked = False
            tile.block_sight = False
def create_h_tunnel(x1, x2, y):
    """Carve a horizontal corridor at row *y* spanning x1..x2 inclusive."""
    global map
    left, right = min(x1, x2), max(x1, x2)
    for col in range(left, right + 1):
        tile = map[col][y]
        tile.blocked = False
        tile.block_sight = False
def create_v_tunnel(y1, y2, x):
    """Carve a vertical corridor at column *x* spanning y1..y2 inclusive."""
    global map
    top, bottom = min(y1, y2), max(y1, y2)
    for row in range(top, bottom + 1):
        tile = map[x][row]
        tile.blocked = False
        tile.block_sight = False
def create_monster(x, y):
    """Spawn a random monster at (x, y) and append it to the objects list.

    Vapour, pain and regret each have a 30% chance; the remaining ~10%
    is the tougher memory.
    """
    roll = libtcod.random_get_int(0, 0, 100)
    if roll < 30:
        stats = Fighter(hp = 10, defense=0, power=3, death_function=monster_death, name="The vapour")
        monster = Object(x, y, 'v', 'Vapour', libtcod.light_green, blocks = True, fighter=stats, ai = BasicMonster())
    elif roll < 60:
        stats = Fighter(hp = 15, defense=1, power=4, death_function=monster_death, name="The pain")
        monster = Object(x, y, 'p', 'Pain', libtcod.lighter_purple, blocks=True, fighter = stats, ai = BasicMonster())
    elif roll < 90:
        stats = Fighter(hp = 15, defense=1, power=5, death_function=monster_death, name="The regret")
        monster = Object(x, y, 'r', 'Regret', libtcod.lighter_chartreuse, blocks=True, fighter = stats, ai = BasicMonster())
    else:
        stats = Fighter(hp = 23, defense=2, power=6, death_function = monster_death, name="The memory")
        monster = Object(x, y, 'm', 'Memory', libtcod.dark_pink, blocks = True, fighter=stats, ai = BasicMonster())
    objects.append(monster)
def create_item(x, y):
    """Spawn a random pick-up item at (x, y); every item currently heals."""
    roll = libtcod.random_get_int(0, 0, 100)
    if roll < 20:
        glyph, label, colour = '?', 'A beloved book', libtcod.dark_green
    elif roll < 40:
        glyph, label, colour = '*', 'Memento', libtcod.green
    else:
        glyph, label, colour = '&', 'A familiar trinket', libtcod.lighter_green
    item = Object(x, y, glyph, label, colour, item = Item(use_function=cast_heal))
    objects.append(item)
    # Items render beneath any creature standing on them.
    item.send_to_back()
def place_objects(room):
    """Populate *room* with a random number of monsters and items.

    Objects are only placed on unblocked tiles; a blocked pick is simply
    skipped, so a room may receive fewer objects than were rolled.
    """
    #choose random number of monsters and items
    num_monsters = libtcod.random_get_int(0, 0, MAX_ROOM_MONSTERS)
    num_items = libtcod.random_get_int(0, 0, MAX_ROOM_ITEMS)
    for i in range(num_monsters):
        #choose random spot for this monster
        x = libtcod.random_get_int(0, room.x1+1, room.x2-1)
        y = libtcod.random_get_int(0, room.y1+1, room.y2-1)
        if not is_blocked(x, y):
            create_monster(x, y)
    for i in range(num_items):
        #random spot for item
        x = libtcod.random_get_int(0, room.x1+1, room.x2-1)
        y = libtcod.random_get_int(0, room.y1+1, room.y2-1)
        # Bug fix: items previously skipped the is_blocked check and could
        # spawn inside walls or on top of other blocking objects.
        if not is_blocked(x, y):
            create_item(x, y)
def is_blocked(x, y):
    """Return True when tile (x, y) is impassable terrain or occupied by a blocking object."""
    # terrain first -- cheapest check
    if map[x][y].blocked:
        return True
    # then any object that declares itself blocking on that tile
    return any(obj.blocks and obj.x == x and obj.y == y for obj in objects)
def make_map():
    """Generate the dungeon level.

    Fills the global map with solid tiles, then attempts MAX_ROOMS random
    rectangular rooms, keeping only those that do not intersect an earlier
    room. Each kept room is carved out and populated; consecutive rooms
    are joined with an L-shaped tunnel. The player (and the bucket) start
    in the first room.
    """
    global map
    rooms = []
    num_rooms = 0
    #fill map with "blocked" tiles
    map = [[ Tile(True)
        for y in range(MAP_HEIGHT) ]
            for x in range(MAP_WIDTH) ]
    for r in range(MAX_ROOMS):
        #random width and height
        w = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
        h = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
        #random position without going outside the map
        x = libtcod.random_get_int(0, 0, MAP_WIDTH - w - 1)
        y = libtcod.random_get_int(0, 0, MAP_HEIGHT - h - 1)
        new_room = Rect(x, y, w, h)
        #run through the rooms and check for intersections
        failed = False
        for other_room in rooms:
            if new_room.intersect(other_room):
                failed = True
                break
        if not failed:
            #room has no intersections: carve it and populate it
            create_room(new_room)
            place_objects(new_room)
            #centre coordinates of the new room
            (new_x, new_y) = new_room.centre()
            if num_rooms == 0:
                #first room: the player starts here, the bucket next to her
                player.x = new_x
                player.y = new_y
                bucket.x = new_x + 1
                bucket.y = new_y - 1
            else:
                #all rooms after the first:
                #connect to the previous room with a tunnel
                #centre coordinates of the previous room
                (prev_x, prev_y) = rooms[num_rooms - 1].centre()
                #draw a coin (0 or 1) to pick the L-shape orientation
                if libtcod.random_get_int(0, 0, 1) == 1:
                    #first move horizontally, then vertically
                    create_h_tunnel(prev_x, new_x, prev_y)
                    create_v_tunnel(prev_y, new_y, new_x)
                else:
                    #first vertically, then horizontally
                    create_v_tunnel(prev_y, new_y, prev_x)
                    create_h_tunnel(prev_x, new_x, new_y)
            #finally, append the new room to the list
            rooms.append(new_room)
            num_rooms += 1
### SPELL SHAPES ##############################################################
def validDirection(direction):
    """Return True when *direction* is a unit step (each component -1, 0 or 1).

    The cancel sentinel [2, 2] from getDirection() is rejected here.
    """
    return direction[0] in (-1, 0, 1) and direction[1] in (-1, 0, 1)
def lineShape(start, action, power):
    """Cast a word along a straight line from *start* in a player-chosen direction.

    Asks the player for one of the eight unit directions, then walks tile
    by tile applying *action* ("access", "block", "damage" or "heal") with
    strength *power*. Non-"access" lines stop at the first sight-blocking
    tile; "access" tunnels through walls for roughly rangeCounter tiles.
    Each touched tile is flash-highlighted.
    """
    global map
    global fov_map
    # the action should be
    # valid directions = [-1, 0], [1, 0], [-1, -1], [-1, 1], [-1, -1], [0, 1], [0, -1], [1, -1]
    message("Choose direction: ",libtcod.white)
    direction = getDirection()
    if validDirection(direction):
        x = start[0]
        y = start[1]
        rangeCounter = 10
        blocked = False
        while blocked == False:
            # NOTE(review): this only stops the walk at the map origin
            # (0, 0); it looks like an incomplete edge-of-map guard, and
            # the walk can still index past the map bounds -- confirm.
            if x == 0 and y == 0:
                blocked = True
            x += direction[0]
            y += direction[1]
            activeTile = map[x][y]
            # Make the line stop at walls and things
            if action != "access":
                if activeTile.block_sight == True:
                    blocked = True
            elif rangeCounter < 0:
                blocked = True
            flashTile(x, y)
            # if creating an open corridor
            if action == "access":
                activeTile.blocked = False
                activeTile.block_sight = False
                rangeCounter -= 1
            if action == "block":
                # what to do if you are trying to make a wall
                activeTile.blocked = True
                activeTile.block_sight = True
            if action == "damage":
                # damage every living fighter standing on the current tile
                #activeTile.blocked = True
                #activeTile.block_sight = True
                for object in objects:
                    if object.fighter and object.fighter.hp > 0 and object.x == x and object.y == y:
                        object.fighter.take_damage(power)
                        # print object.fighter.hp
            if action == "heal":
                # heal every living fighter standing on the current tile
                # NOTE(review): adds to hp directly, bypassing any max-hp
                # clamping a Fighter.heal() method might do -- confirm.
                for object in objects:
                    if object.fighter and object.fighter.hp > 0 and object.x == x and object.y == y:
                        object.fighter.hp += power
    #print action
    # find the creature in the tile and add to hp
    # find the creature in the tile and subtract from hp
    # what to do if you want to see / know
    # what to do if you want to do other stuff
    #print 'zapping tile %s %s' % (x, y)
def areaShape(start, action, power):
    """Cast a word over an area centred on a player-chosen tile.

    The word's power selects an area level (1-6, larger for stronger
    words); the matching rough-circle of tiles is flash-highlighted, and
    then *action* ("access", "block", "heal" or "damage") is applied to
    every highlighted tile on the map. Finally the FOV map is rebuilt
    because walls may have changed.
    """
    global map
    x = start[0]
    y = start[1]
    level = 3
    selected = chooseTile([x,y])
    x = selected[0]
    y = selected[1]
    thresholds = [1,5,9,13,17,21] # based on the alphabet
    # max power is b - z = 25
    # 1-4, 5-8, 9-12, 13-16, 17-20, 21-25
    # need to balance the power with the area
    for i in range(len(thresholds)-1, -1,-1):
        if power > thresholds[i]:
            level = i+1
            print 'level ' + str(level)
            break
    # adjust the power algorithm here
    power = power * 2 - level
    # limit range based on map edges
    # 'paint' impacted tiles
    #level1: a plus-shaped splash
    if level == 1:
        for targetX in range(x-1, x+2):
            flashTile(targetX, y)
        for targetY in range(y-1, y+2):
            flashTile(x, targetY)
    #level2: the full 3x3 square
    if level == 2:
        for targetX in range(x-1, x + 2):
            for targetY in range(y-1, y + 2):
                flashTile(targetX, targetY)
    #level3: 3x3 square plus the four orthogonal tips
    if level == 3:
        for targetX in range(x-1, x + 2):
            for targetY in range(y-1, y + 2):
                flashTile(targetX, targetY)
        flashTile(x-2, y)
        flashTile(x+2 ,y)
        flashTile(x, y-2)
        flashTile(x, y+2)
    #level4: 3x3 square with full 3-wide arms
    if level == 4:
        for targetX in range(x-1, x + 2):
            for targetY in range(y-1, y + 2):
                flashTile(targetX, targetY)
        for targetY in range(y-1, y+2):
            flashTile(x-2, targetY)
            flashTile(x+2, targetY)
        for targetX in range(x-1, x+2):
            flashTile(targetX, y-2)
            flashTile(targetX, y+2)
    #level5: 5x5 square plus orthogonal tips
    if level == 5:
        for targetX in range(x-2, x + 3):
            for targetY in range(y-2, y + 3):
                flashTile(targetX, targetY)
        flashTile(x+3, y)
        flashTile(x, y-3)
        flashTile(x-3, y)
        flashTile(x, y+3)
    #level6: 5x5 square with wide arms in all four directions
    if level == 6:
        for targetX in range(x-2, x + 3):
            for targetY in range(y-2, y + 3):
                flashTile(targetX, targetY)
        for targetY in range(y-2, y+3):
            flashTile(x-3, targetY)
            flashTile(x+3, targetY)
        for targetX in range(x-2, x+3):
            flashTile(targetX, y-3)
            flashTile(targetX, y+3)
        for targetY in range(y-1, y+2):
            flashTile(x-4, targetY)
            flashTile(x+4, targetY)
        for targetX in range(x-1, x+2):
            flashTile(targetX, y-4)
            flashTile(targetX, y+4)
    # act on highlighted tiles anywhere on the map
    for y in range(MAP_HEIGHT):
        for x in range(MAP_WIDTH):
            try:
                if map[x][y].highlight > 0:
                    # do action
                    if action == "access":
                        map[x][y].blocked = False
                        map[x][y].block_sight = False
                        map[x][y].explored = True
                    if action == "block":
                        map[x][y].blocked = True
                        map[x][y].block_sight = True
                    if action == "heal":
                        for object in objects:
                            if object.x == x and object.y == y and object.fighter:
                                object.fighter.heal(power)
                                if object.name != "You":
                                    message ("%s seems reinvigorated." % object.name, libtcod.azure)
                                else:
                                    message ("You feel a moment of respite.", libtcod.azure)
                    if action == "damage":
                        for object in objects:
                            if object.x == x and object.y == y and object.fighter:
                                object.fighter.take_damage(power)
                                if object.name != "You":
                                    message ("%s spasms and jerks." % object.name, libtcod.dark_red)
                                else:
                                    message ("Convulsions unmoor your innards.", libtcod.brass)
            except:
                print 'Tile %s, %s is out of bounds.' % (x, y)
    # Since the contours of the map have changed, we need to recreate the FOV
    createFOV()
    fov_recompute = True
def targetOther(start, action, power):
    """Let the player pick a visible tile and apply *action* with *power* to it."""
    global map
    target = chooseTile(start)
    # Stronger words get a little random variance.
    if power > 5:
        power = power + libtcod.random_get_int(0, -2, 3)
    x, y = target[0], target[1]
    if action == "damage":
        for obj in objects:
            if obj.x == x and obj.y == y and obj.fighter:
                if obj.name != "You":
                    message("%s is enveloped in a mist of pain." % obj.name, libtcod.dark_red)
                else:
                    message("You are struck by stabbing pains.", libtcod.brass)
                obj.fighter.take_damage(power)
    if action == "heal":
        for obj in objects:
            if obj.x == x and obj.y == y and obj.fighter:
                if obj.name != "You":
                    message("%s becomes starker and more fearsome." % obj.name, libtcod.azure)
                else:
                    message("You gain some composure.", libtcod.azure)
                obj.fighter.heal(power)
    if action == "block":
        map[x][y].blocked = True
        map[x][y].block_sight = True
    if action == "access":
        map[x][y].blocked = False
        map[x][y].block_sight = False
def targetNearest(start, action, power):
    """Apply *action* with *power* to the living monster nearest the player.

    Bug fix: the original referenced an undefined ``closest_dist``
    (raising NameError as soon as a monster existed) and, had it run,
    would have applied the effect to every monster scanned rather than
    only the nearest one.

    Returns the targeted monster, or None when no monster is alive.
    """
    player = None
    for object in objects:
        if object.name == 'You':
            player = object
    closest_enemy = None
    closest_dist = None
    for object in objects:
        if object.fighter and object.name != 'You':
            #calculate dist between object and player
            dist = player.distance(object)
            if closest_dist is None or dist < closest_dist:
                # keep track of the closest one so far
                closest_dist = dist
                closest_enemy = object
    # apply the effect only to the single nearest monster
    if closest_enemy is not None:
        if action == "heal":
            closest_enemy.fighter.heal(power)
        if action == "damage":
            closest_enemy.fighter.take_damage(power)
    return closest_enemy
def targetSelf(start, action, power):
    """Apply *action* with *power* to the fighter standing on *start* (the caster)."""
    global map
    x, y = start[0], start[1]
    for obj in objects:
        if obj.x == x and obj.y == y and obj.fighter:
            if action == "heal":
                message("You gain some composure.", libtcod.azure)
                obj.fighter.heal(power)
            elif action == "damage":
                message("You cause yourself pain.", libtcod.dark_red)
                obj.fighter.take_damage(power)
    # Terrain effects act on the caster's own tile.
    if action == "block":
        map[x][y].blocked = True
        map[x][y].block_sight = True
    if action == "access":
        map[x][y].blocked = False
        map[x][y].block_sight = False
def targetAll(start, action, power):
    """Apply *action* to the first several objects in play.

    The word's power (plus a small random factor) sets how many objects
    are reached; the per-target power is the total divided among all
    fighters in play, doubled, and capped at 6.
    """
    global map
    level = 0
    thresholds = [1,5,9,13,17,21] # based on the alphabet
    message("You try to reach out to everything there is.", libtcod.white)
    # tries to target all but is limited by the power
    for i in range(len(thresholds)-1, -1,-1):
        if power > thresholds[i]:
            level = i+1
            #print 'level ' + str(level)
            break
    # how many characters will be targeted
    level = libtcod.random_get_int(0, -1, 3) + level * 2
    # Bug fix: clamp to the number of objects so the loop below can never
    # index past the end of the objects list (level could reach ~15).
    level = min(level, len(objects))
    # split the power across every fighter currently in play
    o = 0
    for object in objects:
        if object.fighter:
            o += 1
    if o != 0:
        power = int(power / o)
        if power == 0: power = 1
    power = power * 2
    if power > 6: power = 6
    for o in range(0, level):
        if objects[o].fighter:
            if action == "heal":
                if objects[o].name != "You":
                    message("%s appears to breathe easier." % objects[o].name, libtcod.azure)
                else:
                    message("The colours of the world are sharper to you.", libtcod.azure)
                objects[o].fighter.heal(power)
            elif action == "damage":
                if objects[o].name != "You":
                    message("The %s repeats unbearable sounds." % objects[o].name, libtcod.dark_red)
                else:
                    message("You plead to the silent walls to make the pain stop.", libtcod.brass)
                objects[o].fighter.take_damage(power)
            elif action == "block":
                map[objects[o].x][objects[o].y].blocked = True
                map[objects[o].x][objects[o].y].block_sight = True
            elif action == "access":
                map[objects[o].x][objects[o].y].blocked = False
                map[objects[o].x][objects[o].y].block_sight = False
    if action == "block": message("You notice the walls begin to close in.", libtcod.brass)
    elif action == "access": message("Parts of the wall collapse with a loud noise.", libtcod.brass)
# Auxiliary spell functions
def getDirection():
    """Block until the player presses a direction key; return it as [dx, dy].

    Arrow and numpad keys map to the eight unit directions. Escape
    cancels and returns the sentinel [2, 2], which validDirection()
    rejects.
    """
    global key
    fov_recompute = True
    render_all()
    libtcod.console_flush()
    waiting = True
    while waiting:
        #message('Choose a direction: ', libtcod.light_gray)
        # Two waits in a row -- presumably to swallow the release event of
        # the triggering keypress; TODO confirm against libtcod behaviour.
        key = libtcod.console_wait_for_keypress(True)
        key = libtcod.console_wait_for_keypress(True)
        if key.vk == libtcod.KEY_UP or key.vk == libtcod.KEY_KP8:
            return [0, -1]
        elif key.vk == libtcod.KEY_DOWN or key.vk == libtcod.KEY_KP2:
            return [0, 1]
        elif key.vk == libtcod.KEY_LEFT or key.vk == libtcod.KEY_KP4:
            return [-1, 0]
        elif key.vk == libtcod.KEY_RIGHT or key.vk == libtcod.KEY_KP6:
            return [1, 0]
        #diagonals
        elif key.vk == libtcod.KEY_KP9 or key.vk == libtcod.KEY_PAGEUP:
            return [1, -1]
        elif key.vk == libtcod.KEY_KP3 or key.vk == libtcod.KEY_PAGEDOWN:
            return [1, 1]
        elif key.vk == libtcod.KEY_KP7 or key.vk == libtcod.KEY_HOME:
            return [-1, -1]
        elif key.vk == libtcod.KEY_KP1 or key.vk == libtcod.KEY_END:
            return [-1, 1]
        # cancel
        # Bug fix: the original compared against libtcod.KEY_CHAR('c') --
        # KEY_CHAR is a key-code constant, not a callable, so pressing
        # Ctrl raised a TypeError. Due to `or`/`and` precedence, Escape
        # alone already cancelled; keep only that.
        elif key.vk == libtcod.KEY_ESCAPE:
            return [2,2]
def chooseTile(start):
    """Interactive tile targeting.

    Starting from *start*, the player moves a flash-highlighted cursor
    with the arrow/numpad keys -- movement is restricted to tiles inside
    the current field of view -- and confirms with Enter. Returns the
    chosen (x, y) tuple.
    """
    global fov_recompute
    global fov_map
    global map
    x = start[0]
    y = start[1]
    message("Select your target tile, then press Enter.", libtcod.light_gray)
    flashTile(x, y)
    fov_recompute = True
    render_all()
    libtcod.console_flush()
    selected = False
    while selected == False:
        # Two waits in a row -- presumably swallows the key-release event
        # of the previous keypress; TODO confirm.
        key = libtcod.console_wait_for_keypress(True)
        key = libtcod.console_wait_for_keypress(True)
        if key.vk == libtcod.KEY_UP or key.vk == libtcod.KEY_KP8:
            if libtcod.map_is_in_fov(fov_map, x, y-1):
                y -= 1
        elif key.vk == libtcod.KEY_DOWN or key.vk == libtcod.KEY_KP2:
            if libtcod.map_is_in_fov(fov_map, x, y+1):
                y += 1
        elif key.vk == libtcod.KEY_LEFT or key.vk == libtcod.KEY_KP4:
            if libtcod.map_is_in_fov(fov_map, x-1, y):
                x -= 1
        elif key.vk == libtcod.KEY_RIGHT or key.vk == libtcod.KEY_KP6:
            if libtcod.map_is_in_fov(fov_map, x+1, y):
                x += 1
        #diagonals
        elif key.vk == libtcod.KEY_KP9 or key.vk == libtcod.KEY_PAGEUP:
            if libtcod.map_is_in_fov(fov_map, x+1, y-1):
                x += 1
                y -= 1
        elif key.vk == libtcod.KEY_KP3 or key.vk == libtcod.KEY_PAGEDOWN:
            if libtcod.map_is_in_fov(fov_map, x+1, y+1):
                x += 1
                y += 1
        elif key.vk == libtcod.KEY_KP7 or key.vk == libtcod.KEY_HOME:
            if libtcod.map_is_in_fov(fov_map, x-1, y-1):
                x -= 1
                y -= 1
        elif key.vk == libtcod.KEY_KP1 or key.vk == libtcod.KEY_END:
            if libtcod.map_is_in_fov(fov_map, x-1, y+1):
                x -= 1
                y += 1
        elif key.vk == libtcod.KEY_ENTER:
            selected = True
        # Re-highlight the (possibly moved) cursor and redraw each pass.
        flashTile(x, y)
        fov_recompute = True
        #print 'rendering'
        render_all()
        libtcod.console_flush()
    #print x,y
    return (x, y)
###############################################################################
### MAP DRAWING ###############################################################
def flashTile(x, y):
    """Mark tile (x, y) for a one-frame highlight; ignore out-of-map tiles.

    render_all() decrements the highlight counter as it draws, so the
    flash fades by itself on the next frames.
    """
    global map
    # Bug fix: negative indices would silently wrap around and highlight
    # a tile on the far edge of the map, so reject them explicitly; the
    # bare except is narrowed to IndexError so real errors surface.
    if x < 0 or y < 0:
        print('Cannot reach tile %s, %s in order to highlight it.' % (x, y))
        return
    try:
        map[x][y].highlight = 1
    except IndexError:
        print('Cannot reach tile %s, %s in order to highlight it.' % (x, y))
def render_all():
    """Redraw the whole screen.

    Recomputes FOV and repaints the map tiles when fov_recompute is set,
    then draws all objects (player last), blits the map console, and
    rebuilds the GUI panel: HP bar, message log, mouse-over names, the
    vowel-map helper, the mouse-over description and portrait (both
    jittered periodically via dCount), the remaining rune alphabet and
    the help hint.
    """
    global fov_map, color_dark_wall, color_light_wall
    global color_light_ground, color_dark_ground
    global highlight_color
    global fov_recompute
    global dCount, descriptionX, descriptionY, imgX, imgY
    if fov_recompute:
        #recompute FOV if needed
        fov_recompute = False
        libtcod.map_compute_fov(fov_map, player.x, player.y, TORCH_RADIUS, FOV_LIGHT_WALLS, FOV_ALGO)
        #cycle through tiles and set their bg colour
        for y in range(MAP_HEIGHT):
            for x in range(MAP_WIDTH):
                visible = libtcod.map_is_in_fov(fov_map, x, y)
                wall = map[x][y].block_sight
                if not visible:
                    # highlights decay even while out of sight
                    if map[x][y].highlight > 0:
                        map[x][y].highlight -= 1
                    if map[x][y].explored:
                        if wall:
                            libtcod.console_set_char_background(con, x, y, color_dark_wall, libtcod.BKGND_SET)
                        else:
                            libtcod.console_set_char_background(con, x, y, color_dark_ground, libtcod.BKGND_SET)
                else:
                    #it is visible
                    if map[x][y].highlight > 0:
                        # flash overlay takes precedence, then fades
                        libtcod.console_set_char_background(con, x, y, highlight_color, libtcod.BKGND_SET)
                        map[x][y].highlight -= 1
                    elif wall:
                        libtcod.console_set_char_background(con, x, y, color_light_wall)
                    else:
                        libtcod.console_set_char_background(con, x, y, color_light_ground)
                    # because it is visible, explore it
                    map[x][y].explored = True
    #draw all objects in the list
    for object in objects:
        #Exclude the player so she can be drawn last
        if object != player:
            object.draw()
    player.draw()
    #blit con to the root console
    #libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
    libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
    #prepare to render the GUI panel
    libtcod.console_set_default_background(panel, libtcod.black)
    libtcod.console_clear(panel)
    #show the player's stats
    render_bar(1, 1, BAR_WIDTH, 'HP', player.fighter.hp, player.fighter.max_hp, libtcod.light_red, libtcod.darker_red)
    #print the game messages
    y = 1
    for (line, color) in game_msgs:
        libtcod.console_set_default_foreground(panel, color)
        libtcod.console_print_ex(panel, MSG_X, y, libtcod.BKGND_NONE, libtcod.LEFT, line)
        y += 1
    #display names of objects under the mouse
    libtcod.console_set_default_foreground(panel, libtcod.light_gray)
    libtcod.console_print_ex(panel, 1, 0, libtcod.BKGND_NONE, libtcod.LEFT, get_names_under_mouse())
    # Print the vowel map (filtered through allowedLetters, so forgotten
    # letters vanish from the helper text too)
    vowelMap = ["You remember knowing",
                " ",
                "words of power. Ones",
                " ",
                "to make your house &",
                " ",
                "mind obey. Vowels to",
                " ",
                "grant form & content",
                " ",
                "& consonants setting",
                " ",
                "the amount of power.",
                " ",
                " ",
                "        a           ",
                " ",
                "   e         i      ",
                "         y          ",
                " ",
                "   o         u      "
                ]
    y = 1
    for line in vowelMap:
        output = ""
        for l in line:
            output += allowedLetters(l)
        libtcod.console_set_default_foreground(con, libtcod.brass)
        libtcod.console_print_ex(con, SCREEN_WIDTH - 22, 1+y, libtcod.BKGND_NONE, libtcod.LEFT, output)
        y += 1
    # Print the description lines
    # Randomise the description display point slightly
    dCount -= 1
    if dCount < 0:
        descriptionX = SCREEN_WIDTH - 40 + libtcod.random_get_int(0, -1, 1)
        descriptionY = libtcod.random_get_int(0, 0, 2)
    y = 1
    for line in get_description_under_mouse():
        # NOTE(review): 'color' here is whatever value the message loop
        # above left behind -- possibly unintended; confirm.
        libtcod.console_set_default_foreground(panel, color)
        libtcod.console_print_ex(panel, descriptionX, descriptionY + y, libtcod.BKGND_NONE, libtcod.LEFT, line)
        y += 1
    # display portraits
    # image natural size is 96 x 96
    # image, panel, x corner on panel, y corner on panel,
    # Xth pixel of image to start from, Yth pixel of image to start from width, height
    buffer = 20
    imgPath = str(get_image_under_mouse())
    img = libtcod.image_load(imgPath)
    #img = libtcod.image_load('kuvat\\portrait1.png')/
    if dCount < 0:
        # re-jitter the portrait crop and reset the countdown
        imgX = libtcod.random_get_int(0, 0, 64)
        imgY = libtcod.random_get_int(0, 0, 32)
        dCount = libtcod.random_get_int(0, 250, 350)
    #answer should be 16 + 1 for black border
    libtcod.image_blit_2x(img, 0, SCREEN_WIDTH-buffer, 24, imgX, imgY, 35, 75)
    # display rune alphabet
    y = 4
    for line in get_alphabet():
        libtcod.console_print_ex(panel, 1, y, libtcod.BKGND_NONE, libtcod.LEFT, line)
        y += 1
    #print get_alphabet()
    # display question mark
    libtcod.console_print_ex(panel, SCREEN_WIDTH-2, 14, libtcod.BKGND_NONE, libtcod.RIGHT, "(?)")
    #blit the panel to the root console
    libtcod.console_blit(panel, 0, 0, SCREEN_WIDTH, PANEL_HEIGHT, 0, 0, PANEL_Y)
###############################################################################
def torch_dimmer():
    """Shrink the torch radius by one tile every 30 calls (player turns)."""
    global TORCH_COUNTER
    global TORCH_RADIUS
    TORCH_COUNTER += 1
    if not TORCH_COUNTER % 30:
        TORCH_RADIUS -= 1
def player_move_or_attack(dx, dy):
    """Move the player by (dx, dy), or attack the fighter occupying that tile."""
    global fov_recompute
    # destination tile
    dest_x = player.x + dx
    dest_y = player.y + dy
    # look for a fighter standing on the destination tile
    target = None
    for obj in objects:
        if obj.fighter and obj.x == dest_x and obj.y == dest_y:
            target = obj
            break
    if target is None:
        player.move(dx, dy)
    else:
        player.fighter.attack(target)
        # print 'The ' + target.name + ' evades your ire.'
    fov_recompute = True
### CONTROLS ##################################################################
def handle_keys():
    """Dispatch the current keypress (in the global *key*).

    Alt+Enter toggles fullscreen and Escape exits. While playing,
    arrow/numpad keys move or attack; otherwise single characters are
    checked: '?' help, 'g' pick up, 'i' inventory, 's' toggle sound and
    'c' compose and cast a rune word. Returns 'exit', 'no-turn-taken'
    (for non-movement keys), or None after a movement/attack.
    """
    global key
    global fov_recompute
    #key = libtcod.console_check_for_keypress() #real-time
    if key.vk == libtcod.KEY_ENTER and key.lalt:
        # Alt + Enter toggles fullscreen
        libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
    elif key.vk == libtcod.KEY_ESCAPE:
        return 'exit' #exit game
    if game_state == 'playing':
        #movement keys
        if key.vk == libtcod.KEY_UP or key.vk == libtcod.KEY_KP8:
            player_move_or_attack(0, -1)
        elif key.vk == libtcod.KEY_DOWN or key.vk == libtcod.KEY_KP2:
            player_move_or_attack(0, 1)
        elif key.vk == libtcod.KEY_LEFT or key.vk == libtcod.KEY_KP4:
            player_move_or_attack(-1, 0)
        elif key.vk == libtcod.KEY_RIGHT or key.vk == libtcod.KEY_KP6:
            player_move_or_attack(1, 0)
        #diagonals
        elif key.vk == libtcod.KEY_KP9:
            player_move_or_attack(1, -1)
        elif key.vk == libtcod.KEY_KP3:
            player_move_or_attack(1, 1)
        elif key.vk == libtcod.KEY_KP7:
            player_move_or_attack(-1, -1)
        elif key.vk == libtcod.KEY_KP1:
            player_move_or_attack(-1, 1)
        else:
            #check for other keys
            key_char = chr(key.c)
            if key_char == '?':
                # help screen
                information_menu("Scraps from your memory:\n")
            if key_char == 'g':
                #pick item up: first item object sharing the player's tile
                for object in objects:
                    if object.x == player.x and object.y == player.y and object.item:
                        object.item.pick_up()
                        break
            if key_char == 'i':
                #display inventory and use the chosen item
                active_item = inventory_menu('Press the relevant letter to use the item, anything else to cancel.\n')
                if active_item is not None:
                    active_item.use()
                    message('Used item.', libtcod.light_crimson)
            if key_char == 's':
                # toggle sound: stop the looping track, or restart it once
                # the cooldown (SCounter) has elapsed
                global SOUNDS
                global SCounter
                if(SOUNDS):
                    winsound.PlaySound(None, winsound.SND_PURGE|winsound.SND_ASYNC)
                    #winsound.PlaySound("hicRhodus.wav", winsound.SND_ASYNC|winsound.SND_PURGE)
                    SOUNDS = False
                    SCounter = 10
                elif SCounter < 0:
                    winsound.PlaySound("hicRhodus.wav", winsound.SND_ALIAS|winsound.SND_LOOP|winsound.SND_ASYNC)
            if key_char == 'c':
                #choose runes and attempt to cast the resulting word
                castingOutcome = ""
                runes = enterRunes("Try and remember what you can of words: \n")
                runeResult = Vallat.castRunes(runes)
                #print runeResult
                if runeResult[3] == 0:
                    message("You remember a word.", libtcod.light_gray)
                    # able to cast spell
                    # what shape is it?
                    power = runeResult[0]
                    shape = runeResult[1]
                    content = runeResult[2]
                    #print shape, content, power
                    message(toimiVallat[libtcod.random_get_int(0, 0, len(toimiVallat)-1)])
                    # dispatch on the word's shape
                    if shape == "area":
                        message("This word will take shape over an area.", libtcod.light_gray)
                        areaShape([player.x, player.y], content, power)
                    if shape == "line":
                        message("This word wants to travel straight and far.", libtcod.light_gray)
                        fov_recompute = True
                        render_all()
                        lineShape([player.x, player.y], content, power)
                    if shape == "self":
                        message("This word etches itself onto your skin.", libtcod.light_gray)
                        targetSelf([player.x, player.y], content, power)
                    if shape == "other":
                        message("This word will latch onto something living.", libtcod.light_gray)
                        targetOther([player.x, player.y], content, power)
                    if shape == "everything":
                        message("This word wants to touch everything.", libtcod.light_gray)
                        targetAll([player.x, player.y], content, power)
                    if shape == "nearest":
                        message("This word wants to touch something nearby.")
                        targetNearest([player.x, player.y], content, power)
                elif runeResult[3] == 1:
                    # formless
                    castingOutcome = muotovajaat[ libtcod.random_get_int(0, 0, len(muotovajaat)-1)]
                elif runeResult[3] == 2:
                    # contentless
                    castingOutcome = sisusvajaat[ libtcod.random_get_int(0, 0, len(sisusvajaat)-1)]
                elif runeResult[3] == 3:
                    # powerless
                    castingOutcome = voimavajaat[libtcod.random_get_int(0, 0, len(voimavajaat)-1)]
                message(str(castingOutcome), libtcod.darker_azure)
                fov_recompute = True
                render_all()
            #Debug controls
            #if key_char == "t":
            #    #test line zap
            #
            #    print 'player is on tile %s %s' % (player.x, player.y)
            #    lineShape([player.x, player.y], "access", 10)
            #    fov_recompute = True
            #    render_all()
            #if key_char == "a":
            #    areaShape([player.x, player.y], "access", 20)
            #    fov_recompute = True
            #    render_all()
            #if key_char == "s":
            #    targetOther([player.x, player.y], "block", 20)
            #    fov_recompute = True
            #    render_all()
            #if key_char == "g":
            #    targetAll([player.x, player.y], "damage", 25)
            #    fov_recompute = True
            #    render_all()
            return 'no-turn-taken'
#
### ITEM FUNCTIONS ############################################################
def cast_heal():
    """Heal the player by HEAL_AMOUNT, then re-randomise the next heal's strength.

    Returns 'cancelled' when the player is already at full health.
    """
    global HEAL_AMOUNT
    at_full_health = player.fighter.hp == player.fighter.max_hp
    if at_full_health:
        message('You have no wounds to heal.', libtcod.red)
        return 'cancelled'
    message('You feel better.', libtcod.light_cyan)
    player.fighter.heal(HEAL_AMOUNT)
    # randomise next heal (improve this)
    HEAL_AMOUNT = libtcod.random_get_int(0, 3, 6)
def cast_lightning():
    """Strike the closest monster within LIGHTNING_RANGE for LIGHTNING_DAMAGE.

    Returns 'cancelled' when no monster is in range.
    """
    monster = closest_monster(LIGHTNING_RANGE) # closest_monster still needs to be defined
    if monster is None:
        # No enemy within maximum range
        message('No enemy is close enough for the lightning to strike.', libtcod.dark_red)
        return 'cancelled'
    # thwack!
    message('A lightning bolt strikes the ' + monster.name + ' with a loud thunder causing ' + str(LIGHTNING_DAMAGE) + ' points of damage.', libtcod.blue)
    monster.fighter.take_damage(LIGHTNING_DAMAGE)
### GUI FUNCTIONS ###
def allowedLetters(letter):
    """Return *letter*, or a space if it is a letter the player has lost.

    Letters still present in Vallat.alphabet pass through unchanged;
    non-alphabetic characters always pass through.
    """
    lowered = letter.lower()
    if lowered not in "abcdefghijklmnopqrstuvwxyz":
        return letter
    if lowered in Vallat.alphabet:
        return letter
    return " "
def message(new_msg, color = libtcod.white):
    """Append *new_msg* to the message log, wrapped to MSG_WIDTH columns.

    While *forget* is on, letters the player has lost are censored out of
    the text first. The log keeps at most MSG_HEIGHT lines.
    """
    global forget
    if forget:
        censored = "".join(allowedLetters(ch) for ch in new_msg)
        wrapped = textwrap.wrap(censored, MSG_WIDTH)
    else:
        wrapped = textwrap.wrap(new_msg, MSG_WIDTH)
    for line in wrapped:
        # drop the oldest line once the buffer is full
        if len(game_msgs) == MSG_HEIGHT:
            del game_msgs[0]
        # each entry is a (text, colour) tuple
        game_msgs.append( (line, color) )
def get_names_under_mouse():
    """Return a capitalised, comma-separated list of visible object names under the mouse."""
    global mouse
    (x, y) = (mouse.cx, mouse.cy)
    # collect the names of objects on that tile that are also in FOV
    names = []
    for obj in objects:
        if obj.x == x and obj.y == y and libtcod.map_is_in_fov(fov_map, obj.x, obj.y):
            names.append(obj.name)
    return ', '.join(names).capitalize()
def get_description_under_mouse():
    """Return description lines for visible objects under the mouse.

    All matching descriptions are comma-joined and wrapped to 36-column
    lines for the side panel.
    """
    global mouse
    (x, y) = (mouse.cx, mouse.cy)
    found = []
    for obj in objects:
        if obj.x == x and obj.y == y and libtcod.map_is_in_fov(fov_map, obj.x, obj.y):
            found.append(obj.description)
    # descriptions need to break lines after each 36 characters
    wrap_width = 36
    return textwrap.wrap(str(', '.join(found)), wrap_width)
def get_image_under_mouse():
    """Return the portrait path of the first visible object under the mouse that has one.

    Returns None when nothing with a portrait is under the cursor.
    """
    (x, y) = (mouse.cx, mouse.cy)
    for obj in objects:
        if obj.x == x and obj.y == y and libtcod.map_is_in_fov(fov_map, obj.x, obj.y):
            if obj.img != None:
                #libtcod.image_load('kuvat\\portrait1.png')
                return obj.img
    return
def get_alphabet():
    """Return the player's remaining rune alphabet as three display lines.

    Two rows of thirteen space-separated entries with a blank spacer row
    between them; forgotten letters are already blanks in Vallat.alphabet.
    """
    alphabet = Vallat.alphabet
    first_row = ""
    for index in range(0, 13):
        first_row += alphabet[index] + " "
    second_row = ""
    for index in range(13, 26):
        second_row += alphabet[index] + " "
    spacer = " " * 13
    return [first_row, spacer, second_row]
def render_bar(x, y, total_width, name, value, maximum, bar_color, back_color):
    """Draw a labelled stat bar (e.g. HP) on the GUI panel.

    The filled portion is proportional to value/maximum; the label and
    the numbers are printed centred on top of the bar.
    """
    # width of the filled part of the bar
    fill_width = int(float(value) / maximum * total_width)
    # background strip first...
    libtcod.console_set_default_background(panel, back_color)
    libtcod.console_rect(panel, x, y, total_width, 1, False, libtcod.BKGND_SCREEN)
    # ...then the filled portion on top of it
    libtcod.console_set_default_background(panel, bar_color)
    if fill_width > 0:
        libtcod.console_rect(panel, x, y, fill_width, 1, False, libtcod.BKGND_SCREEN)
    # centred text with the actual values
    libtcod.console_set_default_foreground(panel, libtcod.pink)
    caption = name + ': ' + str(value) + '/' + str(maximum)
    libtcod.console_print_ex(panel, x + total_width / 2, y, libtcod.BKGND_NONE, libtcod.CENTER, caption)
def menu(header, options, width):
    """Show a generic blocking menu and return the chosen option index.

    *header* is printed (word-wrapped) above one line per option, each
    prefixed with a letter starting at 'a'. Waits for a single keypress;
    returns the 0-based index of the matching option, or None for any
    other key. Raises ValueError for more than INVENTORY_LIMIT options.
    """
    #img = libtcod.image_load('bff1.png')
    #libtcod.image_blit_2x(img, 0, 0, 0)
    #message("blitted image", libtcod.gray)
    if len(options) > INVENTORY_LIMIT: raise ValueError('Cannot have a menu with more than ' + str(INVENTORY_LIMIT) + ' options.')
    #Work out height of header after the automatic wrapping and give one line per menu option
    header_height = libtcod.console_get_height_rect(con, 0, 0, width, SCREEN_HEIGHT, header)
    height = len(options) + header_height
    #create an off-screen console for the menu's window
    menu_window = libtcod.console_new(width, height)
    #print header
    libtcod.console_set_default_foreground(menu_window, libtcod.light_gray)
    libtcod.console_print_rect_ex(menu_window, 0, 0, width, height, libtcod.BKGND_NONE, libtcod.LEFT, header)
    #print all the options, each tagged '(a)', '(b)', ...
    y = header_height
    letter_index = ord('a')
    for option_text in options:
        text = '(' + chr(letter_index) + ') ' + option_text
        libtcod.console_print_ex(menu_window, 0, y, libtcod.BKGND_NONE, libtcod.LEFT, text)
        y += 1
        letter_index += 1
    #blit to the root console
    x = 3 # SCREEN_WIDTH/2 - 35
    y = SCREEN_HEIGHT/2 - 35 # - height
    libtcod.console_blit(menu_window, 0, 0, width, height, 0, x, y, 1.0, 0.7) # last two params define transparency
    libtcod.console_flush()
    key = libtcod.console_wait_for_keypress(True)
    #converting ASCII to an index
    index = key.c - ord('a')
    if index >= 0 and index < len(options): return index
    return None
def inventory_menu(header):
    """Show the inventory as a menu; return the chosen Item component, or None."""
    if len(inventory) == 0:
        options = ['You are not carrying anything.']
    else:
        options = [entry.name for entry in inventory]
    index = menu(header, options, INVENTORY_WIDTH)
    # None on cancel, and the placeholder line is not selectable
    if index is None or len(inventory) == 0:
        return None
    return inventory[index].item
def information_menu(header):
    """Display a static help screen listing the game's controls.

    The blank entries pad the option letters so each command lines up
    with the key that triggers it ('c', 'g', 'i', 's').
    """
    blank = " "
    options = ([blank] * 2 + ["Attempt to form a word"] + [blank] * 3
               + ["Pick up objects", blank, "See your inventory"]
               + [blank] * 9 + ["Make the sounds go away"])
    menu(header, options, INVENTORY_WIDTH)
def enterRunes(header):
    """Let the player compose a four-rune word from the remembered alphabet.

    Presents the alphabet as a menu four times -- vowels annotated with
    their shape/content meanings -- accumulating each chosen rune into a
    string, which is returned for casting.
    """
    # Bug fix: the original assigned a *local* fov_recompute, which never
    # affected the module-level flag; declare it global so the redraw
    # below actually recomputes the field of view.
    global fov_recompute
    runes = ""
    #print Vallat.alphabet
    abc = []
    for a in Vallat.alphabet:
        if a == "a":
            abc.append("a | area | harm")
        elif a == "e":
            abc.append("e | all | heal")
        elif a == "i":
            abc.append("i | path | open")
        elif a == "o":
            abc.append("o | near | close")
        elif a == "u":
            abc.append("u | you | hurt")
        elif a == "y":
            abc.append("y | i | live")
        else:
            abc.append(a)
    i = 0
    while i < 4: # 4 is the spell length
        key = libtcod.console_wait_for_keypress(True)
        index = menu(header + " \n" + runes + "\n", abc, INVENTORY_WIDTH)
        if index is not None:
            runes += Vallat.alphabet[index]
            i += 1
    # Single exit point (the original had a redundant second return).
    fov_recompute = True
    render_all()
    return runes
def createFOV():
    """Rebuild the libtcod FOV map from the current walkability/sight data in map."""
    global fov_map
    for row in range(MAP_HEIGHT):
        for col in range(MAP_WIDTH):
            tile = map[col][row]
            # libtcod wants (transparent, walkable) -- the inverse of our flags
            libtcod.map_set_properties(fov_map, col, row, not tile.block_sight, not tile.blocked)
def checkForEnd():
    """Return True once no hostile fighters remain (the game is won)."""
    remaining = 0
    for obj in objects:
        if obj.fighter and obj.name != "You":
            remaining += 1
    #print 'there are %s monsters in play' % remaining
    # Game ends when all the bad memories have been reconciled.
    return remaining == 0
def checkObjects():
    """Debug helper: print a count of fighters vs non-fighter objects in play."""
    units = 0
    items = 0
    for obj in objects:
        if obj.fighter:
            units += 1
        else:
            items += 1
    print("Units: %s, Items: %s" % (units, items))
###############################################################################
# MAIN LOOP
# --- libtcod initialisation: font, root console, off-screen map console ---
libtcod.console_set_custom_font('arial10x10.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'Shit Crimson | 7DRL 2014 | ArchBang', False)
con = libtcod.console_new(SCREEN_WIDTH, SCREEN_HEIGHT)
libtcod.sys_set_fps(LIMIT_FPS)
### GUI ###
panel = libtcod.console_new(SCREEN_WIDTH, PANEL_HEIGHT)
#list of game messages with their colors
game_msgs = []
#create player object
fighter_component = Fighter(hp = 30, defense = 2, power = 5, death_function = player_death, name="You")
player = Object(0, 0, '@', 'You', libtcod.white, blocks = True, fighter = fighter_component, description = "", img = "kuvat\\portrait2.png")
#create bucket (placed near the player; make_map repositions both anyway)
r = libtcod.random_get_int(0, -1, 2)
bucket = Object(player.x + r, player.y - 1, '#', 'Bucket', libtcod.darkest_red, blocks = False, fighter = None, description= "", img ="kuvat\\bucket3.png")
#list of objects, starting with player
objects = [player, bucket]
inventory = []
print player.x
print bucket.x
#generate map(not yet drawn); also places player and bucket
make_map()
#create the FOV map based on the map just generated
fov_map = libtcod.map_new(MAP_WIDTH, MAP_HEIGHT)
createFOV()
#Game States
fov_recompute = True
game_state = 'playing'
player_action = None
### MOUSE FUNCTIONALITY ###
mouse = libtcod.Mouse()
key = libtcod.Key()
### LOOP ###
message("Shit Crimson", libtcod.dark_crimson)
message("You wake up next to a bloody bucket.", libtcod.light_gray)
# start the looping background track
winsound.PlaySound("hicRhodus.wav", winsound.SND_ALIAS|winsound.SND_LOOP|winsound.SND_ASYNC)
libtcod.sys_set_fps(LIMIT_FPS)
while not libtcod.console_is_window_closed():
    #check for mouse and key events
    libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
    render_all()
    libtcod.console_flush()
    # sound-restart cooldown (see the 's' key in handle_keys)
    SCounter -= 1
    #erase all objects at their old locations, before they move
    for object in objects:
        object.clear()
    #handle keys and exit game if needed
    player_action = handle_keys()
    if player_action == 'exit':
        break
    #monsters do their stuff (only when the player spent a turn)
    if game_state == 'playing' and player_action != 'no-turn-taken':
        for object in objects:
            if object.ai:
                object.ai.take_turn()
        # increment fov_range ticker (the torch slowly dims)
        torch_dimmer()
    #checkObjects()
    # victory check: all hostile memories reconciled
    if checkForEnd():
        message('Matay, you have accepted yourself and die unburdened.')
        game_state = 'end'
        ailment(player)
| {
"repo_name": "ArchBang85/S_Crimson",
"path": "ShitCrimson.py",
"copies": "1",
"size": "58307",
"license": "apache-2.0",
"hash": -5653800929157368000,
"line_mean": 32.9388824214,
"line_max": 252,
"alpha_frac": 0.5464695491,
"autogenerated": false,
"ratio": 3.4552296296296294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4501699178729629,
"avg_score": null,
"num_lines": null
} |
import adsk.core, adsk.fusion, traceback
import os
import tempfile
import subprocess
# global set of event handlers to keep them referenced for the duration of the
# command (Fusion would otherwise garbage-collect them and events would stop firing)
handlers = []
app = adsk.core.Application.get()
if app:
    ui = app.userInterface
# Filled in by MeshBooleanCommandCreatedHandler; read by the execute handler.
RADIOBUTTONGROUP = adsk.core.RadioButtonGroupCommandInput.cast(None)
# Absolute path to the add-in's resources folder; assigned in run().
resourceDir = ""
class MeshBooleanCommandExecuteHandler(adsk.core.CommandEventHandler):
    """Exports the two selected mesh bodies as OBJ files, runs the external
    MeshBooleanApp executable on them, and imports the resulting mesh back
    into the active design."""
    def __init__(self):
        super().__init__()
    def notify(self, args):
        try:
            command = args.firingEvent.sender
            inputs = command.commandInputs
            # HACK: hard-coded scratch directory instead of tempfile.mkdtemp().
            tmpdir = "c:/t" ## tempfile.mkdtemp()
            files = []
            #occ_numbr = app.activeProduct.rootComponent.allOccurrences.count
            #occ = app.activeProduct.rootComponent.allOccurrences.item(1)
            # Write each selected mesh body to <tmpdir>/<i>.obj (Wavefront OBJ).
            for i in [0,1]:
                mesh_body = inputs[i].selection(0).entity
                #mesh_body = mesh_body.createForAssemblyContext(occ)
                mesh = mesh_body.displayMesh;
                filename = os.path.join(tmpdir, str(i) + '.obj')
                files += [filename]
                f = open(filename, 'w')
                # Vertex lines: "v x y z".
                for pt in mesh.nodeCoordinates:
                    f.write("v {0} {1} {2}\n".format(pt.x, pt.y, pt.z))
                # Face lines: OBJ indices are 1-based, three per triangle.
                j = 3
                for ind in mesh.nodeIndices:
                    if j == 3:
                        f.write("\nf")
                        j = 0
                    f.write(" {0}".format(ind + 1))
                    j += 1
                #j = 4
                #for ind in mesh.quadNodeIndices:
                #    if j == 4:
                #        f.write("\nf")
                #        j = 0
                #    f.write(" {0}".format(ind + 1))
                #    j += 1
                f.close()
            # HACK: absolute paths to a developer machine's build outputs.
            exe = "C:/Users/marco/OneDrive/Documents/PROJECTS/polytriagnulation/out/MeshBooleanApp/Release/MeshBooleanApp.exe"
            exed = "C:/Users/marco\OneDrive/Documents/PROJECTS/polytriagnulation/out/MeshBooleanApp/Debug/MeshBooleanApp.exe"
            operation = RADIOBUTTONGROUP.selectedItem.name
            curent_dir = os.getcwd()
            os.chdir(tmpdir)
            ##msdev = "C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/Common7/IDE/devenv.exe"
            #subprocess.check_call([msdev, exe, files[0], files[1], operation])
            # Run the boolean tool; it is expected to write result.obj in tmpdir.
            subprocess.check_call([exe, files[0], files[1], operation])
            activeDoc = adsk.core.Application.get().activeDocument
            design = activeDoc.design
            rootComp = design.rootComponent
            # Import the tool's output back into the design.
            rootComp.meshBodies.add(tmpdir + "/result.obj", 0)
            os.chdir(curent_dir)
        except:
            if ui:
                ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
class MeshBooleanCommandDestroyHandler(adsk.core.CommandEventHandler):
    """Terminates the add-in when the command dialog is destroyed."""
    def __init__(self):
        super().__init__()
    def notify(self, args):
        try:
            # when the command is done, terminate the script
            # this will release all globals which will remove all event handlers
            adsk.terminate()
        except:
            if ui:
                ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
class MeshBooleanValidateInputHandler(adsk.core.ValidateInputsEventHandler):
    """Enables the OK button only while exactly two bodies are selected."""
    def __init__(self):
        super().__init__()
    def notify(self, args):
        try:
            selection = ui.activeSelections
            args.areInputsValid = (len(selection) == 2)
        except:
            if ui:
                ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
class MeshBooleanCommandCreatedHandler(adsk.core.CommandCreatedEventHandler):
    """Builds the command dialog: two mesh selectors plus an operation chooser,
    and wires up the execute/destroy/validate event handlers."""
    def __init__(self):
        super().__init__()
    def notify(self, args):
        try:
            cmd = args.command
            onExecute = MeshBooleanCommandExecuteHandler()
            cmd.execute.add(onExecute)
            handlers.append(onExecute)
            onDestroy = MeshBooleanCommandDestroyHandler()
            cmd.destroy.add(onDestroy)
            handlers.append(onDestroy)
            onValidateInput = MeshBooleanValidateInputHandler()
            cmd.validateInputs.add(onValidateInput)
            handlers.append(onValidateInput)
            # handlers list keeps them referenced beyond this function
            # define the inputs
            inputs = cmd.commandInputs
            i1 = inputs.addSelectionInput('entity', 'Entity One', 'Please select a mesh')
            i1.addSelectionFilter(adsk.core.SelectionCommandInput.MeshBodies)
            i1.setSelectionLimits(0, 1)
            # Bug fix: both selection inputs previously used the id 'entity';
            # command-input ids must be unique. The execute handler accesses
            # inputs by index, so renaming the second id is safe for callers.
            i2 = inputs.addSelectionInput('entity2', 'Entity Two', 'Please select a mesh')
            i2.addSelectionFilter(adsk.core.SelectionCommandInput.MeshBodies)
            i2.setSelectionLimits(0, 1)
            # Create radio button group input for the boolean operation.
            global RADIOBUTTONGROUP
            RADIOBUTTONGROUP = inputs.addRadioButtonGroupCommandInput('BoolOperation', 'Operation button group')
            radioButtonItems = RADIOBUTTONGROUP.listItems
            values = [ "UNION", "INTERSECTION", "DIFFERENCE",
                       "SPLIT", "SPLITA", "SPLITB", "A_IN_B", "B_IN_A",
                       "A_OUT_B", "B_OUT_A", "A_OVERLAP", "B_OVERLAP" ]
            for v in values:
                radioButtonItems.add(v, v == "UNION")  # UNION is the default
        except:
            if ui:
                ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
def run(context):
    """Add-in entry point: register the MeshBoolean command (if needed) and run it."""
    try:
        product = app.activeProduct
        design = adsk.fusion.Design.cast(product)
        if not design:
            ui.messageBox('It is not supported in current workspace, please change to MODEL workspace and try again.')
            return
        commandDefinitions = ui.commandDefinitions
        # check the command exists or not
        command_name = 'MeshBooleanDef'
        cmdDef = commandDefinitions.itemById(command_name)
        global resourceDir
        if not cmdDef:
            resourceDir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'resources') # absolute resource file path is specified
            cmdDef = commandDefinitions.addButtonDefinition(command_name,
                                                            'MeshBoolean',
                                                            'Boolean operation on meshes',
                                                            resourceDir)
        onCommandCreated = MeshBooleanCommandCreatedHandler()
        cmdDef.commandCreated.add(onCommandCreated)
        # keep the handler referenced beyond this function
        handlers.append(onCommandCreated)
        inputs = adsk.core.NamedValues.create()
        cmdDef.execute(inputs)
        # prevent this module from being terminate when the script returns, because we are waiting for event handlers to fire
        adsk.autoTerminate(False)
    except:
        if ui:
            ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
| {
"repo_name": "marcomanno/polygon_triangulation",
"path": "fusion_script/mesh_bool/mesh_boolean.py",
"copies": "1",
"size": "7321",
"license": "apache-2.0",
"hash": 3894091341129252000,
"line_mean": 39.8342857143,
"line_max": 139,
"alpha_frac": 0.5760142057,
"autogenerated": false,
"ratio": 4.173888255416191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00815855016808137,
"num_lines": 175
} |
__author__ = "auxiliary-character"
from wpilib.command import Command
from wpilib.timer import Timer
import wpilib
import csv
class RecordMacro(Command):
    """This records robot movements and writes them to a .csv file."""
    def __init__(self, robot, name):
        super().__init__()
        self.robot = robot
        self.setTimeout(15)  # record for at most 15 seconds
        self.name = name
    def initialize(self):
        """Open the macro file and write the CSV header."""
        self.initTime = wpilib.Timer.getFPGATimestamp() #get the current time
        self.f = open("/home/lvuser/py/"+self.name, "w")
        # Bug fix: "Time" was missing from the field list, so the first
        # writerow() raised ValueError (extra key) and recording never worked.
        fields = ["Subsystem", "Time"]
        self.writer = csv.DictWriter(self.f, fieldnames=fields)
        self.writer.writeheader()
    def execute(self):
        """Append one sample row per scheduler tick."""
        self.writer.writerow({
            "Subsystem": self.robot.subsystem.output,
            #this is needed to make sure everything runs at the right time
            "Time": wpilib.Timer.getFPGATimestamp() - self.initTime}) #get the time as the row is written
    def isFinished(self):
        return self.isTimedOut()
    def end(self):
        """Close (and flush) the macro file."""
        self.f.close()
    def interrupted(self):
        self.end()
    def cancel(self):
        self.end()
        super().cancel()
| {
"repo_name": "DenfeldRobotics4009/python-framework",
"path": "commands/record_macro.py",
"copies": "1",
"size": "1169",
"license": "bsd-3-clause",
"hash": -9065455216644522000,
"line_mean": 28.225,
"line_max": 105,
"alpha_frac": 0.619332763,
"autogenerated": false,
"ratio": 3.770967741935484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9877426688500346,
"avg_score": 0.002574763287027438,
"num_lines": 40
} |
__author__ = 'auxiliary-character'
from wpilib.command import Command
import wpilib
import math
class DriveStraight(Command):
    """Drives the robot straight using the navX, PIDs, and a bit of math."""
    def __init__(self, robot, x, y, timeout=None):
        super().__init__()
        self.robot = robot
        self.x = x  # strafe/forward commands passed straight through to the drive
        self.y = y
        # P-only controller on yaw; negative gain counter-steers heading drift.
        self.controller = wpilib.PIDController(-.05, 0, 0, self.returnPIDInput, self.usePIDOutput)
        self.requires(self.robot.drivetrain)
        self.setTimeout(timeout)
    def initialize(self):
        """Lock the setpoint to the current heading and start correcting."""
        self.controller.enable()
        self.controller.setSetpoint(self.robot.drivetrain.gyro.getYaw())
    def isFinished(self):
        # Runs until the timeout expires; there is no distance target.
        return self.isTimedOut()
    def end(self):
        """Disable the PID and stop the drivetrain."""
        self.controller.disable()
        self.robot.drivetrain.driveManual(0,0,0)
    def interrupted(self):
        self.end()
    def _cancel(self):
        self.end()
        super()._cancel()
    def returnPIDInput(self):
        """Return whichever yaw representation (angle, angle+360, angle-360)
        is closest to the setpoint, so wraparound doesn't spike the error."""
        angle = self.robot.drivetrain.gyro.getYaw()
        set_point = self.controller.getSetpoint()
        angle_greater = angle + 360
        angle_lesser = angle - 360
        use_angle = math.fabs(angle-set_point) < math.fabs(angle_greater - set_point)
        use_angle = use_angle and math.fabs(angle-set_point) < math.fabs(angle_lesser - set_point)
        if use_angle:
            return angle
        elif math.fabs(angle_greater-set_point) < math.fabs(angle_lesser - set_point):
            return angle_greater
        else:
            return angle_lesser
    def usePIDOutput(self, output):
        # Drive with the caller-supplied x/y plus the PID's rotation correction.
        self.robot.drivetrain.driveManual(self.x, self.y, output)
| {
"repo_name": "DenfeldRobotics4009/2015_Lopez_Jr",
"path": "commands/semiauto_commands/drive_straight.py",
"copies": "1",
"size": "1643",
"license": "bsd-3-clause",
"hash": 8517265513883887000,
"line_mean": 30.5961538462,
"line_max": 98,
"alpha_frac": 0.6256847231,
"autogenerated": false,
"ratio": 3.4957446808510637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4621429403951064,
"avg_score": null,
"num_lines": null
} |
__author__ = 'auxiliary-character'
from wpilib.command import Command
import wpilib
import math
class Turn(Command):
    """Does some turning based on math, the navX, and a PID controller."""
    def __init__(self, robot, angle):
        super().__init__()
        self.robot = robot
        self.angle = angle
        # P-only controller on yaw; output feeds the drivetrain's rotate axis.
        self.controller = wpilib.PIDController(.008, 0, 0, self.returnPIDInput, self.usePIDOutput)
        self.setTimeout(3)
        self.controller.setAbsoluteTolerance(2)
        self.requires(self.robot.drivetrain)
    def initialize(self):
        """Target the current yaw plus the requested turn, then start the PID."""
        self.controller.setSetpoint(self.robot.drivetrain.gyro.getYaw()+self.angle)
        self.controller.enable()
    def isFinished(self):
        """Done when within tolerance of the setpoint or the 3 s timeout hits."""
        return self.controller.onTarget() or self.isTimedOut()
    def end(self):
        """Stop the PID and halt the drivetrain."""
        self.controller.disable()
        self.robot.drivetrain.driveManual(0,0,0)
    def interrupted(self):
        # Bug fix: this hook was misspelled "interupted", so the scheduler
        # never called it and the drivetrain kept its last output when the
        # command was interrupted (compare DriveStraight.interrupted).
        self.end()
    interupted = interrupted  # keep the old misspelled name for any direct callers
    def _cancel(self):
        self.end()
        super()._cancel()
    def returnPIDInput(self):
        """Feed the controller the yaw reading closest (mod 360) to the setpoint."""
        angle = self.robot.drivetrain.gyro.getYaw()
        set_point = self.controller.getSetpoint()
        angle_greater = angle + 360
        angle_lesser = angle - 360
        use_angle = math.fabs(angle-set_point) < math.fabs(angle_greater - set_point)
        use_angle = use_angle and math.fabs(angle-set_point) < math.fabs(angle_lesser - set_point)
        if use_angle:
            return angle
        elif math.fabs(angle_greater-set_point) < math.fabs(angle_lesser - set_point):
            return angle_greater
        else:
            return angle_lesser
    def usePIDOutput(self, output):
        # Cube the output to soften the response near the setpoint.
        self.robot.drivetrain.driveManual(0, 0, output**3)
| {
"repo_name": "DenfeldRobotics4009/2015_Lopez_Jr",
"path": "commands/semiauto_commands/turn.py",
"copies": "1",
"size": "1683",
"license": "bsd-3-clause",
"hash": 9186172982501165000,
"line_mean": 31.3653846154,
"line_max": 98,
"alpha_frac": 0.6339869281,
"autogenerated": false,
"ratio": 3.550632911392405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4684619839492405,
"avg_score": null,
"num_lines": null
} |
__author__ = 'auxiliary-character'
import threading
import wpilib
import serial
import re
class IMUSimple(threading.Thread):
    """Background reader for a serial IMU that emits "!y" packets containing
    yaw, pitch, roll, and compass heading. Latest values are exposed through
    mutex-guarded getters."""
    # One signed fixed-width float field, e.g. "+123.45".
    float_regex = """([\+\-\ ]\d{3}\.\d{2})"""
    int8_regex = """([0-9A-Fa-f]{2})"""
    int16_regex = """([0-9A-Fa-f]{4})"""
    termination_regex = int8_regex
    yprc_packet_regex = re.compile("!y"+
                                   float_regex+ #Yaw
                                   float_regex+ #Pitch
                                   float_regex+ #Roll
                                   float_regex+ #Compass Heading
                                   termination_regex)
    def _parse(self, line):
        """Return (yaw, pitch, roll, compass) floats, or None if no packet matches."""
        match = self.yprc_packet_regex.search(line)
        if match:
            groups = match.groups()
            yaw = float(groups[0])
            pitch = float(groups[1])
            roll = float(groups[2])
            compass = float(groups[3])
            return yaw, pitch, roll, compass
    def __init__(self):
        # NOTE(review): baud 57500 is unusual -- confirm it isn't meant to be 57600.
        self.serial = serial.Serial(1, 57500)
        super().__init__(name="IMU Listener", daemon=True)
        self.mutex = threading.RLock()
        self.yaw = 0.0
        self.pitch = 0.0
        self.roll = 0.0
        self.compass = 0.0
        # Bug fix: start the thread only after the lock and fields exist;
        # run() could previously touch self.mutex before it was assigned.
        self.start()
    def run(self):
        """Continuously read lines, parse packets, and publish the latest values."""
        while True:
            try:
                line = self.serial.readline().decode("utf8")
                parsed = self._parse(line)
                if parsed:
                    yaw, pitch, roll, compass = parsed
                    with self.mutex:
                        self.yaw = yaw
                        self.pitch = pitch
                        self.roll = roll
                        self.compass = compass
            except(UnicodeDecodeError):
                # Ignore garbage bytes from a partial/corrupt serial read.
                pass
    def getYaw(self):
        with self.mutex:
            return self.yaw
    def getPitch(self):
        with self.mutex:
            return self.pitch
    def getRoll(self):
        with self.mutex:
            # Bug fix: previously returned self.Roll, which raised AttributeError.
            return self.roll
    def getCompass(self):
        with self.mutex:
            return self.compass
| {
"repo_name": "DenfeldRobotics4009/2015_Lopez_Jr",
"path": "imu_simple.py",
"copies": "1",
"size": "1922",
"license": "bsd-3-clause",
"hash": 7964717382175934000,
"line_mean": 26.8550724638,
"line_max": 60,
"alpha_frac": 0.5015608741,
"autogenerated": false,
"ratio": 3.8286852589641436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9774610416634784,
"avg_score": 0.011127143285872049,
"num_lines": 69
} |
__author__ = "auxiliary-character"
import wpilib
from wpilib.command import Command
class SuperStrafe64(Command):
    """Only for Nintendo 64."""
    # Direction constants selecting which axis actuate() drives.
    kBack = 0
    kForward = 1
    kRight = 2
    kLeft = 3
    def __init__(self, robot, direction):
        super().__init__()
        self.robot = robot
        self.requires(robot.drivetrain)
        self.setTimeout(.7)
        self.direction = direction
    def actuate(self, amount):
        """Drive in the configured direction at the given magnitude."""
        drive = self.robot.drivetrain.driveManual
        if self.direction == self.kBack:
            drive(0, amount, amount/5)
        elif self.direction == self.kForward:
            drive(0, -amount, amount/5)
        elif self.direction == self.kRight:
            drive(amount, 0, 0)
        elif self.direction == self.kLeft:
            drive(-amount, 0, 0)
    def execute(self):
        """Ramp up for the first 0.5 s of the strafe, then kick back to brake."""
        elapsed = self.timeSinceInitialized()
        self.actuate(elapsed if elapsed < .5 else -1)
    def isFinished(self):
        return self.isTimedOut()
    def end(self):
        """Stop all motion."""
        self.actuate(0)
    def interrupted(self):
        self.end()
    def cancel(self):
        self.end()
        super().cancel()
| {
"repo_name": "DenfeldRobotics4009/2015_Lopez_Jr",
"path": "commands/semiauto_commands/super_strafe_64.py",
"copies": "1",
"size": "1249",
"license": "bsd-3-clause",
"hash": -5528069923901526000,
"line_mean": 26.152173913,
"line_max": 67,
"alpha_frac": 0.5868694956,
"autogenerated": false,
"ratio": 3.6735294117647057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9754187727240482,
"avg_score": 0.0012422360248447205,
"num_lines": 46
} |
__author__ = "auxiliary-character"
import wpilib
from wpilib.command import Command
class SuperStrafeEntertainmentSystem(Command):
    """Cartridge-based entertainment system."""
    # Direction constants selecting which axis actuate() drives.
    kBack = 0
    kForward = 1
    kRight = 2
    kLeft = 3
    def __init__(self, robot, direction):
        super().__init__()
        self.robot = robot
        self.requires(robot.drivetrain)
        self.setTimeout(.7)
        self.direction = direction
    def actuate(self, amount):
        """Drive in the configured direction at the given magnitude."""
        if self.direction == self.kBack:
            self.robot.drivetrain.driveManual(0, amount, amount/5)
        elif self.direction == self.kForward:
            self.robot.drivetrain.driveManual(0, -amount, amount/5)
        elif self.direction == self.kRight:
            self.robot.drivetrain.driveManual(amount, 0, 0)
        elif self.direction == self.kLeft:
            self.robot.drivetrain.driveManual(-amount, 0, 0)
    def execute(self):
        """Ramp up for the first 0.5 s, then kick back to brake."""
        time = self.timeSinceInitialized()
        if time < .5:
            self.actuate(time)
        else:
            self.actuate(-1)
    def isFinished(self):
        return self.isTimedOut()
    def end(self):
        self.actuate(0)
    def interrupted(self):
        self.end()
    def cancel(self):
        self.end()
        # Bug fix: cancel() never chained to super().cancel(), unlike the
        # sibling commands, so the scheduler was not told about the cancel.
        super().cancel()
| {
"repo_name": "DenfeldRobotics4009/2016_Dos_Point_Oh",
"path": "commands/semiauto/super_strafe_entertainment_system.py",
"copies": "1",
"size": "1257",
"license": "bsd-3-clause",
"hash": -5816460163849677000,
"line_mean": 26.9333333333,
"line_max": 67,
"alpha_frac": 0.600636436,
"autogenerated": false,
"ratio": 3.7079646017699117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9803591468930556,
"avg_score": 0.0010019137678712146,
"num_lines": 45
} |
__author__ = "auxiliary-character"
import wpilib
from wpilib.timer import Timer
from wpilib.command import Command
import csv
class PlayMacro(Command):
    """This plays macro movements from the .csv file."""
    def __init__(self, robot, name):
        """Initialize the command and get all the requirements."""
        super().__init__()
        self.robot = robot
        self.requires(robot.subsystem)
        self.name = name
        self.done_yet = False  # external flag to abort playback early
    def initialize(self):
        # NOTE(review): the entire playback loop runs inside initialize(),
        # which blocks for up to 15 s -- confirm this is intentional.
        try:
            #attempt to access the files required
            if self.robot.isReal():
                self.f = open("/home/lvuser/py/"+self.name)
            else:
                self.f = open(self.name)
            self.reader_iterator = csv.DictReader(self.f)
        except FileNotFoundError:
            #This bit runs if the file isn't there
            self.reader_iterator = []
        self.setTimeout(15)
        start_time = Timer.getFPGATimestamp()
        # Replay each row at the timestamp offset it was recorded with.
        for line in self.reader_iterator:
            t_delta = float(line["Time"]) - (Timer.getFPGATimestamp()-start_time)
            if t_delta > 0:
                Timer.delay(t_delta)
            self.robot.subsystem.manualSet(float(line["Subsystem"]))
            if self.isTimedOut() or self.done_yet:
                break
    def execute(self):
        pass
    def isFinished(self):
        # Playback already happened in initialize(), so finish immediately.
        return True
    def end(self):
        """Stop the subsystem and close the macro file if it was opened."""
        self.robot.subsystem.manualSet(0)
        if hasattr(self, "f"):
            self.f.close()
    def interrupted(self):
        self.end()
    def cancel(self):
        self.end()
        super().cancel()
| {
"repo_name": "DenfeldRobotics4009/python-framework",
"path": "commands/play_macro.py",
"copies": "1",
"size": "1611",
"license": "bsd-3-clause",
"hash": -134495411226501060,
"line_mean": 28.8333333333,
"line_max": 81,
"alpha_frac": 0.5667287399,
"autogenerated": false,
"ratio": 3.987623762376238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5054352502276238,
"avg_score": null,
"num_lines": null
} |
__author__ = "auxiliary-character"
import csv
import wpilib
from wpilib.command import Command
from wpilib.timer import Timer
from utilities.settings import Settings
class PlayMacro(Command):
    """This plays macro movements from the .csv file."""
    def __init__(self, robot, name):
        """Initialize the command and get all the requirements."""
        super().__init__()
        self.robot = robot
        self.requires(robot.drivetrain)
        self.name = name
        self.done_yet = False  # external flag to abort playback early
    def initialize(self):
        """Figure out the file location and play it back."""
        # NOTE(review): the whole playback loop runs inside initialize(),
        # blocking the scheduler for the macro's duration -- confirm intended.
        try:
            #attempt to access the files required
            if self.robot.isReal():
                self.f = open("/home/lvuser/py/macros/"+self.name)
            else:
                self.f = open(self.name)
            self.reader_iterator = csv.DictReader(self.f)
        except FileNotFoundError:
            #This bit runs if the file isn't there
            self.reader_iterator = []
        #length of time to play the macro.
        self.setTimeout(Settings.num_macro_timeout)
        #start time is important for making sure everything plays at the right time
        start_time = Timer.getFPGATimestamp()
        #do the actual playback bit
        for line in self.reader_iterator:
            t_delta = float(line["Time"]) - (Timer.getFPGATimestamp()-start_time)
            if t_delta > 0:
                Timer.delay(t_delta)
            #Add subsystems in the following manner:
            #self.robot.subsystem.manualCommand(float(line["Row_Name"]))
            self.robot.drivetrain.driveManual(float(line["Drive_X"]),
                                              float(line["Drive_Y"]),
                                              float(line["Drive_Z"]))
            if self.isTimedOut() or self.done_yet:
                break
    def execute(self):
        pass
    def isFinished(self):
        # Playback already happened in initialize(), so finish immediately.
        return True
    def end(self):
        """Run when called, end the macro playing."""
        #set the motors to 0 for safety's sake:
        self.robot.drivetrain.driveManual(0,0,0)
        if hasattr(self, "f"):
            self.f.close()
    def interrupted(self):
        """Runs when macro playback is interrupted."""
        self.end()
    def cancel(self):
        """Runs when macro playback is canceled."""
        self.end()
        super().cancel()
| {
"repo_name": "DenfeldRobotics4009/2016_Dos_Point_Oh",
"path": "macros/play_macro.py",
"copies": "1",
"size": "2399",
"license": "bsd-3-clause",
"hash": -5957401128605432000,
"line_mean": 28.9875,
"line_max": 83,
"alpha_frac": 0.5681533972,
"autogenerated": false,
"ratio": 4.201401050788091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5269554447988091,
"avg_score": null,
"num_lines": null
} |
__author__ = "auxiliary-character"
import csv
import wpilib
from wpilib.command import Command
from wpilib.timer import Timer
from utilities.settings import Settings
class RecordMacro(Command):
    """This records robot movements and writes them to a .csv file."""
    def __init__(self, robot, name):
        super().__init__()
        self.robot = robot
        #length of time to record the macro.
        self.setTimeout(Settings.num_macro_timeout)
        self.name = name
    def initialize(self):
        """Set up the macro file and prepare for recording."""
        self.initTime = wpilib.Timer.getFPGATimestamp() #get the current time
        self.f = open("/home/lvuser/py/macros/"+self.name, "w")
        # Bug fix: "Drive_Z" and "Time" were missing from the field list, so
        # the first writerow() raised ValueError and no macro was recorded.
        fields = ["Drive_X",
                  "Drive_Y",
                  "Drive_Z",
                  "Time"]
        self.writer = csv.DictWriter(self.f, fieldnames=fields)
        self.writer.writeheader()
    def execute(self):
        """Record the macro."""
        #do the actual writing bit:
        self.writer.writerow({
            #Add subsystems in the following manner:
            #"Row_Name": self.robot.subsystem.getValue
            "Drive_X": self.robot.drivetrain.x,
            "Drive_Y": self.robot.drivetrain.y,
            "Drive_Z": self.robot.drivetrain.z,
            #this is needed to make sure everything runs at the right time, v. important:
            "Time": wpilib.Timer.getFPGATimestamp() - self.initTime}) #get the time as the row is written
    def isFinished(self):
        """Returns .isTimedOut() when called."""
        return self.isTimedOut()
    def end(self):
        """Close out & save the macro when called."""
        self.f.close()
    def interrupted(self):
        """Run when macro recording is interrupted."""
        self.end()
    def cancel(self):
        """Run when macro recording is canceled."""
        self.end()
        super().cancel()
| {
"repo_name": "DenfeldRobotics4009/2016_Dos_Point_Oh",
"path": "macros/record_macro.py",
"copies": "1",
"size": "1876",
"license": "bsd-3-clause",
"hash": 1054404207226903700,
"line_mean": 28.7777777778,
"line_max": 105,
"alpha_frac": 0.6044776119,
"autogenerated": false,
"ratio": 3.9411764705882355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5045654082488236,
"avg_score": null,
"num_lines": null
} |
__author__ = 'avale'
import time, random
def solve(grid): return search(parse_grid(grid))  # propagate constraints, then depth-first search
def search(values):
    "Using depth-first search and propagation, try all possible values."
    if values is False:
        return False ## Failed earlier
    if all(len(values[s]) == 1 for s in squares):
        return values ## Solved!
    ## Chose the unfilled square s with the fewest possibilities (MRV heuristic)
    n,s = min((len(values[s]), s) for s in squares if len(values[s]) > 1)
    # Try each candidate digit on a copy so a failed branch can't corrupt values.
    return some(search(assign(values.copy(), s, d))
                for d in values[s])
def some(seq):
    "Return the first element of seq that is true, or False if none is."
    return next((element for element in seq if element), False)
def solve_all(grids, name='', showif=0.0):
    """Attempt to solve a sequence of grids. Report results.
    When showif is a number of seconds, display puzzles that take longer.
    When showif is None, don't display any puzzles."""
    def time_solve(grid):
        # NOTE(review): time.clock() is Python-2 era and was removed in 3.8.
        start = time.clock()
        values = solve(grid)
        t = time.clock()-start
        ## Display puzzles that take long enough
        if showif is not None and t > showif:
            display(grid_values(grid))
            if values: display(values)
            print '(%.2f seconds)\n' % t
        return (t, solved(values))
    times, results = zip(*[time_solve(grid) for grid in grids])
    N = len(grids)
    if N > 1:
        # Summary line only makes sense for more than one puzzle.
        print "Solved %d of %d %s puzzles (avg %.2f secs (%d Hz), max %.2f secs)." % (
            sum(results), N, name, sum(times)/N, N/sum(times), max(times))
def solved(values):
    "A puzzle is solved if each unit is a permutation of the digits 1 to 9."
    if values is False:
        return False
    all_digits = set(digits)
    return all(set(values[s] for s in unit) == all_digits for unit in unitlist)
def from_file(filename, sep='\n'):
    "Parse a file into a list of strings, separated by sep."
    # Bug fix: the old Python-2 file() call never closed its handle (and the
    # file builtin no longer exists in Python 3); a context manager releases
    # the descriptor promptly and works on both.
    with open(filename) as f:
        return f.read().strip().split(sep)
def random_puzzle(N=17):
    """Make a random puzzle with N or more assignments. Restart on contradictions.
    Note the resulting puzzle is not guaranteed to be solvable, but empirically
    about 99.8% of them are solvable. Some have multiple solutions."""
    values = dict((s, digits) for s in squares)
    for s in shuffled(squares):
        # Assign a random candidate; stop at the first contradiction.
        if not assign(values, s, random.choice(values[s])):
            break
        ds = [values[s] for s in squares if len(values[s]) == 1]
        # Accept once N squares are fixed using at least 8 distinct digits.
        if len(ds) >= N and len(set(ds)) >= 8:
            return ''.join(values[s] if len(values[s])==1 else '.' for s in squares)
    return random_puzzle(N) ## Give up and make a new puzzle
def shuffled(seq):
    "Return a randomly shuffled copy of the input sequence."
    copy = list(seq)  # never mutate the caller's sequence
    random.shuffle(copy)
    return copy
# Sample puzzles: 81-character strings in row-major order; '0' or '.' marks an
# empty square, any digit is a given.
grid1 = '003020600900305001001806400008102900700000008006708200002609500800203009005010300'
grid2 = '4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......'
hard1 = '.....6....59.....82....8....45........3........6..3.54...325..6..................'
if __name__ == '__main__':
    # Bug fix: solve_all expects a *sequence* of grids; passing the bare
    # string iterated its 81 characters as 81 bogus one-character puzzles.
    # (The old `global grid1` statement was also useless at module level.)
    solve_all([grid1], "easy", None)
    # solve_all(from_file("easy50.txt", '========'), "easy", None)
    # solve_all(from_file("top95.txt"), "hard", None)
    # solve_all(from_file("hardest.txt"), "hardest", None)
    # solve_all([random_puzzle() for _ in range(99)], "random", 100.0)
| {
"repo_name": "andredalton/bcc",
"path": "2015/MAC0327/Desafios 2/p3.py",
"copies": "1",
"size": "3352",
"license": "apache-2.0",
"hash": 6222991863946814000,
"line_mean": 39.8780487805,
"line_max": 92,
"alpha_frac": 0.611575179,
"autogenerated": false,
"ratio": 3.295968534906588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4407543713906588,
"avg_score": null,
"num_lines": null
} |
__author__ = 'avathar'
from selectors import RankSelector
from crossover import OnePointCrossover
from chromosome import Chromosome
from mutation import Mutation
from fitness import FitnessFunction
import random
class GeneticAlgorithm(object):
    """Simple generational GA: rank selection, one-point crossover, mutation."""
    def __init__(self, population_size, sample_genotype, crossover_rate=0.6,
                 mutation_rate=0.2, maximize=True):
        self.population_size = population_size
        self.genotype = sample_genotype
        self.crossover_rate = crossover_rate
        self.mutation_rate = mutation_rate
        self.selector = RankSelector(maximize)
        self.crossover = OnePointCrossover()
        self.mutation = Mutation()
        self.generations = []  # history of every generation processed
        self.maximize = maximize
    def evolve(self, fitness_obj=FitnessFunction, num_generations=10):
        """Run the GA and return the final population (fitness already scored)."""
        # initialize population
        population = []
        for _ in range(self.population_size):
            chromosome = self.genotype.create_random_instance()
            population.append(chromosome)
        # process each generation
        for _ in range(num_generations):
            # track generations
            self.generations.append(population)
            next_population = []
            # calculate fitness for population
            for chromosome in population:
                chromosome.fitness = fitness_obj.evaluate(chromosome)
            # select parents for generation
            parents = self.selector.select_pairs(population=population)
            # perform crossover
            for parent in parents:
                do_crossover = random.random() < self.crossover_rate
                if do_crossover:
                    child_1, child_2 = self.crossover.recombine(
                        parent[0].genes,
                        parent[1].genes
                    )
                    chrom_child_1 = Chromosome(genes=child_1)
                    chrom_child_2 = Chromosome(genes=child_2)
                    # add new children to next population
                    next_population.append(chrom_child_1)
                    next_population.append(chrom_child_2)
                else:
                    # no crossover, add parents as is
                    next_population.append(parent[0])
                    next_population.append(parent[1])
            # do mutation
            # NOTE(review): mutation is rolled once per generation for the whole
            # population rather than per individual -- confirm this is intended.
            do_mutation = random.random() < self.mutation_rate
            if do_mutation:
                next_population = self.mutation.mutate(self.genotype,
                                                       next_population)
            population = next_population
        # calculate fitness for last generation
        for chromosome in population:
            chromosome.fitness = fitness_obj.evaluate(chromosome)
        return population
    def best_individual(self, population):
        """Return {gene_label: value} for the fittest chromosome in population."""
        population.sort(key=lambda x: x.fitness, reverse=self.maximize)
        best_individual = population[0]
        fittest = dict()
        for i in range(len(best_individual.genes)):
            fittest[self.genotype.get_label_at(i)] = best_individual.genes[i]
        return fittest
| {
"repo_name": "avathardev/ppolom",
"path": "darwin/ga.py",
"copies": "2",
"size": "3126",
"license": "mit",
"hash": -1481341017634758100,
"line_mean": 35.7764705882,
"line_max": 77,
"alpha_frac": 0.5886116443,
"autogenerated": false,
"ratio": 4.517341040462428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 85
} |
__author__ = 'avathar'
import json
import numpy as np
from darwin.ga import GeneticAlgorithm
from darwin.genotype import Genotype
from darwin.fitness import FitnessFunction
item = '1005.90.03'
def read_distance():
    """Load distances.json and index its records by country id."""
    with open('distances.json', 'r') as input_file:
        records = json.load(input_file)
    return {
        entry['id']: {
            "country": entry['country'],
            "distance": entry['distance'],
        }
        for entry in records
    }
def read_file(filename):
    """Parse the JSON document at filename and return the resulting object."""
    with open(filename) as handle:
        return json.load(handle)
class PpolomFitness(FitnessFunction):
    """Fitness = distance to country + quota mismatch + tax cost on the mismatch.

    Lower is better; the GA is expected to run with maximize=False.
    """
    def __init__(self):
        self.distances = read_distance()
        self.trade_agreements = read_file('trade_agreements.json')
        self.quotas = read_file('quotas.json')
        self.products = read_file('products.json')
    def evaluate(self, chromosome):
        """Score a (country_code, quantity) chromosome for the module-level item."""
        country_code = chromosome.genes[0]
        quantity = chromosome.genes[1]
        tax = None
        item_fta = None
        item_quota = 0
        # get distance to country
        country_distance = self.distances[country_code]['distance']
        # Find the preferential tax rate if the country belongs to a trade
        # agreement. Bug fix: the old loop assigned the base tax in an else
        # branch on every non-matching agreement, so a match could be
        # overwritten by a later iteration; stop at the first match instead.
        for fta, countries in self.trade_agreements.items():
            if country_code in countries:
                tax = self.products[item]['fta'][fta]
                item_fta = fta
                break
        if tax is None:
            # No agreement matched (or the FTA rate is null): use the base tax.
            tax = self.products[item]['tax']
        if item in self.quotas:
            item_quota = self.quotas[item]['quota']
            if not isinstance(item_quota, int):
                # Quotas can be per-agreement dicts; use the matched FTA's quota.
                if item_fta is not None and item_fta in item_quota:
                    item_quota = self.quotas[item]['quota'][item_fta]
                else:
                    item_quota = 0
        quota_delta = abs(item_quota - quantity)
        fitness_value = country_distance + quota_delta + (tax*quota_delta)
        return fitness_value
def main():
    # Build the fitness function (loads all JSON reference data from disk).
    ppplom_fitness = PpolomFitness()
    # One gene per label: the destination country code and the order size.
    labels = [
        'code',
        'units',
    ]
    values = [
        [code for code in ppplom_fitness.distances.keys()],
        [i for i in range(1000, 10000000, 1000)]
    ]
    sample = Genotype(labels, values)
    # maximize=False: PpolomFitness scores are penalties, lower is better.
    ga = GeneticAlgorithm(population_size=200,
                          sample_genotype=sample,
                          crossover_rate=0.6,
                          mutation_rate=0.02,
                          maximize=False)
    best_generation = ga.evolve(fitness_obj=ppplom_fitness,
                                num_generations=500)
    # Report summary statistics of the final generation's fitness values.
    print "Best Generation "
    all_fitness = []
    for chrom in best_generation:
        all_fitness.append(chrom.fitness)
    print "Avg fitness = " + str(np.average(all_fitness))
    print "Max fitness = " + str(np.max(all_fitness))
    print "Min fitness = " + str(np.min(all_fitness))
    print "Fitness std = " + str(np.std(all_fitness))
    print "\n"
    # best_individual returns a {label: gene_value} mapping (see darwin/ga.py).
    fittest = ga.best_individual(best_generation)
    print " Values for fittest individual"
    print "\tcountry:", ppplom_fitness.distances[fittest['code']]['country']
    print "\tunits:", fittest['units'],
if __name__ == '__main__':
    main()
| {
"repo_name": "avatharBot/ppolom",
"path": "ppolom.py",
"copies": "2",
"size": "3259",
"license": "mit",
"hash": -2511581590090491400,
"line_mean": 30.3365384615,
"line_max": 76,
"alpha_frac": 0.5694998466,
"autogenerated": false,
"ratio": 3.7373853211009176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5306885167700918,
"avg_score": null,
"num_lines": null
} |
__author__ = 'avathar'
import random
from chromosome import Chromosome
class Genotype(object):
    """Describes the structure of a chromosome: one label and one pool of
    allowed values per gene position."""

    def __init__(self, labels, values):
        self.labels = labels
        self.values = values

    def describe(self):
        """Return a (gene count, labels, value pools) tuple."""
        return (len(self.values), self.labels, self.values)

    def num_genes(self):
        """Return the number of genes in this genotype."""
        return len(self.values)

    def create_random_instance(self):
        """Build a Chromosome whose genes are picked at random, one value
        from each pool."""
        genes = [pool[random.randint(0, len(pool) - 1)] for pool in self.values]
        return Chromosome(genes=genes)

    def get_label_at(self, pos):
        """Return the label of the gene at position *pos*."""
        return self.labels[pos]
| {
"repo_name": "avatharBot/ppolom",
"path": "darwin/genotype.py",
"copies": "2",
"size": "1128",
"license": "mit",
"hash": -8110609767163993000,
"line_mean": 23.5217391304,
"line_max": 78,
"alpha_frac": 0.5647163121,
"autogenerated": false,
"ratio": 4.256603773584906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
__author__ = 'avinashj'
"""
The accounts module contains the definitions for basic CRUD operations on Accounts
"""
from qds_sdk.qubole import Qubole
from qds_sdk.resource import Resource
from argparse import ArgumentParser
import logging
import json
log = logging.getLogger("qds_account")
class AccountCmdLine:
    """Command-line interface for Qubole account operations (used by qds.py)."""

    @staticmethod
    def parsers():
        """Build the ArgumentParser for the ``accounts`` sub-command.

        Returns:
            ArgumentParser wired so that parse_args(...).func is the
            handler for the chosen action.
        """
        argparser = ArgumentParser(
            prog="qds.py accounts",
            description="Accounts client for Qubole Data Service.")
        subparsers = argparser.add_subparsers()

        create = subparsers.add_parser("create", help="Create a new account")
        create.add_argument(
            "--account", dest="account",
            help="create an account with the given account parameters")
        create.set_defaults(func=AccountCmdLine.create)
        return argparser

    @staticmethod
    def run(args):
        """Parse *args* and dispatch to the selected sub-command handler."""
        parsed = AccountCmdLine.parsers().parse_args(args)
        return parsed.func(parsed)

    @staticmethod
    def create(args):
        """Create an account and return the API response as pretty JSON."""
        return json.dumps(Account.create(args.account), sort_keys=True, indent=4)
class Account(Resource):
    """Base Qubole Account resource; all requests go to the /account endpoint."""

    rest_entity_path = "account"

    @staticmethod
    def create(args):
        """POST the JSON-encoded *args* string to the account endpoint."""
        conn = Qubole.agent()
        return conn.post(Account.rest_entity_path, {"account": json.loads(args)})
| {
"repo_name": "jainavi/qds-sdk-py",
"path": "qds_sdk/accounts.py",
"copies": "1",
"size": "1946",
"license": "apache-2.0",
"hash": 6795997318420048000,
"line_mean": 26.4084507042,
"line_max": 90,
"alpha_frac": 0.6223021583,
"autogenerated": false,
"ratio": 4.422727272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5545029431027273,
"avg_score": null,
"num_lines": null
} |
__author__ = 'avinashj'
"""
The cloud creds module contains the definitions for basic CRUD operations on CloudCred
"""
from qds_sdk.qubole import Qubole
from qds_sdk.resource import Resource
from argparse import ArgumentParser
import logging
import json
log = logging.getLogger("qds_cloud_cred")
class CloudCredCmdLine:
    """Command-line interface for Qubole cloud credential operations."""

    @staticmethod
    def parsers():
        """Build the ArgumentParser for the ``cloud_creds`` sub-command."""
        argparser = ArgumentParser(
            prog="qds.py cloud_creds",
            description="CloudCred client for Qubole Data Service.")
        subparsers = argparser.add_subparsers()

        create = subparsers.add_parser("create", help="Create a new cloud cred")
        # The three credential fields share one flag-definition pattern.
        for flag, text in (("--name", "name of cloud creds"),
                           ("--role_arn", "role arn of cloud creds"),
                           ("--external_id", "external id of cloud creds")):
            create.add_argument(flag, dest=flag.lstrip("-"), help=text)
        create.set_defaults(func=CloudCredCmdLine.create)
        return argparser

    @staticmethod
    def run(args):
        """Parse *args* and dispatch to the selected sub-command handler."""
        parsed = CloudCredCmdLine.parsers().parse_args(args)
        return parsed.func(parsed)

    @staticmethod
    def create(args):
        """Create a cloud credential and return its attributes as pretty JSON."""
        creds = CloudCred.create(name=args.name,
                                 role_arn=args.role_arn,
                                 external_id=args.external_id)
        return json.dumps(creds.attributes, sort_keys=True, indent=4)
class CloudCred(Resource):
    """
    qds_sdk.CloudCred is the base Qubole CloudCred class.

    Holds only the REST entity path; CRUD behaviour comes from Resource.
    """
    """ all commands use the /qbucket endpoint"""
    # NOTE(review): the bare string above looks copy-pasted from the qbucket
    # module -- requests actually go to the /cloud_creds endpoint below.
    rest_entity_path = "cloud_creds"
"repo_name": "jainavi/qds-sdk-py",
"path": "qds_sdk/cloud_creds.py",
"copies": "1",
"size": "2169",
"license": "apache-2.0",
"hash": -7789673559407318000,
"line_mean": 29.1388888889,
"line_max": 91,
"alpha_frac": 0.5961272476,
"autogenerated": false,
"ratio": 4.338,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.54341272476,
"avg_score": null,
"num_lines": null
} |
__author__ = 'avinashj'
"""
The published hivetables module contains the definitions for basic CRUD operations on PublishedHivetable
"""
from qds_sdk.qubole import Qubole
from qds_sdk.resource import Resource
from argparse import ArgumentParser
import logging
import json
log = logging.getLogger("qds_published_hivetable")
class PublishedHivetableCmdLine:
    """
    qds_sdk.PublishedHivetableCmdLine is the command-line interface used by
    qds.py for the published_hivetables endpoint.
    """
    @staticmethod
    def parsers():
        """Build the ArgumentParser for the published_hivetables sub-command.

        Returns:
            ArgumentParser wired so that parse_args(...).func is the
            handler for the chosen action.
        """
        argparser = ArgumentParser(prog="qds.py published_hivetables",
                                   description="Published hivetables client for Qubole Data Service.")
        subparsers = argparser.add_subparsers()
        # Publish
        publish = subparsers.add_parser("publish",
                                        help="Publish a new hivetable")
        publish.add_argument("--space_id", dest="space_id",
                             help="publish the hivetable in the given space id")
        publish.add_argument("--table_name", dest="table_name",
                             help="Name of the hivetable to be published")
        publish.add_argument("--schema_name", dest="schema_name", default="default",
                             help="Name of the schema")
        publish.set_defaults(func=PublishedHivetableCmdLine.publish)
        # List (local renamed from `list` so the builtin is not shadowed)
        list_parser = subparsers.add_parser("list",
                                            help="List all published hivetables")
        list_parser.set_defaults(func=PublishedHivetableCmdLine.list)
        # View
        view = subparsers.add_parser("view",
                                     help="View a specific published hivetable")
        view.add_argument("id",
                          help="Numeric id of the Published hivetable")
        view.add_argument("--meta_data", dest="meta_data", default=False,
                          help="Meta data of the published hivetable")
        view.set_defaults(func=PublishedHivetableCmdLine.view)
        # Update
        update = subparsers.add_parser("update",
                                       help="Update a specific Published hivetable")
        update.add_argument("id",
                            help="Numeric id of the Published hivetable")
        update.set_defaults(func=PublishedHivetableCmdLine.update)
        # Delete
        delete = subparsers.add_parser("unpublish",
                                       help="Unpublish a specific Published Hivetable")
        delete.add_argument("id",
                            help="Numeric id of the Published Hivetable")
        delete.set_defaults(func=PublishedHivetableCmdLine.delete)
        return argparser
    @staticmethod
    def run(args):
        """Parse *args* and dispatch to the selected handler."""
        parser = PublishedHivetableCmdLine.parsers()
        parsed = parser.parse_args(args)
        return parsed.func(parsed)
    @staticmethod
    def publish(args):
        """Publish a hivetable and return the API response as pretty JSON."""
        published_hivetable = PublishedHivetable.create(space_id=args.space_id,
                                                        table_name=args.table_name,
                                                        schema_name=args.schema_name)
        return json.dumps(published_hivetable.attributes, sort_keys=True, indent=4)
    @staticmethod
    def list(args):
        """Return all published hivetables as pretty JSON."""
        published_hivetable_list = PublishedHivetable.list()
        return json.dumps(published_hivetable_list, sort_keys=True, indent=4)
    @staticmethod
    def view(args):
        """Return one published hivetable (optionally with metadata) as pretty JSON."""
        # find() takes the whole namespace: it reads both args.id and args.meta_data.
        published_hivetable = PublishedHivetable.find(args)
        return json.dumps(published_hivetable, sort_keys=True, indent=4)
    @staticmethod
    def update(args):
        """Update a published hivetable; no editable fields are exposed yet."""
        options = {}
        published_hivetable = PublishedHivetable.update(args.id, **options)
        return json.dumps(published_hivetable.attributes, sort_keys=True, indent=4)
    @staticmethod
    def delete(args):
        """Unpublish a hivetable and return the API response as pretty JSON."""
        return json.dumps(PublishedHivetable.delete(args.id), sort_keys=True, indent=4)
class PublishedHivetable(Resource):
    """Base Qubole PublishedHivetable resource (endpoint: /published_hivetables)."""

    rest_entity_path = "published_hivetables"

    @staticmethod
    def find(args):
        """GET one published hivetable; append ?meta_data=true when requested."""
        path = "%s/%s" % (PublishedHivetable.rest_entity_path, args.id)
        if args.meta_data in ('true', 'True'):
            path += '?meta_data=true'
        return Qubole.agent().get(path)

    @staticmethod
    def list():
        """GET the whole collection."""
        return Qubole.agent().get(PublishedHivetable.rest_entity_path)

    @staticmethod
    def update(id, **kwargs):
        """PUT the given fields and wrap the response in a PublishedHivetable."""
        path = "%s/%s" % (PublishedHivetable.rest_entity_path, id)
        return PublishedHivetable(Qubole.agent().put(path, data=kwargs))

    @staticmethod
    def delete(id):
        """DELETE the published hivetable with the given id."""
        path = "%s/%s" % (PublishedHivetable.rest_entity_path, id)
        return Qubole.agent().delete(path)
"repo_name": "jainavi/qds-sdk-py",
"path": "qds_sdk/published_hivetables.py",
"copies": "1",
"size": "5285",
"license": "apache-2.0",
"hash": 7284062271244049000,
"line_mean": 35.4551724138,
"line_max": 104,
"alpha_frac": 0.606244087,
"autogenerated": false,
"ratio": 4.034351145038168,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011400038646438954,
"num_lines": 145
} |
__author__ = 'avinashj'
"""
The space subscribers module contains the definitions for basic CRUD operations on SpaceSubscriber
"""
from qds_sdk.qubole import Qubole
from qds_sdk.resource import Resource
from argparse import ArgumentParser
import logging
import json
log = logging.getLogger("qds_space_subscriber")
class SpaceSubscriberCmdLine:
    """
    qds_sdk.SpaceSubscriberCmdLine is the command-line interface used by
    qds.py for the space_subscribers endpoint.
    """
    @staticmethod
    def parsers():
        """Build the ArgumentParser for the space_subscribers sub-command.

        Returns:
            ArgumentParser wired so that parse_args(...).func is the
            handler for the chosen action.
        """
        argparser = ArgumentParser(prog="qds.py space_subscribers",
                                   description="SpaceSubscriber client for Qubole Data Service.")
        subparsers = argparser.add_subparsers()
        # Create
        create = subparsers.add_parser("create",
                                       help="Create a new space subscriber")
        create.add_argument("--space_id", dest="space_id",
                            help="id of the space for subscription")
        create.add_argument("--role_arn", dest="role_arn",
                            help="role_arn to access the space")
        create.add_argument("--external_id", dest="external_id",
                            help="external_id to access the space")
        create.set_defaults(func=SpaceSubscriberCmdLine.create)
        # List (local renamed from `list` so the builtin is not shadowed)
        list_parser = subparsers.add_parser("list",
                                            help="List all space subscribers")
        list_parser.set_defaults(func=SpaceSubscriberCmdLine.list)
        # View
        view = subparsers.add_parser("view",
                                     help="View a specific Space Subscriber")
        view.add_argument("id",
                          help="Numeric id of the Space Subscriber")
        view.set_defaults(func=SpaceSubscriberCmdLine.view)
        # Edit
        update = subparsers.add_parser("update",
                                       help="Edit a specific Space Subscriber")
        update.add_argument("id",
                            help="Numeric id of the Space Subscriber")
        update.add_argument("--role_arn", dest="role_arn",
                            help="role_arn to access the space")
        update.add_argument("--external_id", dest="external_id",
                            help="external_id to access the space")
        update.set_defaults(func=SpaceSubscriberCmdLine.update)
        # Delete
        delete = subparsers.add_parser("delete",
                                       help="Delete a specific Space Subscriber")
        delete.add_argument("id",
                            help="Numeric id of the Space Subscriber")
        delete.set_defaults(func=SpaceSubscriberCmdLine.delete)
        # Hivetables
        hivetables = subparsers.add_parser("hivetables",
                                           help="Get all hivetables available/subscribed inside a space")
        hivetables.add_argument("id",
                                help="Numeric id of the Space")
        hivetables.set_defaults(func=SpaceSubscriberCmdLine.hivetables)
        return argparser
    @staticmethod
    def run(args):
        """Parse *args* and dispatch to the selected handler."""
        parser = SpaceSubscriberCmdLine.parsers()
        parsed = parser.parse_args(args)
        return parsed.func(parsed)
    @staticmethod
    def create(args):
        """Subscribe to a space and return the API response as pretty JSON."""
        space_subscriber = SpaceSubscriber.create(space_id=args.space_id,
                                                  role_arn=args.role_arn, external_id=args.external_id)
        return json.dumps(space_subscriber.attributes, sort_keys=True, indent=4)
    @staticmethod
    def list(args):
        """Return all space subscribers as pretty JSON."""
        space_list = SpaceSubscriber.list()
        return json.dumps(space_list, sort_keys=True, indent=4)
    @staticmethod
    def view(args):
        """Return one space subscriber as pretty JSON."""
        tap = SpaceSubscriber.find(args.id)
        return json.dumps(tap.attributes, sort_keys=True, indent=4)
    @staticmethod
    def update(args):
        """Update a subscriber's role_arn/external_id; return pretty JSON."""
        space_subscriber = SpaceSubscriber.find(args.id)
        options = {'role_arn': args.role_arn, 'external_id': args.external_id}
        space_subscriber = space_subscriber.update(**options)
        return json.dumps(space_subscriber.attributes, sort_keys=True, indent=4)
    @staticmethod
    def delete(args):
        """Delete a subscriber and return the API response as pretty JSON."""
        space_subscriber = SpaceSubscriber.find(args.id)
        return json.dumps(space_subscriber.delete(), sort_keys=True, indent=4)
    @staticmethod
    def hivetables(args):
        """List hivetables available/subscribed inside a space as pretty JSON."""
        res = SpaceSubscriber.hivetables(args.id)
        return json.dumps(res, sort_keys=True, indent=4)
class SpaceSubscriber(Resource):
    """
    qds_sdk.SpaceSubscriber is the base Qubole SpaceSubscriber class.
    """
    """ all commands use the /space endpoint"""
    # Collection endpoint used by every request below.
    rest_entity_path = "space_subscribers"
    @staticmethod
    def list():
        # GET the whole collection.
        conn = Qubole.agent()
        url_path = SpaceSubscriber.rest_entity_path
        return conn.get(url_path)
    def update(self, **kwargs):
        # PUT the given fields for this subscriber.  Assumes the API response
        # that built this instance carried a `subscription_id` attribute and
        # that Resource provides element_path() -- TODO confirm.
        conn = Qubole.agent()
        return SpaceSubscriber(conn.put(self.element_path(self.subscription_id), data=kwargs))
    def delete(self):
        # DELETE this subscriber (same subscription_id assumption as update()).
        conn = Qubole.agent()
        return conn.delete(self.element_path(self.subscription_id))
    @staticmethod
    def hivetables(id):
        # GET /space_subscribers/<id>/hivetables.  NOTE: `id` shadows the builtin.
        conn = Qubole.agent()
        url_path = SpaceSubscriber.rest_entity_path + "/" + str(id) + "/hivetables"
        return conn.get(url_path)
"repo_name": "jainavi/qds-sdk-py",
"path": "qds_sdk/space_subscribers.py",
"copies": "1",
"size": "5571",
"license": "apache-2.0",
"hash": 4546381834662094300,
"line_mean": 35.4183006536,
"line_max": 105,
"alpha_frac": 0.6020463113,
"autogenerated": false,
"ratio": 4.345553822152886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5447600133452886,
"avg_score": null,
"num_lines": null
} |
__author__ = 'avinashj'
"""
The subscribed hivetables module contains the definitions for basic CRUD operations on SubscribedHivetable
"""
from qds_sdk.qubole import Qubole
from qds_sdk.resource import Resource
from argparse import ArgumentParser
import logging
import json
log = logging.getLogger("qds_subscribed_hivetable")
class SubscribedHivetableCmdLine:
    """
    qds_sdk.SubscribedHivetableCmdLine is the command-line interface used by
    qds.py for the subscribed_hivetables endpoint.
    """
    @staticmethod
    def parsers():
        """Build the ArgumentParser for the subscribed_hivetables sub-command.

        Returns:
            ArgumentParser wired so that parse_args(...).func is the
            handler for the chosen action.
        """
        argparser = ArgumentParser(prog="qds.py subscribed_hivetables",
                                   description="Subscribed hivetables client for Qubole Data Service.")
        subparsers = argparser.add_subparsers()
        # Subscribe
        subscribe = subparsers.add_parser("subscribe",
                                          help="Subscribe a new hivetable")
        subscribe.add_argument("--published_hivetable_id", dest="published_hivetable_id",
                               help="Numeric id of the hivetable to subscribe")
        subscribe.add_argument("--schema_name", dest="schema_name", default="default",
                               help="Name of the schema")
        subscribe.set_defaults(func=SubscribedHivetableCmdLine.subscribe)
        # List (local renamed from `list` so the builtin is not shadowed)
        list_parser = subparsers.add_parser("list",
                                            help="List all subscribed hivetables")
        list_parser.set_defaults(func=SubscribedHivetableCmdLine.list)
        # View
        view = subparsers.add_parser("view",
                                     help="View a specific subscribed hivetable")
        view.add_argument("id",
                          help="Numeric id of the Subscribed hivetable")
        view.set_defaults(func=SubscribedHivetableCmdLine.view)
        # Update
        update = subparsers.add_parser("update",
                                       help="Update a specific Subscribed hivetable")
        update.add_argument("id",
                            help="Numeric id of the Subscribed hivetable")
        update.set_defaults(func=SubscribedHivetableCmdLine.update)
        # Delete -- help text fixed: the original said "Space Subscriber",
        # copy-pasted from space_subscribers.py.
        delete = subparsers.add_parser("unsubscribe",
                                       help="Unsubscribe a specific Subscribed hivetable")
        delete.add_argument("id",
                            help="Numeric id of the Subscribed hivetable")
        delete.set_defaults(func=SubscribedHivetableCmdLine.delete)
        return argparser
    @staticmethod
    def run(args):
        """Parse *args* and dispatch to the selected handler."""
        parser = SubscribedHivetableCmdLine.parsers()
        parsed = parser.parse_args(args)
        return parsed.func(parsed)
    @staticmethod
    def subscribe(args):
        """Subscribe to a published hivetable; return pretty JSON."""
        subscribed_hivetable = SubscribedHivetable.create(published_hivetable_id=args.published_hivetable_id,
                                                          schema_name=args.schema_name)
        return json.dumps(subscribed_hivetable.attributes, sort_keys=True, indent=4)
    @staticmethod
    def list(args):
        """Return all subscribed hivetables as pretty JSON."""
        subscribed_hivetable_list = SubscribedHivetable.list()
        return json.dumps(subscribed_hivetable_list, sort_keys=True, indent=4)
    @staticmethod
    def view(args):
        """Return one subscribed hivetable as pretty JSON."""
        subscribed_hivetable = SubscribedHivetable.find(args.id)
        return json.dumps(subscribed_hivetable.attributes, sort_keys=True, indent=4)
    @staticmethod
    def update(args):
        """Update a subscribed hivetable; no editable fields are exposed yet."""
        options = {}
        subscribed_hivetable = SubscribedHivetable.update(args.id, **options)
        return json.dumps(subscribed_hivetable.attributes, sort_keys=True, indent=4)
    @staticmethod
    def delete(args):
        """Unsubscribe and return the API response as pretty JSON."""
        return json.dumps(SubscribedHivetable.delete(args.id), sort_keys=True, indent=4)
class SubscribedHivetable(Resource):
    """Base Qubole SubscribedHivetable resource (endpoint: /subscribed_hivetables)."""

    rest_entity_path = "subscribed_hivetables"

    @staticmethod
    def list():
        """GET the whole collection."""
        return Qubole.agent().get(SubscribedHivetable.rest_entity_path)

    @staticmethod
    def update(id, **kwargs):
        """PUT the given fields and wrap the response in a SubscribedHivetable."""
        endpoint = "%s/%s" % (SubscribedHivetable.rest_entity_path, id)
        return SubscribedHivetable(Qubole.agent().put(endpoint, data=kwargs))

    @staticmethod
    def delete(id):
        """DELETE the subscribed hivetable with the given id."""
        endpoint = "%s/%s" % (SubscribedHivetable.rest_entity_path, id)
        return Qubole.agent().delete(endpoint)
"repo_name": "jainavi/qds-sdk-py",
"path": "qds_sdk/subscribed_hivetables.py",
"copies": "1",
"size": "4758",
"license": "apache-2.0",
"hash": -555314366587825660,
"line_mean": 35.053030303,
"line_max": 109,
"alpha_frac": 0.6258932325,
"autogenerated": false,
"ratio": 4.056265984654732,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5182159217154731,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Avinesh_Kumar'
'''
MIT License
Copyright (c) 2017 Avinesh Kumar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import BaseHTTPServer
import time
import HttpServerImpl
import json
from urlparse import urlparse, parse_qs
# Listen address and port for the HTTP server (binding port 80 may require
# elevated privileges on most systems).
hostname="127.0.0.1"
port=80
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    # Class-level scratch dict; shared across requests because handler
    # instances all see the same class attribute.
    keep_values = {}
    def do_HEAD(self):
        # Reply 200 with a plain-text content type and no body.
        self.send_response(200)
        self.send_header("Content-type", "text/plain")
        self.end_headers()
    def do_GET(self):
        # GET: parse the query string into a dict and delegate the work
        # to the project-level HttpServerImpl module.
        self.send_response(200)
        self.send_header("Content-type", "text/plain")
        self.end_headers()
        # print self.path
        # print urlparse(self.path).query
        query_components = parse_qs(urlparse(self.path).query)
        # print query_components
        res = HttpServerImpl.process_get(query_components)
        # print "response: ",res
        self.wfile.write(res)
    # support JSON post data only.
    def do_POST(self):
        # POST: read exactly content-length bytes, decode the body as JSON
        # and delegate to HttpServerImpl.  NOTE: no Content-type header is
        # sent on this path, unlike do_GET/do_HEAD.
        content_len = int(self.headers.getheader('content-length'))
        post_body = self.rfile.read(content_len)
        # print "post body: ",post_body
        self.send_response(200)
        self.end_headers()
        data = json.loads(post_body)
        res = HttpServerImpl.process_post(data)
        self.wfile.write(res)
if __name__ == '__main__':
    server = BaseHTTPServer.HTTPServer
    httpserver = server((hostname, port), MyHandler)
    print time.asctime(), "Server Starts - %s:%s" % (hostname, port)
    try:
        # Blocks serving requests until interrupted (Ctrl-C).
        httpserver.serve_forever()
    except KeyboardInterrupt:
        pass
    httpserver.server_close()
    print time.asctime(), "Server Stops - %s:%s" % (hostname, port)
| {
"repo_name": "avinesh09/SimplePyHttpServer",
"path": "SimplePyHttpServer.py",
"copies": "1",
"size": "2659",
"license": "mit",
"hash": 6651258470527368000,
"line_mean": 33.5324675325,
"line_max": 78,
"alpha_frac": 0.6991350132,
"autogenerated": false,
"ratio": 3.980538922155689,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004165807737236308,
"num_lines": 77
} |
__author__ = 'AVIRAM'
import sys
import json
import logging
from pprint import pprint
#sys.path.insert(0, 'lib') #we need this line in order to make libraries imported from lib folder work properly
import requests #Used for http requests
def getCurrencyResults(FROM, TO):
    """Query Yahoo YQL for the FROM/TO exchange rate.

    Returns a (parsed_json, http_status_code) tuple.
    """
    base = ("https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20"
            "yahoo.finance.xchange%20where%20pair%20in%20(%22")
    tail = ("%22)&format=json&diagnostics=true&env=store%3A%2F%2F"
            "datatables.org%2Falltableswithkeys&callback=")
    response = requests.get(base + FROM + TO + tail)
    return response.json(), response.status_code
def getCommoditiesResults(COM):
    """Query Yahoo YQL for a commodity quote by symbol.

    Returns a (parsed_json, http_status_code) tuple.
    """
    base = ("https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20"
            "yahoo.finance.quotes%20where%20symbol%20in%20(%22")
    tail = ("%22)&format=json&diagnostics=true&env=store%3A%2F%2F"
            "datatables.org%2Falltableswithkeys&callback=")
    response = requests.get(base + COM + tail)
    return response.json(), response.status_code
def multiRequests():
    """Fetch quotes for every symbol listed in symbols.json and print them.

    The first five entries are treated as commodities (yahoo.finance.quotes)
    and the remaining entries as currency pairs (yahoo.finance.xchange) --
    assumes symbols.json keeps that ordering; TODO confirm.
    """
    with open('../web/static/json/symbols.json') as data_file:
        data = json.load(data_file)
    commodities = []
    currencies = []
    # enumerate replaces the original hand-maintained line counter;
    # ",".join replaces the quadratic string concatenation + slice-off-comma.
    for index, symbol in enumerate(data["symbol"]):
        target = commodities if index < 5 else currencies
        target.append(symbol["symbolName"])
    stComm = ",".join(commodities)
    stCurr = ",".join(currencies)
    URLComm = ("https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20"
               "yahoo.finance.quotes%20where%20symbol%20in%20(%22" + stComm +
               "%22)&format=json&diagnostics=true&env=store%3A%2F%2F"
               "datatables.org%2Falltableswithkeys&callback=")
    URLCurr = ("https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20"
               "yahoo.finance.xchange%20where%20pair%20in%20(%22" + stCurr +
               "%22)&format=json&diagnostics=true&env=store%3A%2F%2F"
               "datatables.org%2Falltableswithkeys&callback=")
    dataComm = requests.get(URLComm).json()
    dataCurr = requests.get(URLCurr).json()
    # print all commodity quotes; index 2 is special-cased (no Ask price)
    for i in range(dataComm["query"]["count"]):
        if (i != 2):
            print (dataComm["query"]["results"]["quote"][i]["Ask"])
        else:  # for Dax
            print (dataComm["query"]["results"]["quote"][i]["LastTradePriceOnly"])
    # print all currency rates
    for i in range(dataCurr["query"]["count"]):
        print (dataCurr["query"]["results"]["rate"][i]["Rate"])
'''
FROM="usd"
TO="cad"
response, status_code = getCurrencyResults(FROM,TO)
print("The Rate of "+FROM+"/"+TO+ ": "+response["query"]["results"]["rate"]["Rate"])
'''
'''
COM="GCK15.CMX"
response, status_code = getCommoditiesResults(COM)
print ("The Rate of "+COM+": "+response["query"]["results"]["quote"]["Ask"])
'''
#multiRequests()
| {
"repo_name": "yaakov300/ForexApp",
"path": "models/priceInterface.py",
"copies": "1",
"size": "2988",
"license": "mit",
"hash": 7711611803835160000,
"line_mean": 34.8888888889,
"line_max": 233,
"alpha_frac": 0.6429049531,
"autogenerated": false,
"ratio": 3.106029106029106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4248934059129106,
"avg_score": null,
"num_lines": null
} |
__author__ = 'avraham'
import time
class MyType(type):
    """Metaclass that forces every class it creates to inherit from object only."""
    def __new__(cls, name, bases, attrs):
        # The declared bases are ignored and replaced with (object,).
        return super(MyType, cls).__new__(cls, name, (object,), attrs)
class TFExample(object):
    """Version 123
    This is the docstring for the class, any relevant info should go here
    This is the example class for making a TFE compliant class, class names should begin with TF
    Use docstring where relevant and @values extension for checkboxes
    """
    # With MyType as metaclass, the declared bases are replaced by (object,)
    # when this class is created (Python 2 metaclass hook).
    __metaclass__ = MyType
    def __init__(self, *args, **kwargs):
        super(TFExample, self).__init__(*args, **kwargs)
        # raise Exception('Azohenvey')
        self.name = 'TFExample'
    # def __del__(self):
    #     print('deleting...')
    def no_docs(self, cavity):
        # Intentionally undocumented placeholder method; does nothing.
        pass
    def sleep5(self):
        """
        Sleeps 5 seconds
        """
        time.sleep(5)
        # Returns a mixed tuple to exercise multi-value serialization.
        return "OK", [1, 2, 3], 4.5, {"status": "running"}
    def bad_method(self, x):
        """
        This method divides by zero
        @rtype : int
        @param x: Dummy, it doesn't actually use this
        @return: Won't return because the method should raise an exception when trying to divide by zero
        """
        # Deliberate ZeroDivisionError, for testing error handling upstream.
        return 10 / 0
    def add(self, a, b=7):
        """
        This method adds the two parameters and returns the result
        @rtype : int or float
        @return : The sum of the two parameters
        @param a: First parameter
        @param b: Second parameter
        @type b: int or float
        @type a: float
        @values a: 1,2,3,4,5,6,7,8,9,10,11
        """
        return a + b
    def makelist(self, a, b=2, c=3):
        """
        Takes the parameters and insert them in a list as is
        @param a: firstone
        @param b: second
        @param c: third
        @type a: int
        @type b: int
        @type c: int
        @values a: 1,2
        @values b: 3,4
        @values c:5,6
        """
        # Python 2 print statements: echoes the inputs before returning them.
        print 'a', a
        print 'b', b
        print 'c', c
        print a + b
        return [a, b, c]
    def complex_method(self, bcd, adc, zxc, kkk, bbb):
        """
        Takes a lof parameters placed in non alphabetical order
        @rtype : str
        @return : formatted string
        @param bcd: first argument
        @param adc: second argument
        @param zxc: third argument
        @param kkk: fourth argument
        @param bbb: fifth argument
        @type bcd: int
        @type adc: int
        @type zxc: int
        @type kkk: str
        @type bbb: str
        """
        return '{}:{}:{}:{}:{}'.format(bcd, adc, zxc, kkk, bbb)
    def get_tf_name(self, param):
        """
        All TF classes are expected to have this method with this signature, this is used to check if it is online
        @param not used:
        @return: class name
        """
        return self.__class__.__name__
    def tf_list_cavities(self):
        """
        Return a list of cavities
        @param not used:
        @return: a list of cavities
        """
        return ['cavity1', 'cavity2', 'cavity3']
    def tf_health(self):
        # Minimal health report; presumably polled by the test framework
        # harness -- verify against callers.
        return {'fixture_status': {}}
| {
"repo_name": "davidvoler/ate_meteor",
"path": "xmlrpc/tf_example.py",
"copies": "1",
"size": "3130",
"license": "mit",
"hash": -3732162088206004700,
"line_mean": 25.9827586207,
"line_max": 114,
"alpha_frac": 0.538658147,
"autogenerated": false,
"ratio": 3.7893462469733654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9826755594410013,
"avg_score": 0.00024975991267039293,
"num_lines": 116
} |
import os
import sys
import re
import sublime
import subprocess
import cssbeautifier
class CssFormatter:
    """Formats CSS text with cssbeautifier using the plugin's
    codeformatter_css_options settings."""

    def __init__(self, formatter):
        # `formatter` is the owning CodeFormatter; its `settings` object
        # supplies the user's codeformatter_css_options.
        self.formatter = formatter

    def format(self, text):
        """Beautify *text* (UTF-8 bytes) and return an (stdout, stderr) pair.

        Exactly one of the two strings is non-empty: the beautified CSS on
        success, an error message on failure.
        """
        text = text.decode("utf-8")
        opts = self.formatter.settings.get('codeformatter_css_options')
        stderr = ""
        stdout = ""

        options = cssbeautifier.default_options()
        # Each option falls back to its default whenever the user setting is
        # missing or falsy -- same effect as the original if/else chains,
        # without the copy-paste.
        options.indent_size = opts.get("indent_size") or 4
        options.indent_char = opts.get("indent_char") or ' '
        options.indent_with_tabs = bool(opts.get("indent_with_tabs"))
        options.selector_separator_newline = bool(opts.get("selector_separator_newline"))
        options.end_with_newline = bool(opts.get("end_with_newline"))
        options.eol = opts.get("eol") or "\n"

        try:
            stdout = cssbeautifier.beautify(text, options)
        except Exception as e:
            stderr = str(e)

        # beautify returning an empty string with no exception still counts
        # as a failure for the caller.
        if (not stderr and not stdout):
            stderr = "Formatting error!"
        return stdout, stderr
| {
"repo_name": "jay3126/sublimetext-codeformatter",
"path": "codeformatter/cssformatter.py",
"copies": "3",
"size": "1900",
"license": "mit",
"hash": -9084731452926822000,
"line_mean": 25.7605633803,
"line_max": 99,
"alpha_frac": 0.5731578947,
"autogenerated": false,
"ratio": 4.033970276008493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018966366018969942,
"num_lines": 71
} |
import os
import sys
import re
import sublime
import subprocess
import jsbeautifier
class JsFormatter:
    def __init__(self, formatter):
        # Keep a reference to the owning CodeFormatter so format() can read
        # the user's codeformatter_js_options from its settings.
        self.formatter = formatter
def format(self, text):
text = text.decode("utf-8")
opts = self.formatter.settings.get('codeformatter_js_options')
stderr = ""
stdout = ""
options = jsbeautifier.default_options()
if ("indent_size" in opts and opts["indent_size"]):
options.indent_size = opts["indent_size"]
else:
options.indent_size = 4
if ("indent_char" in opts and opts["indent_char"]):
options.indent_char = str(opts["indent_char"])
else:
options.indent_char = " "
if ("indent_with_tabs" in opts and opts["indent_with_tabs"]):
options.indent_with_tabs = True
else:
options.indent_with_tabs = False
if ("eol" in opts and opts["eol"]):
options.eol = opts["eol"]
else:
options.eol = "\n"
if ("preserve_newlines" in opts and opts["preserve_newlines"]):
options.preserve_newlines = True
else:
options.preserve_newlines = False
if ("max_preserve_newlines" in opts and opts["max_preserve_newlines"]):
options.max_preserve_newlines = opts["max_preserve_newlines"]
else:
options.max_preserve_newlines = 10
if ("space_in_paren" in opts and opts["space_in_paren"]):
options.space_in_paren = True
else:
options.space_in_paren = False
if ("space_in_empty_paren" in opts and opts["space_in_empty_paren"]):
options.space_in_empty_paren = True
else:
options.space_in_empty_paren = False
if ("e4x" in opts and opts["e4x"]):
options.e4x = True
else:
options.e4x = False
if ("jslint_happy" in opts and opts["jslint_happy"]):
options.jslint_happy = True
else:
options.jslint_happy = False
if ("brace_style" in opts and opts["brace_style"]):
options.brace_style = opts["brace_style"]
else:
options.brace_style = 'collapse'
if ("keep_array_indentation" in opts and opts["keep_array_indentation"]):
options.keep_array_indentation = True
else:
options.keep_array_indentation = False
if ("keep_function_indentation" in opts and opts["keep_function_indentation"]):
options.keep_function_indentation = True
else:
options.keep_function_indentation = False
if ("eval_code" in opts and opts["eval_code"]):
options.eval_code = True
else:
options.eval_code = False
if ("unescape_strings" in opts and opts["unescape_strings"]):
options.unescape_strings = True
else:
options.unescape_strings = False
if ("wrap_line_length" in opts and opts["wrap_line_length"]):
options.wrap_line_length = opts["wrap_line_length"]
else:
options.wrap_line_length = 0
if ("break_chained_methods" in opts and opts["break_chained_methods"]):
options.break_chained_methods = True
else:
options.break_chained_methods = False
if ("end_with_newline" in opts and opts["end_with_newline"]):
options.end_with_newline = True
else:
options.end_with_newline = False
if ("comma_first" in opts and opts["comma_first"]):
options.comma_first = True
else:
options.comma_first = False
try:
stdout = jsbeautifier.beautify(text, options)
except Exception as e:
stderr = str(e)
#return "", ""
if (not stderr and not stdout):
stderr = "Formatting error!"
return stdout, stderr
| {
"repo_name": "dgmdan/sublimetext-codeformatter",
"path": "codeformatter/jsformatter.py",
"copies": "3",
"size": "4186",
"license": "mit",
"hash": 6170834737703727000,
"line_mean": 27.6712328767,
"line_max": 99,
"alpha_frac": 0.5661729575,
"autogenerated": false,
"ratio": 4.08390243902439,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.615007539652439,
"avg_score": null,
"num_lines": null
} |
import os, sys, re, sublime
directory = os.path.dirname(os.path.realpath(__file__))
libs_path = os.path.join(directory, "lib")
if libs_path not in sys.path:
sys.path.append(libs_path)
try:
# Python 3
from .phpformatter import PhpFormatter
from .jsformatter import JsFormatter
from .htmlformatter import HtmlFormatter
from .cssformatter import CssFormatter
from .pyformatter import PyFormatter
except (ValueError):
# Python 2
from phpformatter import PhpFormatter
from jsformatter import JsFormatter
from htmlformatter import HtmlFormatter
from cssformatter import CssFormatter
from pyformatter import PyFormatter
class Formatter:
    """Dispatches buffer text to the per-language formatter configured for the
    current view syntax."""

    def __init__(self, view=False, file_name=False, syntax=False):
        self.platform = sublime.platform()
        self.classmap = {}
        # Sublime Text 3 reports build numbers > 3000 (dev builds: empty string).
        if sublime.version() == '' or int(sublime.version()) > 3000:
            self.st_version = 3
        else:
            self.st_version = 2
        self.file_name = file_name
        self.settings = sublime.load_settings('CodeFormatter.sublime-settings')
        self.packages_path = sublime.packages_path()
        self.syntax_file = view.settings().get('syntax')
        self.syntax = self.getSyntax() if syntax == False else syntax
        # Register each formatter class under every syntax name listed in its
        # comma-separated "syntaxes" setting.
        settings_to_class = (
            ('codeformatter_php_options', PhpFormatter),
            ('codeformatter_js_options', JsFormatter),
            ('codeformatter_css_options', CssFormatter),
            ('codeformatter_html_options', HtmlFormatter),
            ('codeformatter_python_options', PyFormatter),
        )
        for setting_name, formatter_class in settings_to_class:
            opts = self.settings.get(setting_name)
            if "syntaxes" in opts and opts["syntaxes"]:
                for syntax_name in opts["syntaxes"].split(","):
                    self.classmap[syntax_name.strip()] = formatter_class

    def format(self, text):
        """Format *text* with the formatter registered for the current syntax.

        Returns a (stdout, stderr) pair of normalized strings; stderr is
        non-empty when the syntax is unsupported or formatting failed.
        """
        try:
            formatter = self.classmap[self.syntax](self)
        except Exception:
            message = "Formatter for " + self.syntax + " files not supported."
            return self.clean(""), self.clean(message)
        try:
            stdout, stderr = formatter.format(text)
        except Exception as exc:
            stdout, stderr = "", str(exc)
        return self.clean(stdout), self.clean(stderr)

    def exists(self):
        """True when a formatter is registered for the current syntax."""
        return True if self.syntax in self.classmap else False

    def getSyntax(self):
        """Extract the lowercase base syntax name from the view's syntax file
        path, e.g. "Packages/Python/Python.tmLanguage" -> "python"."""
        pattern = re.compile(r"Packages/.*/(.+?).(?=tmLanguage|sublime-syntax)")
        match = pattern.search(self.syntax_file)
        found = match.group(1) if match else ""
        return found.lower()

    def clean(self, string):
        """Decode bytes leniently and normalize all line endings to LF."""
        if hasattr(string, 'decode'):
            string = string.decode('UTF-8', 'ignore')
        return re.sub(r'\r\n|\r', '\n', string)
| {
"repo_name": "TadeuRodrigues/sublimetext-codeformatter",
"path": "codeformatter/formatter.py",
"copies": "3",
"size": "3896",
"license": "mit",
"hash": 5236552615257220000,
"line_mean": 31.1983471074,
"line_max": 99,
"alpha_frac": 0.589835729,
"autogenerated": false,
"ratio": 3.963377416073245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014458105918161012,
"num_lines": 121
} |
import os
import re
import sublime
import subprocess
import os.path
from os.path import dirname, realpath
class PhpFormatter:
    """Formats PHP source by piping it through a bundled phar beautifier run
    with the PHP command-line interpreter."""
    def __init__(self, formatter):
        # formatter: the owning Formatter instance (provides .settings/.platform).
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_php_options')
    def format(self, text):
        """Beautify *text* (bytes) and return a (stdout, stderr) pair.

        Every option below starts from a falsy default and is overridden only
        when present AND truthy in the user settings.
        """
        php_path = 'php'
        if ('php_path' in self.opts and self.opts['php_path']):
            php_path = self.opts['php_path']
        php55_compat = False
        if ('php55_compat' in self.opts and self.opts['php55_compat']):
            php55_compat = self.opts['php55_compat']
        enable_auto_align = False
        if (
            'enable_auto_align' in self.opts and
            self.opts['enable_auto_align']
        ):
            enable_auto_align = self.opts['enable_auto_align']
        indent_with_space = False
        if (
            'indent_with_space' in self.opts and
            self.opts['indent_with_space']
        ):
            indent_with_space = self.opts['indent_with_space']
        psr1 = False
        if ('psr1' in self.opts and self.opts['psr1']):
            psr1 = self.opts['psr1']
        psr1_naming = False
        if ('psr1_naming' in self.opts and self.opts['psr1_naming']):
            psr1_naming = self.opts['psr1_naming']
        psr2 = False
        if ('psr2' in self.opts and self.opts['psr2']):
            psr2 = self.opts['psr2']
        smart_linebreak_after_curly = False
        if ('smart_linebreak_after_curly' in self.opts and self.opts['smart_linebreak_after_curly']):
            smart_linebreak_after_curly = self.opts['smart_linebreak_after_curly']
        visibility_order = False
        if ('visibility_order' in self.opts and self.opts['visibility_order']):
            visibility_order = self.opts['visibility_order']
        passes = []
        if ('passes' in self.opts and self.opts['passes']):
            passes = self.opts['passes']
        excludes = []
        if ('excludes' in self.opts and self.opts['excludes']):
            excludes = self.opts['excludes']
        # Build the command line; the order of arguments matters to the phar.
        cmd = []
        cmd.append(str(php_path))
        # Route PHP engine errors to stderr so they don't corrupt stdout.
        cmd.append('-ddisplay_errors=stderr')
        cmd.append('-dshort_open_tag=On')
        # fmt-php55.phar is presumably a PHP 5.5-compatible build of the
        # beautifier -- TODO confirm against the bundled phars.
        if php55_compat:
            formatter_path = os.path.join(
                dirname(realpath(sublime.packages_path())),
                'Packages',
                'CodeFormatter',
                'codeformatter',
                'lib',
                'phpbeautifier',
                'fmt-php55.phar'
            )
        else:
            formatter_path = os.path.join(
                dirname(realpath(sublime.packages_path())),
                'Packages',
                'CodeFormatter',
                'codeformatter',
                'lib',
                'phpbeautifier',
                'phpf.phar'
            )
        cmd.append(formatter_path)
        if psr1:
            cmd.append('--psr1')
        if psr1_naming:
            cmd.append('--psr1-naming')
        if psr2:
            cmd.append('--psr2')
        # indent_with_space may be boolean True (default width) or an integer width.
        if indent_with_space is True:
            cmd.append('--indent_with_space')
        elif indent_with_space > 0:
            cmd.append('--indent_with_space=' + str(indent_with_space))
        if enable_auto_align:
            cmd.append('--enable_auto_align')
        if visibility_order:
            cmd.append('--visibility_order')
        if smart_linebreak_after_curly:
            cmd.append('--smart_linebreak_after_curly')
        if len(passes) > 0:
            cmd.append('--passes=' + ','.join(passes))
        if len(excludes) > 0:
            cmd.append('--exclude=' + ','.join(excludes))
        # '-' makes the phar read the source to format from stdin.
        cmd.append('-')
        stderr = ''
        stdout = ''
        #print(cmd)
        try:
            if (self.formatter.platform == 'windows'):
                # Suppress the console window Popen would otherwise flash on Windows.
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = subprocess.SW_HIDE
                p = subprocess.Popen(
                    cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE, startupinfo=startupinfo,
                    shell=False, creationflags=subprocess.SW_HIDE)
            else:
                p = subprocess.Popen(
                    cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
            stdout, stderr = p.communicate(text)
        except Exception as e:
            stderr = str(e)
        if (not stderr and not stdout):
            stderr = 'Formatting error!'
        return stdout, stderr
    def format_on_save_enabled(self, file_name):
        """Return True when the "format_on_save" setting applies to *file_name*.

        The setting may be a boolean, or a regex string that is matched
        against the file name.
        """
        format_on_save = False
        if ('format_on_save' in self.opts and self.opts['format_on_save']):
            format_on_save = self.opts['format_on_save']
        if (isinstance(format_on_save, str)):
            format_on_save = re.search(format_on_save, file_name) is not None
        return format_on_save
| {
"repo_name": "crlang/sublime-text---front-end-config",
"path": "Data/Packages/CodeFormatter/codeformatter/phpformatter.py",
"copies": "2",
"size": "5222",
"license": "mit",
"hash": 8373095596663269000,
"line_mean": 30.6484848485,
"line_max": 101,
"alpha_frac": 0.5394484872,
"autogenerated": false,
"ratio": 3.8941088739746457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004960734202406775,
"num_lines": 165
} |
import os
import sys
import re
import sublime

# Make the bundled htmlbeautifier importable BEFORE importing it: the module
# lives under ./lib/htmlbeautifier, which is not on sys.path by default.
# (The original imported htmlbeautifier above this path setup, defeating it.)
directory = os.path.dirname(os.path.realpath(__file__))
libs_path = os.path.join(directory, 'lib')
libs_path = os.path.join(libs_path, 'htmlbeautifier')
if libs_path not in sys.path:
    sys.path.append(libs_path)

import htmlbeautifier

# BeautifulSoup is optional; fall back to the regexp formatter when missing.
# (except Exception, not bare except, so Ctrl-C/SystemExit still propagate.)
use_bs4 = True
try:
    from bs4 import BeautifulSoup
except Exception:
    use_bs4 = False
class HtmlFormatter:
    """Formats HTML either with BeautifulSoup (when importable) or with the
    bundled regexp-based htmlbeautifier."""

    def __init__(self, formatter):
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_html_options')

    def format(self, text):
        """Beautify *text* (UTF-8 encoded bytes); return (stdout, stderr) strings."""
        source = text.decode('utf-8')
        stdout = ''
        stderr = ''
        chosen = ''
        if 'formatter_version' in self.opts:
            chosen = self.opts['formatter_version']
            if use_bs4 is False and self.opts['formatter_version'] == 'bs4':
                # bs4 was requested but could not be imported: warn the user
                # and degrade to the regexp-based formatter.
                chosen = 'regexp'
                sublime.error_message(
                    u'CodeFormatter\n\nUnable to load BeautifulSoup HTML '
                    u'formatter. The old RegExp-based formatter was '
                    u'automatically used for you instead.'
                )
        if chosen == 'bs4' and use_bs4:
            indent = self.opts['indent_size'] if 'indent_size' in self.opts else 4
            try:
                soup = BeautifulSoup(source, 'html.parser')
                stdout = soup.prettify(formatter=None, indent_size=indent)
            except Exception as exc:
                stderr = str(exc)
        else:
            options = htmlbeautifier.default_options()
            # Copy every recognized setting straight onto the options object.
            for key in (
                'indent_size',
                'minimum_attribute_count',
                'first_attribute_on_new_line',
                'indent_with_tabs',
                'expand_tags',
                'reduce_empty_tags',
                'reduce_whole_word_tags',
                'exception_on_tag_mismatch',
                'custom_singletons',
            ):
                if key in self.opts:
                    setattr(options, key, self.opts[key])
            # indent_char is additionally coerced to str.
            if 'indent_char' in self.opts:
                options.indent_char = str(self.opts['indent_char'])
            try:
                stdout = htmlbeautifier.beautify(source, options)
            except Exception as exc:
                stderr = str(exc)
        if not stdout and not stderr:
            stderr = 'Formatting error!'
        return stdout, stderr

    def format_on_save_enabled(self, file_name):
        """Return True when the "format_on_save" setting applies to *file_name*.

        The setting may be a boolean, or a regex string matched against the
        file name.
        """
        enabled = False
        if 'format_on_save' in self.opts and self.opts['format_on_save']:
            enabled = self.opts['format_on_save']
        if isinstance(enabled, str):
            enabled = re.search(enabled, file_name) is not None
        return enabled
| {
"repo_name": "crlang/sublime-text---front-end-config",
"path": "Data/Packages/CodeFormatter/codeformatter/htmlformatter.py",
"copies": "1",
"size": "3926",
"license": "mit",
"hash": -5530302567888876000,
"line_mean": 32.5555555556,
"line_max": 97,
"alpha_frac": 0.5534895568,
"autogenerated": false,
"ratio": 3.937813440320963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.998552504491444,
"avg_score": 0.001155590441304727,
"num_lines": 117
} |
import os
import sys
import re
import sublime
import subprocess
import os.path
from os.path import dirname, realpath
class PhpFormatter:
    """Formats PHP source by piping it through the bundled fmt phar run with
    the PHP command-line interpreter."""
    def __init__(self, formatter):
        # formatter: the owning Formatter instance (provides .settings/.platform).
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_php_options')
    def format(self, text):
        """Beautify *text* (bytes) and return a (stdout, stderr) pair.

        Every option starts from a falsy default and is overridden only when
        present AND truthy in the user settings.
        """
        php_path = "php"
        if ("php_path" in self.opts and self.opts["php_path"]):
            php_path = self.opts["php_path"]
        php55_compat = False
        if ("php55_compat" in self.opts and self.opts["php55_compat"]):
            php55_compat = self.opts["php55_compat"]
        enable_auto_align = False
        if ("enable_auto_align" in self.opts and self.opts["enable_auto_align"]):
            enable_auto_align = self.opts["enable_auto_align"]
        indent_with_space = False
        if ("indent_with_space" in self.opts and self.opts["indent_with_space"]):
            indent_with_space = self.opts["indent_with_space"]
        psr1 = False
        if ("psr1" in self.opts and self.opts["psr1"]):
            psr1 = self.opts["psr1"]
        psr1_naming = False
        if ("psr1_naming" in self.opts and self.opts["psr1_naming"]):
            psr1_naming = self.opts["psr1_naming"]
        psr2 = False
        if ("psr2" in self.opts and self.opts["psr2"]):
            psr2 = self.opts["psr2"]
        smart_linebreak_after_curly = False
        if ("smart_linebreak_after_curly" in self.opts and self.opts["smart_linebreak_after_curly"]):
            smart_linebreak_after_curly = self.opts["smart_linebreak_after_curly"]
        visibility_order = False
        if ("visibility_order" in self.opts and self.opts["visibility_order"]):
            visibility_order = self.opts["visibility_order"]
        passes = []
        if ("passes" in self.opts and self.opts["passes"]):
            passes = self.opts["passes"]
        excludes = []
        if ("excludes" in self.opts and self.opts["excludes"]):
            excludes = self.opts["excludes"]
        # Build the command line; the order of arguments matters to the phar.
        cmd = []
        cmd.append(str(php_path))
        # Route PHP engine errors to stderr so they don't corrupt stdout.
        cmd.append("-ddisplay_errors=stderr")
        cmd.append("-dshort_open_tag=On")
        # FIX: the original assigned formatter_path unconditionally here and
        # then immediately reassigned it in the if/else below; the dead
        # assignment has been removed (behavior unchanged).
        if php55_compat:
            formatter_path = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "CodeFormatter", "codeformatter", "lib", "phpbeautifier", "fmt-php55.phar")
        else:
            formatter_path = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "CodeFormatter", "codeformatter", "lib", "phpbeautifier", "fmt.phar")
        cmd.append(formatter_path)
        if psr1:
            cmd.append("--psr1")
        if psr1_naming:
            cmd.append("--psr1-naming")
        if psr2:
            cmd.append("--psr2")
        # indent_with_space may be boolean True (default width) or an integer width.
        if indent_with_space is True:
            cmd.append("--indent_with_space")
        elif indent_with_space > 0:
            cmd.append("--indent_with_space="+str(indent_with_space))
        if enable_auto_align:
            cmd.append("--enable_auto_align")
        if visibility_order:
            cmd.append("--visibility_order")
        if smart_linebreak_after_curly:
            cmd.append("--smart_linebreak_after_curly")
        if len(passes) > 0:
            cmd.append("--passes="+','.join(passes))
        if len(excludes) > 0:
            cmd.append("--exclude="+','.join(excludes))
        # "-" makes the phar read the source to format from stdin.
        cmd.append("-")
        stderr = ""
        stdout = ""
        try:
            if (self.formatter.platform == "windows"):
                # Suppress the console window Popen would otherwise flash on Windows.
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = subprocess.SW_HIDE
                p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, shell=False, creationflags=subprocess.SW_HIDE)
            else:
                p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate(text)
        except Exception as e:
            stderr = str(e)
        if (not stderr and not stdout):
            stderr = "Formatting error!"
        return stdout, stderr
    def formatOnSaveEnabled(self):
        """Return the raw "format_on_save" setting when present and truthy,
        otherwise False."""
        format_on_save = False
        if ("format_on_save" in self.opts and self.opts["format_on_save"]):
            format_on_save = self.opts["format_on_save"]
        return format_on_save
| {
"repo_name": "RevanProdigalKnight/sublimetext-codeformatter",
"path": "codeformatter/phpformatter.py",
"copies": "1",
"size": "4835",
"license": "mit",
"hash": -6730934610643223000,
"line_mean": 32.8111888112,
"line_max": 184,
"alpha_frac": 0.5904860393,
"autogenerated": false,
"ratio": 3.671222475322703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4761708514622703,
"avg_score": null,
"num_lines": null
} |
import os
import sys
import re
import sublime
import subprocess
# Make the bundled htmlbeautifier importable before importing it below:
# the module lives under ./lib/htmlbeautifier, which is not on sys.path.
directory = os.path.dirname(os.path.realpath(__file__))
libs_path = os.path.join(directory, "lib")
libs_path = os.path.join(libs_path, "htmlbeautifier")
if libs_path not in sys.path:
    sys.path.append(libs_path)
import htmlbeautifier
# BeautifulSoup is optional; when missing, the regexp formatter is used instead.
use_bs4 = True
try:
    from bs4 import BeautifulSoup
except:
    use_bs4 = False
class HtmlFormatter:
    """Formats HTML with BeautifulSoup when available, otherwise with the
    bundled regexp-based htmlbeautifier."""
    def __init__(self, formatter):
        # formatter: the owning Formatter instance (provides .settings).
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_html_options')
    def format(self, text):
        """Beautify *text* (UTF-8 encoded bytes); return (stdout, stderr) strings."""
        text = text.decode("utf-8")
        stderr = ""
        stdout = ""
        formatter = ""
        if "formatter_version" in self.opts:
            formatter = self.opts["formatter_version"]
            # FIX: compare against the False singleton with "is" (PEP 8 /
            # flake8 E712) instead of the original "use_bs4 == False".
            if use_bs4 is False and self.opts["formatter_version"] == "bs4":
                # bs4 requested but not importable: warn and degrade to regexp.
                formatter = "regexp"
                sublime.error_message(u'CodeFormatter\n\nUnable to load BeautifulSoup HTML formatter. The old RegExp-based formatter was automatically used for you instead.')
        if formatter == "bs4" and use_bs4:
            p_indent_size = 4
            if "indent_size" in self.opts:
                p_indent_size = self.opts["indent_size"]
            try:
                soup = BeautifulSoup(text, 'html.parser')
                stdout = soup.prettify(formatter=None, indent_size=p_indent_size)
            except Exception as e:
                stderr = str(e)
        else:
            # Regexp-based path: copy each recognized setting onto the options.
            options = htmlbeautifier.default_options()
            if "indent_size" in self.opts:
                options.indent_size = self.opts["indent_size"]
            if "indent_char" in self.opts:
                options.indent_char = str(self.opts["indent_char"])
            if "minimum_attribute_count" in self.opts:
                options.minimum_attribute_count = self.opts["minimum_attribute_count"]
            if "first_attribute_on_new_line" in self.opts:
                options.first_attribute_on_new_line = self.opts["first_attribute_on_new_line"]
            if "indent_with_tabs" in self.opts:
                options.indent_with_tabs = self.opts["indent_with_tabs"]
            if "expand_tags" in self.opts:
                options.expand_tags = self.opts["expand_tags"]
            if "reduce_empty_tags" in self.opts:
                options.reduce_empty_tags = self.opts["reduce_empty_tags"]
            if "exception_on_tag_mismatch" in self.opts:
                options.exception_on_tag_mismatch = self.opts["exception_on_tag_mismatch"]
            if "custom_singletons" in self.opts:
                options.custom_singletons = self.opts["custom_singletons"]
            try:
                stdout = htmlbeautifier.beautify(text, options)
            except Exception as e:
                stderr = str(e)
        if (not stderr and not stdout):
            stderr = "Formatting error!"
        return stdout, stderr
    def formatOnSaveEnabled(self):
        """Return the raw "format_on_save" setting when present and truthy,
        otherwise False."""
        format_on_save = False
        if ("format_on_save" in self.opts and self.opts["format_on_save"]):
            format_on_save = self.opts["format_on_save"]
        return format_on_save
| {
"repo_name": "RevanProdigalKnight/sublimetext-codeformatter",
"path": "codeformatter/htmlformatter.py",
"copies": "1",
"size": "3401",
"license": "mit",
"hash": 1277315414199450000,
"line_mean": 33.3535353535,
"line_max": 174,
"alpha_frac": 0.5930608645,
"autogenerated": false,
"ratio": 3.829954954954955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4923015819454955,
"avg_score": null,
"num_lines": null
} |
import os
import sys
import re
import sublime
import subprocess
import coldfusionbeautifier
class ColdfusionFormatter:
    """Formats ColdFusion markup through the bundled coldfusionbeautifier."""

    def __init__(self, formatter):
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_coldfusion_options')

    def format(self, text):
        """Beautify *text* (UTF-8 encoded bytes); return (stdout, stderr) strings."""
        source = text.decode("utf-8")
        options = coldfusionbeautifier.default_options()
        # Copy each recognized setting straight onto the options object.
        for key in (
            "indent_size",
            "minimum_attribute_count",
            "first_attribute_on_new_line",
            "indent_with_tabs",
            "expand_tags",
            "expand_javascript",
            "reduce_empty_tags",
            "exception_on_tag_mismatch",
            "custom_singletons",
        ):
            if key in self.opts:
                setattr(options, key, self.opts[key])
        # indent_char is additionally coerced to str.
        if "indent_char" in self.opts:
            options.indent_char = str(self.opts["indent_char"])
        stdout = ""
        stderr = ""
        try:
            stdout = coldfusionbeautifier.beautify(source, options)
        except Exception as exc:
            stderr = str(exc)
        if not stdout and not stderr:
            stderr = "Formatting error!"
        return stdout, stderr

    def formatOnSaveEnabled(self):
        """Return the raw "format_on_save" setting when truthy, else False."""
        if "format_on_save" in self.opts and self.opts["format_on_save"]:
            return self.opts["format_on_save"]
        return False
| {
"repo_name": "RevanProdigalKnight/sublimetext-codeformatter",
"path": "codeformatter/coldfusionformatter.py",
"copies": "1",
"size": "2326",
"license": "mit",
"hash": 1151307143303282300,
"line_mean": 32.2285714286,
"line_max": 99,
"alpha_frac": 0.6229578676,
"autogenerated": false,
"ratio": 3.7097288676236047,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9826860414247127,
"avg_score": 0.001165264195295651,
"num_lines": 70
} |
import os
import sys
import re
import sublime
import subprocess
import cssbeautifier
class CssFormatter:
    """Formats CSS text through the bundled cssbeautifier library."""

    def __init__(self, formatter):
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_css_options')

    def format(self, text):
        """Beautify *text* (UTF-8 encoded bytes); return (stdout, stderr) strings."""
        source = text.decode("utf-8")
        options = cssbeautifier.default_options()
        # Truthy user settings win; otherwise the fallbacks below apply.
        options.indent_size = self.opts.get("indent_size") or 4
        options.indent_char = self.opts.get("indent_char") or ' '
        options.indent_with_tabs = bool(self.opts.get("indent_with_tabs"))
        options.selector_separator_newline = bool(
            self.opts.get("selector_separator_newline"))
        options.end_with_newline = bool(self.opts.get("end_with_newline"))
        options.eol = self.opts.get("eol") or "\n"
        stdout = ""
        stderr = ""
        try:
            stdout = cssbeautifier.beautify(source, options)
        except Exception as exc:
            stderr = str(exc)
        if not stdout and not stderr:
            stderr = "Formatting error!"
        return stdout, stderr

    def formatOnSaveEnabled(self):
        """Return the raw "format_on_save" setting when truthy, else False."""
        if "format_on_save" in self.opts and self.opts["format_on_save"]:
            return self.opts["format_on_save"]
        return False
| {
"repo_name": "RevanProdigalKnight/sublimetext-codeformatter",
"path": "codeformatter/cssformatter.py",
"copies": "1",
"size": "2206",
"license": "mit",
"hash": 5929386929632855000,
"line_mean": 28.4133333333,
"line_max": 99,
"alpha_frac": 0.5856754306,
"autogenerated": false,
"ratio": 3.890652557319224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9890035336910914,
"avg_score": 0.017258530201661914,
"num_lines": 75
} |
import os
import sys
import re
import sublime
import subprocess
import jsbeautifier
class JsFormatter:
    """Formats JavaScript text through the bundled jsbeautifier library."""
    def __init__(self, formatter):
        # formatter: the owning Formatter instance (provides .settings).
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_js_options')
    def format(self, text):
        """Beautify *text* (UTF-8 encoded bytes); return (stdout, stderr) strings.

        User settings win when present and truthy; otherwise the fallbacks
        listed below are applied.
        """
        # FIX: the original re-read the settings into an unused local "opts"
        # here; self.opts (populated in __init__) is the value actually used.
        source = text.decode("utf-8")
        options = jsbeautifier.default_options()
        # Pass-through options: user value when truthy, else the fallback.
        for key, fallback in (
            ("indent_size", 4),
            ("eol", "\n"),
            ("max_preserve_newlines", 10),
            ("brace_style", "collapse"),
            ("wrap_line_length", 0),
        ):
            setattr(options, key, self.opts.get(key) or fallback)
        # indent_char is additionally coerced to str; defaults to one space.
        options.indent_char = str(self.opts.get("indent_char") or " ")
        # Boolean switches: True only when present and truthy in the settings.
        for key in (
            "indent_with_tabs",
            "preserve_newlines",
            "space_in_paren",
            "space_in_empty_paren",
            "e4x",
            "jslint_happy",
            "keep_array_indentation",
            "keep_function_indentation",
            "eval_code",
            "unescape_strings",
            "break_chained_methods",
            "end_with_newline",
            "comma_first",
            "space_after_anon_function",
        ):
            setattr(options, key, bool(self.opts.get(key)))
        stdout = ""
        stderr = ""
        try:
            stdout = jsbeautifier.beautify(source, options)
        except Exception as e:
            stderr = str(e)
        if not stdout and not stderr:
            stderr = "Formatting error!"
        return stdout, stderr
    def formatOnSaveEnabled(self):
        """Return the raw "format_on_save" setting when present and truthy,
        otherwise False."""
        format_on_save = False
        if ("format_on_save" in self.opts and self.opts["format_on_save"]):
            format_on_save = self.opts["format_on_save"]
        return format_on_save
| {
"repo_name": "RevanProdigalKnight/sublimetext-codeformatter",
"path": "codeformatter/jsformatter.py",
"copies": "1",
"size": "4934",
"license": "mit",
"hash": 3030268438215400000,
"line_mean": 29.4567901235,
"line_max": 99,
"alpha_frac": 0.5830968788,
"autogenerated": false,
"ratio": 3.928343949044586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5011440827844585,
"avg_score": null,
"num_lines": null
} |
import os
import sys
import re
import sublime
import subprocess
import vbscriptbeautifier
class VbscriptFormatter:
    """Formats VBScript text through the bundled vbscriptbeautifier."""
    def __init__(self, formatter):
        # formatter: the owning Formatter instance (provides .settings).
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_vbscript_options')
    def format(self, text):
        """Beautify *text* (UTF-8 encoded bytes); return (stdout, stderr) strings."""
        text = text.decode("utf-8")
        stderr = ""
        stdout = ""
        options = vbscriptbeautifier.default_options()
        if ("indent_size" in self.opts and self.opts["indent_size"]):
            options.indent_size = self.opts["indent_size"]
        else:
            options.indent_size = 1
        if ("indent_char" in self.opts and self.opts["indent_char"]):
            options.indent_char = str(self.opts["indent_char"])
        else:
            options.indent_char = "\t"
        # FIX: the original if/else here assigned True on BOTH branches, so
        # the conditional was dead code; collapsed to a single assignment
        # (behavior unchanged).
        # NOTE(review): sibling formatters assign False when the setting is
        # absent/falsy -- confirm whether "indent_with_tabs" was meant to be
        # honored here rather than forced on.
        options.indent_with_tabs = True
        if ("preserve_newlines" in self.opts and self.opts["preserve_newlines"]):
            options.preserve_newlines = True
        else:
            options.preserve_newlines = False
        if ("max_preserve_newlines" in self.opts and self.opts["max_preserve_newlines"]):
            options.max_preserve_newlines = self.opts["max_preserve_newlines"]
        else:
            options.max_preserve_newlines = 10
        # Tag markers are only overridden when provided (no fallback values).
        if ("opening_tags" in self.opts and self.opts["opening_tags"]):
            options.opening_tags = str(self.opts["opening_tags"])
        if ("middle_tags" in self.opts and self.opts["middle_tags"]):
            options.middle_tags = str(self.opts["middle_tags"])
        if ("closing_tags" in self.opts and self.opts["closing_tags"]):
            options.closing_tags = str(self.opts["closing_tags"])
        try:
            stdout = vbscriptbeautifier.beautify(text, options)
        except Exception as e:
            stderr = str(e)
        if (not stderr and not stdout):
            stderr = "Formatting error!"
        return stdout, stderr
    def formatOnSaveEnabled(self):
        """Return the raw "format_on_save" setting when present and truthy,
        otherwise False."""
        format_on_save = False
        if ("format_on_save" in self.opts and self.opts["format_on_save"]):
            format_on_save = self.opts["format_on_save"]
        return format_on_save
| {
"repo_name": "RevanProdigalKnight/sublimetext-codeformatter",
"path": "codeformatter/vbscriptformatter.py",
"copies": "1",
"size": "2517",
"license": "mit",
"hash": -8625865103353759000,
"line_mean": 30.8607594937,
"line_max": 99,
"alpha_frac": 0.6015097338,
"autogenerated": false,
"ratio": 3.8604294478527605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9881396474019422,
"avg_score": 0.0161085415266676,
"num_lines": 79
} |
import os
import sys
import re
import sublime
# Make the plugin's bundled ./lib directory importable.
directory = os.path.dirname(os.path.realpath(__file__))
libs_path = os.path.join(directory, 'lib')
if libs_path not in sys.path:
    sys.path.append(libs_path)
try:
# Python 3
from .phpformatter import PhpFormatter
from .jsformatter import JsFormatter
from .htmlformatter import HtmlFormatter
from .cssformatter import CssFormatter
from .scssformatter import ScssFormatter
from .pyformatter import PyFormatter
from .vbscriptformatter import VbscriptFormatter
from .coldfusionformatter import ColdfusionFormatter
except (ValueError):
# Python 2
from phpformatter import PhpFormatter
from jsformatter import JsFormatter
from htmlformatter import HtmlFormatter
from cssformatter import CssFormatter
from scssformatter import ScssFormatter
from pyformatter import PyFormatter
from vbscriptformatter import VbscriptFormatter
from coldfusionformatter import ColdfusionFormatter
class Formatter:
    """Dispatches buffer text to a language-specific formatter class.

    Builds a syntax-name -> formatter-class map from each language's
    settings block and exposes format / exists / format_on_save_enabled.
    """

    def __init__(self, view, syntax=None):
        self.platform = sublime.platform()
        self.classmap = {}
        # ST3 reports a build number above 3000; an empty version string
        # is also treated as ST3.
        version = sublime.version()
        self.st_version = 3 if version == '' or int(version) > 3000 else 2
        self.file_name = view.file_name()
        self.settings = sublime.load_settings('CodeFormatter.sublime-settings')
        self.packages_path = sublime.packages_path()
        self.syntax_file = view.settings().get('syntax')
        self.syntax = syntax or self.get_syntax()
        # Settings key -> formatter class.  Each settings block declares a
        # comma-separated "syntaxes" list handled by that class.
        settings_to_class = (
            ('codeformatter_php_options', PhpFormatter),
            ('codeformatter_js_options', JsFormatter),
            ('codeformatter_css_options', CssFormatter),
            ('codeformatter_html_options', HtmlFormatter),
            ('codeformatter_python_options', PyFormatter),
            ('codeformatter_vbscript_options', VbscriptFormatter),
            ('codeformatter_scss_options', ScssFormatter),
            ('codeformatter_coldfusion_options', ColdfusionFormatter),
        )
        for settings_key, formatter_class in settings_to_class:
            declared = self.settings.get(settings_key, {}).get('syntaxes')
            if not declared or not isinstance(declared, str):
                continue
            for syntax_name in declared.split(','):
                self.classmap[syntax_name.strip()] = formatter_class

    def format(self, text):
        """Format *text* with the class registered for self.syntax.

        Returns a (stdout, stderr) pair of cleaned strings; any exception
        from the formatter is captured into stderr.
        """
        handler = self.classmap[self.syntax](self)
        try:
            stdout, stderr = handler.format(text)
        except Exception as e:
            stdout, stderr = '', str(e)
        return self.clean(stdout), self.clean(stderr)

    def exists(self):
        """True when a formatter class is registered for self.syntax."""
        return self.syntax in self.classmap

    def get_syntax(self):
        """Extract the lowercase syntax name from the view's syntax path."""
        match = re.search(
            r'Packages/.*/(.+?).(?=tmLanguage|sublime-syntax)',
            self.syntax_file)
        name = match.groups()[0] if match and match.groups() else ''
        return name.lower()

    def format_on_save_enabled(self):
        """Ask the registered formatter whether format-on-save applies."""
        if not self.exists():
            return False
        handler = self.classmap[self.syntax](self)
        return handler.format_on_save_enabled(self.file_name)

    def clean(self, string):
        """Decode bytes (utf-8, lossy) and normalize CR/CRLF to LF."""
        text = string.decode('UTF-8', 'ignore') if hasattr(string, 'decode') else string
        return re.sub(r'\r\n|\r', '\n', text)
| {
"repo_name": "crlang/sublime-text---front-end-config",
"path": "Data/Packages/CodeFormatter/codeformatter/formatter.py",
"copies": "1",
"size": "3680",
"license": "mit",
"hash": -429174601770344960,
"line_mean": 33.0740740741,
"line_max": 99,
"alpha_frac": 0.6366847826,
"autogenerated": false,
"ratio": 3.9065817409766455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5043266523576646,
"avg_score": null,
"num_lines": null
} |
import os
import sys
import re
import sublime
# Detect the Sublime Text generation: builds above 3000 (or an empty
# version string) are treated as ST3.
st_version = 2
if sublime.version() == '' or int(sublime.version()) > 3000:
    st_version = 3
# The bundled Beautifier is only imported under ST2; the print below
# states that Python formatting is unsupported on ST3.
if (st_version == 2):
    from pybeautifier import Beautifier
else:
    #from .pybeautifier import Beautifier
    print('CodeFormatter: formatting python files on ST3 not supported.')
class PyFormatter:
    """Formats Python source through the bundled Beautifier (ST2 only).

    Reads 'codeformatter_python_options' from the plugin settings and
    translates each option into the upper-cased key the Beautifier
    expects.
    """

    # (settings key, Beautifier key, default) for every pass-through
    # option.  NOTE: a falsy configured value falls back to the default
    # (original behavior preserved), so e.g. keep_blank_lines cannot be
    # explicitly disabled by setting it to False.
    _OPTION_TABLE = (
        ('max_char', 'COL_LIMIT', 80),
        ('assignment', 'ASSIGNMENT', ' = '),
        ('function_param_assignment', 'FUNCTION_PARAM_ASSIGNMENT', '='),
        ('function_param_sep', 'FUNCTION_PARAM_SEP', ', '),
        ('list_sep', 'LIST_SEP', ', '),
        ('subscript_sep', 'SUBSCRIPT_SEP', '='),
        ('dict_colon', 'DICT_COLON', ': '),
        ('slice_colon', 'SLICE_COLON', ': '),
        ('comment_prefix', 'COMMENT_PREFIX', '# '),
        ('shebang', 'SHEBANG', '#!/usr/bin/env python'),
        ('boilerplate', 'BOILERPLATE', ''),
        ('blank_line', 'BLANK_LINE', ''),
        ('keep_blank_lines', 'KEEP_BLANK_LINES', True),
        ('add_blank_lines_around_comments',
         'ADD_BLANK_LINES_AROUND_COMMENTS', True),
        ('add_blank_line_after_doc_string',
         'ADD_BLANK_LINE_AFTER_DOC_STRING', True),
        ('max_seps_func_def', 'MAX_SEPS_FUNC_DEF', 3),
        ('max_seps_func_ref', 'MAX_SEPS_FUNC_REF', 5),
        ('max_seps_series', 'MAX_SEPS_SERIES', 5),
        ('max_seps_dict', 'MAX_SEPS_DICT', 3),
        ('max_lines_before_split_lit', 'MAX_LINES_BEFORE_SPLIT_LIT', 2),
        ('left_margin', 'LEFT_MARGIN', ''),
        ('normalize_doc_strings', 'NORMALIZE_DOC_STRINGS', False),
        ('leftjust_doc_strings', 'LEFTJUST_DOC_STRINGS', False),
        ('wrap_doc_strings', 'WRAP_DOC_STRINGS', False),
        ('leftjust_comments', 'LEFTJUST_COMMENTS', False),
        ('wrap_comments', 'WRAP_COMMENTS', False),
        ('double_quoted_strings', 'DOUBLE_QUOTED_STRINGS', False),
        ('single_quoted_strings', 'SINGLE_QUOTED_STRINGS', False),
        ('can_split_strings', 'CAN_SPLIT_STRINGS', False),
        ('doc_tab_replacement', 'DOC_TAB_REPLACEMENT', '....'),
        ('keep_unassigned_constants', 'KEEP_UNASSIGNED_CONSTANTS', False),
        ('parenthesize_tuple_display', 'PARENTHESIZE_TUPLE_DISPLAY', True),
        ('java_style_list_dedent', 'JAVA_STYLE_LIST_DEDENT', False),
    )

    def __init__(self, formatter):
        # Owning Formatter (provides st_version) and the user options dict.
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_python_options')

    def format(self, text):
        """Beautify *text*; returns a (stdout, stderr) pair.

        On ST3 the Beautifier is unavailable, so an error message is
        returned in stderr instead.
        """
        if (self.formatter.st_version == 3):
            stdout = ""
            stderr = "formatting python files on ST3 not supported!"
            return stdout, stderr
        options = {}
        # Fix: missing settings keys used to raise KeyError; .get() makes
        # every option optional.
        indent_size = self.opts.get("indent_size") or 1
        if self.opts.get("indent_with_tabs"):
            # Fix: both branches previously produced spaces, so the
            # indent_with_tabs option was a no-op.
            options['INDENTATION'] = '\t' * indent_size
        else:
            options['INDENTATION'] = ' ' * indent_size
        for opt_key, beautifier_key, default in self._OPTION_TABLE:
            value = self.opts.get(opt_key)
            options[beautifier_key] = value if value else default
        beautifier = Beautifier(self.formatter)
        stdout, stderr = beautifier.beautify(text, options)
        return stdout, stderr

    def formatOnSaveEnabled(self):
        """Return the format_on_save setting, or False when unset/falsy."""
        return self.opts.get("format_on_save") or False
| {
"repo_name": "RevanProdigalKnight/sublimetext-codeformatter",
"path": "codeformatter/pyformatter.py",
"copies": "1",
"size": "9914",
"license": "mit",
"hash": 7338739944273438000,
"line_mean": 32.0466666667,
"line_max": 99,
"alpha_frac": 0.5676820658,
"autogenerated": false,
"ratio": 3.8160123171670515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4883694382967051,
"avg_score": null,
"num_lines": null
} |
import os
import sys
import sublime
import sublime_plugin
# Detect the Sublime Text generation: builds above 3000 (or an empty
# version string) are treated as ST3.
st_version = 2
if sublime.version() == '' or int(sublime.version()) > 3000:
    st_version = 3
reloader_name = 'codeformatter.reloader'
# ST3 loads each package as a module, so it needs an extra prefix
if st_version == 3:
    reloader_name = 'CodeFormatter.' + reloader_name
    from imp import reload
# Re-run the reloader module if it was already imported (presumably to
# pick up plugin code changes without restarting — TODO confirm).
if reloader_name in sys.modules:
    reload(sys.modules[reloader_name])
try:
# Python 3
from .codeformatter.formatter import Formatter
except (ValueError):
# Python 2
from codeformatter.formatter import Formatter
# fix for ST2
# Look up the real print builtin via globals() so the name binds even
# under ST2's Python 2, where 'print' is a statement.
cprint = globals()['__builtins__']['print']
# Module-level debug flag.
debug_mode = False
def plugin_loaded():
    """Announce start-up and mark the PHP beautifier phar executable.

    On non-Windows platforms the bundled fmt.phar needs the owner
    execute bit before it can be invoked.
    """
    cprint('CodeFormatter: Plugin Initialized')
    if sublime.platform() == 'windows':
        return
    import stat
    phar_path = (
        sublime.packages_path()
        + '/CodeFormatter/codeformatter/lib/phpbeautifier/fmt.phar'
    )
    mode = os.stat(phar_path).st_mode
    os.chmod(phar_path, mode | stat.S_IEXEC)
# ST2 does not invoke plugin_loaded() automatically, so call it at
# import time there.
if st_version == 2:
    plugin_loaded()
class CodeFormatterCommand(sublime_plugin.TextCommand):
    """Text command that formats the current view in place."""

    def run(self, edit, syntax=None, saving=None):
        # All real work happens in the module-level run_formatter helper.
        forwarded = {'syntax': syntax, 'saving': saving}
        run_formatter(self.view, edit, **forwarded)
class CodeFormatterOpenTabsCommand(sublime_plugin.TextCommand):
    """Formats every view open in the active window.

    quiet=True suppresses error dialogs so one unformattable tab does
    not interrupt the batch.
    """

    def run(self, edit, syntax=None):
        for open_view in sublime.active_window().views():
            run_formatter(open_view, edit, quiet=True)
class CodeFormatterEventListener(sublime_plugin.EventListener):
    """Event hook that runs the formatter just before a buffer is saved."""

    def on_pre_save(self, view):
        # 'saving': True lets run_formatter honor the per-language
        # format_on_save setting instead of always formatting.
        view.run_command('code_formatter', {'saving': True})
class CodeFormatterShowPhpTransformationsCommand(sublime_plugin.TextCommand):
    """Shows the PHP transformation passes in an output panel.

    Invokes the bundled fmt.phar (PHP 5.5 compat) or phpf.phar with
    ``--list`` and prints the result to a 'paneltranformations' panel.
    """

    def run(self, edit, syntax=False):
        import subprocess
        import re
        platform = sublime.platform()
        settings = sublime.load_settings('CodeFormatter.sublime-settings')
        opts = settings.get('codeformatter_php_options')
        # Resolve the PHP binary; fall back to whatever 'php' is on PATH.
        php_path = 'php'
        if ('php_path' in opts and opts['php_path']):
            php_path = opts['php_path']
        # Choose the phar matching the configured PHP compatibility level.
        php55_compat = False
        if ('php55_compat' in opts and opts['php55_compat']):
            php55_compat = opts['php55_compat']
        cmd = []
        cmd.append(str(php_path))
        if php55_compat:
            cmd.append(
                '{}/CodeFormatter/codeformatter/lib/phpbeautifier/fmt.phar'.format(
                    sublime.packages_path()))
        else:
            cmd.append(
                '{}/CodeFormatter/codeformatter/lib/phpbeautifier/phpf.phar'.format(
                    sublime.packages_path()))
        cmd.append('--list')
        stderr = ''
        stdout = ''
        try:
            if (platform == 'windows'):
                # Hide the console window that Popen would otherwise open.
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = subprocess.SW_HIDE
                p = subprocess.Popen(
                    cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE, startupinfo=startupinfo,
                    shell=False, creationflags=subprocess.SW_HIDE)
            else:
                p = subprocess.Popen(
                    cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
        except Exception as e:
            stderr = str(e)
        if (not stderr and not stdout):
            # Fix: 'gethering' -> 'gathering' in the user-facing message.
            stderr = 'Error while gathering list of php transformations'
        if len(stderr) == 0 and len(stdout) > 0:
            text = stdout.decode('utf-8')
            # Fix: 'Tranformations' -> 'Transformations' in the header.
            text = re.sub(
                'Usage:.*?PASSNAME', 'Available PHP Transformations:', text)
            window = self.view.window()
            pt = window.get_output_panel('paneltranformations')
            pt.set_read_only(False)
            pt.insert(edit, pt.size(), text)
            window.run_command(
                'show_panel', {'panel': 'output.paneltranformations'})
        else:
            # NOTE(review): stderr from communicate() is bytes here, so this
            # concatenation can raise on Python 3 — confirm intended usage.
            show_error('Formatter error:\n' + stderr)
def run_formatter(view, edit, *args, **kwargs):
    """Format the whole buffer of *view* in place.

    Recognized keyword arguments:
      syntax -- explicit syntax name, overriding detection
      saving -- True when called from on_pre_save; honors format_on_save
      quiet  -- suppress user-facing error dialogs
    """
    if view.is_scratch():
        show_error('File is scratch')
        return
    syntax = kwargs.get('syntax')
    saving = kwargs.get('saving', False)
    quiet = kwargs.get('quiet', False)
    formatter = Formatter(view, syntax)
    if not formatter.exists():
        if not quiet and not saving:
            show_error('Formatter for this file type ({}) not found.'.format(
                formatter.syntax))
        return
    if saving and not formatter.format_on_save_enabled():
        return
    whole_buffer = sublime.Region(0, view.size())
    encoded = view.substr(whole_buffer).encode('utf-8')
    if not encoded:
        return
    stdout, stderr = formatter.format(encoded)
    if len(stderr) == 0 and len(stdout) > 0:
        view.replace(edit, whole_buffer, stdout)
    elif not quiet:
        show_error('Format error:\n' + stderr)
def console_write(text, prefix=False):
    """Write *text* plus a newline to stdout, optionally plugin-prefixed."""
    out = sys.stdout
    if prefix:
        out.write('CodeFormatter: ')
    out.write(text + '\n')


def debug_write(text, prefix=False):
    """Debug message; always prefixed (the *prefix* argument is ignored)."""
    console_write(text, True)
def show_error(text):
    """Display *text* in a modal Sublime error dialog, plugin-branded."""
    sublime.error_message(u'CodeFormatter\n\n%s' % text)
| {
"repo_name": "crlang/sublime-text---front-end-config",
"path": "Data/Packages/CodeFormatter/CodeFormatter.py",
"copies": "2",
"size": "6068",
"license": "mit",
"hash": -5956755684221830000,
"line_mean": 28.7450980392,
"line_max": 99,
"alpha_frac": 0.6031641397,
"autogenerated": false,
"ratio": 3.7619342839429635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008675282148018098,
"num_lines": 204
} |
import os, sys, re, sublime
# Make the plugin's bundled ./lib directory importable.
directory = os.path.dirname(os.path.realpath(__file__))
libs_path = os.path.join(directory, "lib")
if libs_path not in sys.path:
    sys.path.append(libs_path)
try:
# Python 3
from .phpformatter import PhpFormatter
from .jsformatter import JsFormatter
from .htmlformatter import HtmlFormatter
from .cssformatter import CssFormatter
from .scssformatter import ScssFormatter
from .pyformatter import PyFormatter
from .vbscriptformatter import VbscriptFormatter
from .coldfusionformatter import ColdfusionFormatter
except (ValueError):
# Python 2
from phpformatter import PhpFormatter
from jsformatter import JsFormatter
from htmlformatter import HtmlFormatter
from cssformatter import CssFormatter
from scssformatter import ScssFormatter
from pyformatter import PyFormatter
from vbscriptformatter import VbscriptFormatter
from coldfusionformatter import ColdfusionFormatter
class Formatter:
    """Maps syntax names to language formatter classes.

    Reads each language's options block from the plugin settings and
    registers its comma-separated 'syntaxes' entries under the matching
    formatter class.
    """

    def __init__(self, view=False, file_name=False, syntax=False, saving=False):
        self.platform = sublime.platform()
        self.classmap = {}
        # ST3 reports a build number above 3000; an empty version string
        # is also treated as ST3.
        self.st_version = 2
        if sublime.version() == '' or int(sublime.version()) > 3000:
            self.st_version = 3
        self.file_name = file_name
        self.settings = sublime.load_settings('CodeFormatter.sublime-settings')
        self.packages_path = sublime.packages_path()
        self.syntax_file = view.settings().get('syntax')
        if syntax == False:
            self.syntax = self.getSyntax()
        else:
            self.syntax = syntax
        self.saving = saving
        # One loop replaces eight copy-pasted registration stanzas.
        settings_to_class = (
            ('codeformatter_php_options', PhpFormatter),
            ('codeformatter_js_options', JsFormatter),
            ('codeformatter_css_options', CssFormatter),
            ('codeformatter_html_options', HtmlFormatter),
            ('codeformatter_python_options', PyFormatter),
            ('codeformatter_vbscript_options', VbscriptFormatter),
            ('codeformatter_scss_options', ScssFormatter),
            ('codeformatter_coldfusion_options', ColdfusionFormatter),
        )
        for settings_key, formatter_class in settings_to_class:
            opts = self.settings.get(settings_key)
            # Fix: a missing settings block used to raise TypeError on the
            # 'in' test; guard against None.
            if not (opts and 'syntaxes' in opts and opts['syntaxes']):
                continue
            for name in opts['syntaxes'].split(','):
                self.classmap[name.strip()] = formatter_class

    def format(self, text):
        """Run the registered formatter; returns cleaned (stdout, stderr)."""
        handler = self.classmap[self.syntax](self)
        try:
            stdout, stderr = handler.format(text)
        except Exception as e:
            stdout = ''
            stderr = str(e)
        return self.clean(stdout), self.clean(stderr)

    def exists(self):
        """True when a formatter is registered for the detected syntax."""
        return self.syntax in self.classmap

    def getSyntax(self):
        """Extract the lowercase syntax name from the syntax file path."""
        pattern = re.compile(r'Packages/.*/(.+?).(?=tmLanguage|sublime-syntax)')
        m = pattern.search(self.syntax_file)
        found = ''
        if m and m.groups():
            found = m.groups()[0]
        return found.lower()

    def formatOnSaveEnabled(self):
        """Ask the syntax's formatter whether format-on-save is enabled."""
        if not self.exists():
            return False
        return self.classmap[self.syntax](self).formatOnSaveEnabled()

    def clean(self, string):
        """Decode bytes (utf-8, lossy) and normalize CR/CRLF to LF."""
        if hasattr(string, 'decode'):
            string = string.decode('UTF-8', 'ignore')
        return re.sub(r'\r\n|\r', '\n', string)
| {
"repo_name": "RevanProdigalKnight/sublimetext-codeformatter",
"path": "codeformatter/formatter.py",
"copies": "1",
"size": "5046",
"license": "mit",
"hash": 2588512869972717600,
"line_mean": 32.417218543,
"line_max": 99,
"alpha_frac": 0.608600872,
"autogenerated": false,
"ratio": 3.9889328063241107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5097533678324111,
"avg_score": null,
"num_lines": null
} |
import os, sys, sublime, sublime_plugin
# Detect the Sublime Text generation: builds above 3000 (or an empty
# version string) are treated as ST3.
st_version = 2
if sublime.version() == '' or int(sublime.version()) > 3000:
    st_version = 3
reloader_name = 'codeformatter.reloader'
# ST3 loads each package as a module, so it needs an extra prefix
if st_version == 3:
    reloader_name = 'CodeFormatter.' + reloader_name
    from imp import reload
# Re-run the reloader module if it was already imported (presumably to
# pick up plugin code changes without restarting — TODO confirm).
if reloader_name in sys.modules:
    reload(sys.modules[reloader_name])
try:
# Python 3
from .codeformatter import reloader
from .codeformatter.formatter import Formatter
except (ValueError):
# Python 2
from codeformatter import reloader
from codeformatter.formatter import Formatter
# fix for ST2
# Look up the real print builtin via globals() so the name binds even
# under ST2's Python 2, where 'print' is a statement.
cprint = globals()["__builtins__"]["print"]
# Module-level debug flag.
debug_mode = False
def plugin_loaded():
    """One-time initialization: announce start-up and make the bundled
    PHP beautifier phar executable on non-Windows platforms."""
    cprint('CodeFormatter: Plugin Initialized')
    # NOTE(review): this assigns a function-local debug_mode, not the
    # module-level flag of the same name.
    settings = sublime.load_settings('CodeFormatter.sublime-settings')
    debug_mode = settings.get('codeformatter_debug', False)
    #if debug_mode:
    #from pprint import pprint
    #pprint(settings)
    #debug_write("Debug mode enabled")
    #debug_write("Platform "+sublime.platform()+" "+sublime.arch())
    #debug_write("Sublime Version "+sublime.version())
    #debug_write("Settings "+pprint(settings))
    # fmt.phar needs the owner execute bit before it can be invoked.
    if (sublime.platform() != "windows"):
        import stat
        path = sublime.packages_path()+"/CodeFormatter/codeformatter/lib/phpbeautifier/fmt.phar"
        st = os.stat(path)
        os.chmod(path, st.st_mode | stat.S_IEXEC)
# ST2 does not invoke plugin_loaded() automatically, so call it at
# import time there.
if st_version == 2:
    plugin_loaded()
class CodeFormatterCommand(sublime_plugin.TextCommand):
    """Text command that formats the whole buffer of the current view."""

    def run(self, edit, syntax=False, saving=False):
        view = self.view
        if view.is_scratch():
            return show_error("File is scratch")
        formatter = Formatter(view, view.file_name(), syntax, saving)
        if not formatter.exists():
            # During save, missing formatters are silently ignored.
            if saving:
                return False
            return show_error(
                "Formatter for this file type (" + formatter.syntax + ") not found.")
        if saving and not formatter.formatOnSaveEnabled():
            return False
        whole_buffer = sublime.Region(0, view.size())
        encoded = view.substr(whole_buffer).encode('utf-8')
        if len(encoded) == 0:
            return show_error("No code found.")
        stdout, stderr = formatter.format(encoded)
        if len(stderr) == 0 and len(stdout) > 0:
            view.replace(edit, whole_buffer, stdout)
        else:
            show_error("Format error:\n" + stderr)
class CodeFormatterEventListener(sublime_plugin.EventListener):
    """Event hook that triggers formatting just before each save."""

    def on_pre_save(self, view):
        # 'saving': True lets the command honor format_on_save settings.
        view.run_command('code_formatter', {'saving': True})
class CodeFormatterShowPhpTransformationsCommand(sublime_plugin.TextCommand):
    """Runs the bundled fmt.phar with --list and shows the available PHP
    transformation passes in an output panel."""

    def run(self, edit, syntax=False):
        import subprocess, re
        platform = sublime.platform()
        settings = sublime.load_settings('CodeFormatter.sublime-settings')
        opts = settings.get('codeformatter_php_options')
        # Resolve the PHP binary; fall back to whatever 'php' is on PATH.
        php_path = "php"
        if ("php_path" in opts and opts["php_path"]):
            php_path = opts["php_path"]
        cmd = [
            str(php_path),
            sublime.packages_path() + "/CodeFormatter/codeformatter/lib/phpbeautifier/fmt.phar",
            "--list",
        ]
        stderr = ""
        stdout = ""
        try:
            if (platform == "windows"):
                # Hide the console window that Popen would otherwise open.
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = subprocess.SW_HIDE
                p = subprocess.Popen(
                    cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE, startupinfo=startupinfo,
                    shell=False, creationflags=subprocess.SW_HIDE)
            else:
                p = subprocess.Popen(
                    cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
        except Exception as e:
            stderr = str(e)
        if (not stderr and not stdout):
            # Fix: 'gethering' -> 'gathering' in the user-facing message.
            stderr = "Error while gathering list of php transformations"
        if len(stderr) == 0 and len(stdout) > 0:
            text = stdout.decode('utf-8')
            # Fix: 'Tranformations' -> 'Transformations' in the header.
            text = re.sub("Usage:.*?PASSNAME", "Available PHP Transformations:", text)
            window = self.view.window()
            pt = window.get_output_panel("paneltranformations")
            pt.set_read_only(False)
            pt.insert(edit, pt.size(), text)
            window.run_command("show_panel", {"panel": "output.paneltranformations"})
        else:
            # NOTE(review): stderr from communicate() is bytes here, so this
            # concatenation can raise on Python 3 — confirm intended usage.
            show_error("Formatter error:\n" + stderr)
def console_write(text, prefix=False):
    """Write *text* plus a newline to stdout, optionally plugin-prefixed."""
    stream = sys.stdout
    if prefix:
        stream.write('CodeFormatter: ')
    stream.write(text + "\n")


def debug_write(text, prefix=False):
    """Debug message; always prefixed (the *prefix* argument is ignored)."""
    console_write(text, True)
def show_error(text):
    """Display *text* in a modal Sublime error dialog, plugin-branded."""
    sublime.error_message(u'CodeFormatter\n\n%s' % text)
| {
"repo_name": "RevanProdigalKnight/sublimetext-codeformatter",
"path": "CodeFormatter.py",
"copies": "1",
"size": "5396",
"license": "mit",
"hash": 8678561073666991000,
"line_mean": 31.119047619,
"line_max": 184,
"alpha_frac": 0.6241660489,
"autogenerated": false,
"ratio": 3.7085910652920964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9810683760384827,
"avg_score": 0.00441467076145374,
"num_lines": 168
} |
import re
import coldfusionbeautifier
class ColdfusionFormatter:
    """Adapter around coldfusionbeautifier for the CodeFormatter plugin."""

    # Settings keys copied onto the beautifier options when present;
    # indent_char is coerced to str.
    _CUSTOM_KEYS = (
        'indent_size',
        'indent_char',
        'minimum_attribute_count',
        'first_attribute_on_new_line',
        'indent_with_tabs',
        'expand_tags',
        'expand_javascript',
        'reduce_empty_tags',
        'reduce_whole_word_tags',
        'exception_on_tag_mismatch',
        'custom_singletons',
        'format_on_save',
    )

    def __init__(self, formatter):
        self.formatter = formatter
        self.options = coldfusionbeautifier.default_options()
        # Overlay user settings on top of the beautifier defaults.
        self.fill_custom_options(
            formatter.settings.get('codeformatter_coldfusion_options'))

    def fill_custom_options(self, options):
        """Copy every non-None setting in *options* onto self.options."""
        if not options:
            return
        for key in self._CUSTOM_KEYS:
            value = options.get(key)
            if value is None:
                continue
            if key == 'indent_char':
                value = str(value)
            setattr(self.options, key, value)

    def format(self, text):
        """Beautify utf-8 *text*; returns a (stdout, stderr) pair."""
        source = text.decode('utf-8')
        stdout = ''
        stderr = ''
        try:
            stdout = coldfusionbeautifier.beautify(source, self.options)
        except Exception as e:
            stderr = str(e)
        if not stderr and not stdout:
            stderr = 'Formatting error!'
        return stdout, stderr

    def format_on_save_enabled(self, file_name):
        """format_on_save may be a bool or a regex matched on *file_name*."""
        enabled = getattr(self.options, 'format_on_save', False)
        if isinstance(enabled, str):
            enabled = re.search(enabled, file_name) is not None
        return enabled
| {
"repo_name": "crlang/sublime-text---front-end-config",
"path": "Data/Packages/CodeFormatter/codeformatter/coldfusionformatter.py",
"copies": "2",
"size": "2042",
"license": "mit",
"hash": -2893107343198580700,
"line_mean": 26.5945945946,
"line_max": 99,
"alpha_frac": 0.5592556317,
"autogenerated": false,
"ratio": 3.98828125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00028044173205463526,
"num_lines": 74
} |
import re
import cssbeautifier
class CssFormatter:
    """Adapter around cssbeautifier for the CodeFormatter plugin."""

    def __init__(self, formatter):
        self.formatter = formatter
        self.options = cssbeautifier.default_options()
        # Overlay user settings on top of the beautifier defaults.
        self.fill_custom_options(
            formatter.settings.get('codeformatter_css_options'))

    def fill_custom_options(self, options):
        """Apply user settings on top of the beautifier defaults.

        Each spec entry is (key, on_value, off_value): flag keys map any
        truthy setting to on_value; missing keys take off_value; value
        keys (on_value None) pass through, with indent_char coerced to
        str.
        """
        if not options:
            return
        spec = (
            ('indent_size', None, 4),
            ('indent_char', None, ' '),
            ('indent_with_tabs', True, False),
            ('selector_separator_newline', True, False),
            ('end_with_newline', True, False),
            ('eol', None, '\n'),
            ('space_around_combinator', True, False),
            ('newline_between_rules', True, False),
            ('format_on_save', None, False),
        )
        for key, on_value, off_value in spec:
            value = options[key] if key in options else off_value
            if value and on_value:
                value = on_value
            elif key == 'indent_char':
                value = str(value)
            setattr(self.options, key, value)

    def format(self, text):
        """Beautify utf-8 *text*; returns a (stdout, stderr) pair."""
        source = text.decode('utf-8')
        stdout = ''
        stderr = ''
        try:
            stdout = cssbeautifier.beautify(source, self.options)
        except Exception as e:
            stderr = str(e)
        if not stderr and not stdout:
            stderr = 'Formatting error!'
        return stdout, stderr

    def format_on_save_enabled(self, file_name):
        """format_on_save may be a bool or a regex matched on *file_name*."""
        enabled = getattr(self.options, 'format_on_save', False)
        if isinstance(enabled, str):
            enabled = re.search(enabled, file_name) is not None
        return enabled
| {
"repo_name": "crlang/sublime-text---front-end-config",
"path": "Data/Packages/CodeFormatter/codeformatter/cssformatter.py",
"copies": "2",
"size": "2260",
"license": "mit",
"hash": -5176470515749901000,
"line_mean": 29.5405405405,
"line_max": 99,
"alpha_frac": 0.5389380531,
"autogenerated": false,
"ratio": 4.057450628366248,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002922690131992458,
"num_lines": 74
} |
import re
import jsbeautifier
class JsFormatter:
    """Adapter around jsbeautifier: translates plugin settings into a
    jsbeautifier options object and runs the beautifier."""

    # Value-style options: used verbatim when set truthy, else default.
    _VALUE_DEFAULTS = (
        ('indent_size', 4),
        ('eol', '\n'),
        ('max_preserve_newlines', 10),
        ('brace_style', 'collapse'),
        ('wrap_line_length', 0),
        ('operator_position', 'before-newline'),
    )

    # Flag options: any truthy setting enables, anything else disables.
    _FLAGS = (
        'indent_with_tabs', 'preserve_newlines', 'space_in_paren',
        'space_in_empty_paren', 'e4x', 'jslint_happy',
        'keep_array_indentation', 'keep_function_indentation',
        'eval_code', 'unescape_strings', 'break_chained_methods',
        'end_with_newline', 'comma_first', 'space_after_anon_function',
        'unindent_chained_methods',
    )

    def __init__(self, formatter):
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_js_options')

    def format(self, text):
        """Beautify utf-8 *text*; returns a (stdout, stderr) pair.

        ~120 lines of copy-pasted if/else option plumbing collapsed to
        tables; semantics unchanged — a missing or falsy setting falls
        back to the default.
        """
        source = text.decode('utf-8')
        options = jsbeautifier.default_options()
        for key, default in self._VALUE_DEFAULTS:
            setattr(options, key, self.opts.get(key) or default)
        # indent_char is the only option coerced to str.
        options.indent_char = str(self.opts.get('indent_char') or ' ')
        for key in self._FLAGS:
            setattr(options, key, bool(self.opts.get(key)))
        stdout = ''
        stderr = ''
        try:
            stdout = jsbeautifier.beautify(source, options)
        except Exception as e:
            stderr = str(e)
        if not stderr and not stdout:
            stderr = 'Formatting error!'
        return stdout, stderr

    def format_on_save_enabled(self, file_name):
        """format_on_save may be a bool or a regex matched on *file_name*."""
        # 'or False' keeps the original behavior where a falsy configured
        # value (including an empty string) disables format-on-save.
        enabled = self.opts.get('format_on_save') or False
        if isinstance(enabled, str):
            enabled = re.search(enabled, file_name) is not None
        return enabled
| {
"repo_name": "crlang/sublime-text---front-end-config",
"path": "Data/Packages/CodeFormatter/codeformatter/jsformatter.py",
"copies": "2",
"size": "5640",
"license": "mit",
"hash": 8752688341265374000,
"line_mean": 31.2285714286,
"line_max": 99,
"alpha_frac": 0.5576241135,
"autogenerated": false,
"ratio": 4.002838892831796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5560463006331796,
"avg_score": null,
"num_lines": null
} |
import re
import sublime

# Detect the hosting Sublime Text generation: ST2 reports versions < 3000,
# ST3 reports >= 3000 (dev builds may report '' and are treated as ST3).
st_version = 2
if sublime.version() == '' or int(sublime.version()) > 3000:
    st_version = 3
# The bundled pybeautifier only runs on ST2's Python; on ST3 the import is
# skipped and format() reports "not supported" instead.
if (st_version == 2):
    from pybeautifier import Beautifier
else:
    # from .pybeautifier import Beautifier
    print('CodeFormatter: formatting python files on ST3 not supported.')
class PyFormatter:
    """Adapter between CodeFormatter settings and the bundled pybeautifier.

    Translates the ``codeformatter_python_options`` settings dict into the
    upper-case option names pybeautifier expects. A falsy configured value
    (0, '', False, None) falls back to the documented default; a missing key
    raises KeyError, matching the original per-option checks.
    """

    # (settings key, pybeautifier option name, default used when the
    # configured value is falsy). INDENTATION is handled separately in
    # format() because it is derived from two settings keys.
    _OPTION_TABLE = (
        ('max_char', 'COL_LIMIT', 80),
        ('assignment', 'ASSIGNMENT', ' = '),
        ('function_param_assignment', 'FUNCTION_PARAM_ASSIGNMENT', '='),
        ('function_param_sep', 'FUNCTION_PARAM_SEP', ', '),
        ('list_sep', 'LIST_SEP', ', '),
        ('subscript_sep', 'SUBSCRIPT_SEP', '='),
        ('dict_colon', 'DICT_COLON', ': '),
        ('slice_colon', 'SLICE_COLON', ': '),
        ('comment_prefix', 'COMMENT_PREFIX', '# '),
        ('shebang', 'SHEBANG', '#!/usr/bin/env python'),
        ('boilerplate', 'BOILERPLATE', ''),
        ('blank_line', 'BLANK_LINE', ''),
        ('keep_blank_lines', 'KEEP_BLANK_LINES', True),
        ('add_blank_lines_around_comments',
         'ADD_BLANK_LINES_AROUND_COMMENTS', True),
        ('add_blank_line_after_doc_string',
         'ADD_BLANK_LINE_AFTER_DOC_STRING', True),
        ('max_seps_func_def', 'MAX_SEPS_FUNC_DEF', 3),
        ('max_seps_func_ref', 'MAX_SEPS_FUNC_REF', 5),
        ('max_seps_series', 'MAX_SEPS_SERIES', 5),
        ('max_seps_dict', 'MAX_SEPS_DICT', 3),
        ('max_lines_before_split_lit', 'MAX_LINES_BEFORE_SPLIT_LIT', 2),
        ('left_margin', 'LEFT_MARGIN', ''),
        ('normalize_doc_strings', 'NORMALIZE_DOC_STRINGS', False),
        ('leftjust_doc_strings', 'LEFTJUST_DOC_STRINGS', False),
        ('wrap_doc_strings', 'WRAP_DOC_STRINGS', False),
        ('leftjust_comments', 'LEFTJUST_COMMENTS', False),
        ('wrap_comments', 'WRAP_COMMENTS', False),
        ('double_quoted_strings', 'DOUBLE_QUOTED_STRINGS', False),
        ('single_quoted_strings', 'SINGLE_QUOTED_STRINGS', False),
        ('can_split_strings', 'CAN_SPLIT_STRINGS', False),
        ('doc_tab_replacement', 'DOC_TAB_REPLACEMENT', '....'),
        ('keep_unassigned_constants', 'KEEP_UNASSIGNED_CONSTANTS', False),
        ('parenthesize_tuple_display', 'PARENTHESIZE_TUPLE_DISPLAY', True),
        ('java_style_list_dedent', 'JAVA_STYLE_LIST_DEDENT', False),
    )

    def __init__(self, formatter):
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_python_options')

    def format(self, text):
        """Beautify *text* with pybeautifier; return an (stdout, stderr) pair.

        On ST3 the beautifier is unavailable and an error message is
        returned in stderr instead.
        """
        if self.formatter.st_version == 3:
            return '', 'formatting python files on ST3 not supported!'

        options = {}
        indent_size = self.opts['indent_size'] or 1
        # BUGFIX: the original assigned spaces in BOTH branches, so the
        # indent_with_tabs setting had no effect.
        if self.opts['indent_with_tabs']:
            options['INDENTATION'] = '\t' * indent_size
        else:
            options['INDENTATION'] = ' ' * indent_size

        for key, name, default in self._OPTION_TABLE:
            value = self.opts[key]
            options[name] = value if value else default

        beautifier = Beautifier(self.formatter)
        stdout, stderr = beautifier.beautify(text, options)
        return stdout, stderr

    def format_on_save_enabled(self, file_name):
        """Return True when *file_name* should be auto-formatted on save.

        ``format_on_save`` may be a boolean or a regular expression matched
        against the file name.
        """
        format_on_save = False
        if 'format_on_save' in self.opts and self.opts['format_on_save']:
            format_on_save = self.opts['format_on_save']
        if isinstance(format_on_save, str):
            format_on_save = re.search(format_on_save, file_name) is not None
        return format_on_save
| {
"repo_name": "crlang/sublime-text---front-end-config",
"path": "Data/Packages/CodeFormatter/codeformatter/pyformatter.py",
"copies": "2",
"size": "10203",
"license": "mit",
"hash": -7477170203309850000,
"line_mean": 32.2345276873,
"line_max": 99,
"alpha_frac": 0.5581691659,
"autogenerated": false,
"ratio": 3.8371568258743887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5395325991774389,
"avg_score": null,
"num_lines": null
} |
import re
import vbscriptbeautifier
class VbscriptFormatter:
    """Adapter between CodeFormatter settings and vbscriptbeautifier.

    Options come from the ``codeformatter_vbscript_options`` settings dict;
    missing or falsy values fall back to defaults.
    """

    def __init__(self, formatter):
        self.formatter = formatter
        self.opts = formatter.settings.get('codeformatter_vbscript_options')

    def _opt(self, key, default=None):
        """Return the configured value for *key*, or *default* when the key
        is missing or its value is falsy."""
        if key in self.opts and self.opts[key]:
            return self.opts[key]
        return default

    def format(self, text):
        """Beautify *text* (utf-8 bytes); return an (stdout, stderr) pair."""
        text = text.decode('utf-8')
        stderr = ''
        stdout = ''

        options = vbscriptbeautifier.default_options()
        options.indent_size = self._opt('indent_size', 1)
        options.indent_char = str(self._opt('indent_char', '\t'))
        # BUGFIX: the original set indent_with_tabs = True in BOTH branches,
        # so the setting could never be disabled.
        options.indent_with_tabs = bool(self._opt('indent_with_tabs'))
        options.preserve_newlines = bool(self._opt('preserve_newlines'))
        options.max_preserve_newlines = self._opt('max_preserve_newlines', 10)
        # Tag lists are only overridden when explicitly configured.
        for tag_option in ('opening_tags', 'middle_tags', 'closing_tags'):
            value = self._opt(tag_option)
            if value:
                setattr(options, tag_option, str(value))

        try:
            stdout = vbscriptbeautifier.beautify(text, options)
        except Exception as e:
            stderr = str(e)

        if not stderr and not stdout:
            stderr = 'Formatting error!'

        return stdout, stderr

    def format_on_save_enabled(self, file_name):
        """Return True when *file_name* should be auto-formatted on save;
        the option may be a boolean or a regex matched against the name."""
        format_on_save = self._opt('format_on_save', False)
        if isinstance(format_on_save, str):
            format_on_save = re.search(format_on_save, file_name) is not None
        return format_on_save
| {
"repo_name": "crlang/sublime-text---front-end-config",
"path": "Data/Packages/CodeFormatter/codeformatter/vbscriptformatter.py",
"copies": "2",
"size": "2662",
"license": "mit",
"hash": 8802153408880483000,
"line_mean": 32.6962025316,
"line_max": 99,
"alpha_frac": 0.5830202855,
"autogenerated": false,
"ratio": 3.846820809248555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00012658227848101267,
"num_lines": 79
} |
import base64
import binascii
import json
import os
import pprint
import re
import socket
import string
import sys
import time
import urllib
import urllib.parse
import urllib.request

import sublime
import sublime_plugin

import html.entities as htmlentitydefs
from cgi import escape
from datetime import datetime
from hashlib import md5, sha1, sha224, sha256, sha384, sha512
from random import sample, choice, randrange
if sys.hexversion >= 0x3000000:
    # Python 3 removed unichr(); provide a shim so the conversion commands
    # below can call it unconditionally.
    def unichr(c):
        return chr(c)
class StringUtilitiesExpandStringCommand(sublime_plugin.TextCommand):
    """If the region is contained in a string scope, expands the region to
    the whole string. If the region is not contained in a string scope, this
    command does nothing. It is applied to all regions in the current
    selection."""
    def run(self, edit):
        for region in self.view.sel():
            self._run(edit, region)
    def _run(self, edit, region):
        # Both ends of the region must sit inside a string scope, otherwise
        # the selection is left untouched.
        if (not self.view.match_selector(region.a, "string") or
            not self.view.match_selector(region.b, "string")):
            return
        # Scope that matches the quote characters delimiting the string.
        selector = "string punctuation.definition.string"
        # Walk left from the region start until the opening quote is found.
        p = region.begin()
        while not self.view.match_selector(p, selector):
            p = self.view.find_by_class(p, False, sublime.CLASS_PUNCTUATION_START)
        # Walk right from the region end until the closing quote is found.
        q = region.end()
        while not self.view.match_selector(q, selector):
            # sublime.CLASS_PUNCTUATION_END is broken
            # this works too
            q = self.view.find_by_class(q, True, sublime.CLASS_PUNCTUATION_START)
        # Extend the selection to cover the whole string including quotes.
        self.view.sel().add(sublime.Region(p, q + 1))
class ConvertSelection(sublime_plugin.TextCommand):
    """Base command that replaces each non-empty selection with
    convert(bytes) applied to its encoded text. Subclasses implement
    convert()."""

    def run(self, edit):
        for region in self.view.sel():
            if region.empty():
                continue
            raw = self.view.substr(region).encode(self.enc())
            self.view.replace(edit, region, self.convert(raw))

    def enc(self):
        """Return the view's encoding, or the configured default when the
        view reports 'Undefined'."""
        encoding = self.view.encoding()
        if encoding == 'Undefined':
            return self.view.settings().get('default_encoding', 'UTF-8')
        return encoding

    def convert(self, text):
        raise NotImplementedError("Subclass must implement convert")
class ConvertTabsToSpacesCommand(sublime_plugin.TextCommand):
    """Expand tabs to spaces in each selection, or in the whole buffer when
    a selection is empty, honouring the view's tab_size setting."""

    def run(self, edit):
        sublime.status_message('Convert tabs to spaces.')
        width = int(self.view.settings().get('tab_size', 4))
        for region in self.view.sel():
            if region.empty():
                # No selection: select everything, convert, drop selection.
                self.view.run_command('select_all')
                whole = self.view.sel()[0]
                self.view.replace(edit, whole, self.view.substr(whole).expandtabs(width))
                self.view.sel().clear()
            else:
                self.view.replace(edit, region, self.view.substr(region).expandtabs(width))
class ConvertSpacesToTabsCommand(sublime_plugin.TextCommand):
    """Collapse runs of tab_size spaces into tab characters in each
    selection, or in the whole buffer when a selection is empty."""

    def run(self, edit):
        sublime.status_message('Convert spaces to tabs.')
        width = str(self.view.settings().get('tab_size', 4))
        pattern = r' {' + width + r'}'
        for region in self.view.sel():
            if region.empty():
                self.view.run_command('select_all')
                whole = self.view.sel()[0]
                self.view.replace(edit, whole, re.sub(pattern, r'\t', self.view.substr(whole)))
                self.view.sel().clear()
            else:
                self.view.replace(edit, region, re.sub(pattern, r'\t', self.view.substr(region)))
class ConvertSpacesToNonBreaking(sublime_plugin.TextCommand):
    """Replace each regular space in the selection with a no-break space."""

    def run(self, edit):
        for region in self.view.sel():
            if not region.empty():
                text = self.view.substr(region)
                # Use an explicit escape for U+00A0 NO-BREAK SPACE; the
                # original embedded the raw (invisible) character, which is
                # indistinguishable from a plain space in an editor and easy
                # to mangle.
                text = text.replace(" ", "\u00a0")
                self.view.replace(edit, region, text)
class ConvertCharsToHtmlCommand(sublime_plugin.TextCommand):
    #Convert Chars into XML/HTML Entities
    # NOTE(review): cgi.escape was removed in Python 3.8 (the whole cgi
    # module in 3.13); on newer runtimes this should move to html.escape.
    def run(self, edit):
        for region in self.view.sel():
            if not region.empty():
                # The second argument (quote=True) also escapes double quotes.
                self.view.replace(edit, region, escape(self.view.substr(region), True))
class ConvertHtmlToCharsCommand(sublime_plugin.TextCommand):
    #Convert XML/HTML Entities into Chars
    def run(self, edit):
        for region in self.view.sel():
            if not region.empty():
                # Build one alternation over every named HTML entity and
                # replace each '&name;' with its code point. Numeric
                # entities (&#...;) are not handled.
                text = re.sub('&(%s);' % '|'.join(htmlentitydefs.name2codepoint),
                    lambda m: unichr(htmlentitydefs.name2codepoint[m.group(1)]), self.view.substr(region))
                self.view.replace(edit, region, text)
class ConvertCamelUnderscoresCommand(sublime_plugin.TextCommand):
    """Toggle each selection between camelCase and under_scores."""

    def run(self, edit):
        for region in self.view.sel():
            if not region.empty():
                text = self.view.substr(region)
                if '_' in text and text[0].islower():
                    text = self.toCamelCase(text)
                elif text[0].islower():
                    text = self.toUnderscores(text)
                # BUGFIX: selections not starting with a lowercase letter
                # used to be replaced with the boolean False (the original
                # `cond and f(text)` expression); they are now left as-is.
                self.view.replace(edit, region, text)

    def toUnderscores(self, name):
        """camelCase -> camel_case."""
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    def toCamelCase(self, name):
        """under_scores -> underScores (the first chunk keeps its case)."""
        return ''.join(ch.capitalize() if i > 0 else ch
                       for i, ch in enumerate(name.split('_')))
class ConvertCamelDashCommand(sublime_plugin.TextCommand):
    """Toggle each selection between camelCase and dash-case."""

    def run(self, edit):
        for region in self.view.sel():
            if not region.empty():
                text = self.view.substr(region)
                if '-' in text and text[0].islower():
                    text = self.toCamelCase(text)
                elif text[0].islower():
                    text = self.toDash(text)
                # BUGFIX: selections not starting with a lowercase letter
                # used to be replaced with the boolean False; they are now
                # left untouched.
                self.view.replace(edit, region, text)

    def toDash(self, name):
        """camelCase -> camel-case."""
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1-\2', s1).lower()

    def toCamelCase(self, name):
        """dash-case -> dashCase (the first chunk keeps its case)."""
        return ''.join(ch.capitalize() if i > 0 else ch
                       for i, ch in enumerate(name.split('-')))
class ConvertPascalUnderscoresCommand(sublime_plugin.TextCommand):
    """Toggle each selection between PascalCase and under_scores."""

    def run(self, edit):
        for region in self.view.sel():
            if not region.empty():
                text = self.view.substr(region)
                if '_' in text and text[0].islower():
                    text = self.toPascalCase(text)
                elif text[0].isupper():
                    text = self.toUnderscores(text)
                # BUGFIX: selections matching neither guard used to be
                # replaced with the boolean False; they are now left as-is.
                self.view.replace(edit, region, text)

    def toUnderscores(self, name):
        """PascalCase -> pascal_case."""
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    def toPascalCase(self, name):
        """under_scores -> UnderScores."""
        return ''.join(map(lambda x: x.capitalize(), name.split('_')))
class ConvertToUnicodeNotationCommand(sublime_plugin.TextCommand):
    """Escape non-printable / non-ASCII characters in the selection as
    \\uXXXX sequences; whitespace and printable ASCII pass through."""

    def run(self, edit):
        whitespace = re.compile(r'\s+')
        for region in self.view.sel():
            if region.empty():
                continue
            out = []
            for ch in self.view.substr(region):
                code = ord(ch)
                # Keep whitespace and printable ASCII (0x20..0x7e) as-is.
                if re.match(whitespace, ch) or 0x20 <= code <= 0x7e:
                    out.append(ch)
                else:
                    out.append('\\u{0:04X}'.format(code))
            self.view.replace(edit, region, ''.join(out))
class ConvertFromUnicodeNotationCommand(sublime_plugin.TextCommand):
    """Replace \\uXX..\\uXXXX escape sequences in the selection with the
    characters they denote."""

    def run(self, edit):
        escape_seq = re.compile(r'(\\u)([0-9a-fA-F]{2,4})')
        for region in self.view.sel():
            if region.empty():
                continue
            restored = re.sub(escape_seq,
                              lambda m: unichr(int(m.group(2), 16)),
                              self.view.substr(region))
            self.view.replace(edit, region, restored)
class ConvertToBase64Command(sublime_plugin.TextCommand):
    """Base64-encode every non-empty selection in place."""

    def run(self, edit):
        for region in self.view.sel():
            if region.empty():
                continue
            encoding = self.enc()
            raw = self.view.substr(region).encode(encoding)
            encoded = str(base64.b64encode(raw), encoding)
            self.view.replace(edit, region, encoded)

    def enc(self):
        """View encoding, or the configured default when undefined."""
        encoding = self.view.encoding()
        if encoding == 'Undefined':
            return self.view.settings().get('default_encoding', 'UTF-8')
        return encoding
class ConvertFromBase64Command(sublime_plugin.TextCommand):
    """Base64-decode every non-empty selection in place."""

    def run(self, edit):
        for region in self.view.sel():
            if region.empty():
                continue
            encoding = self.enc()
            raw = self.view.substr(region).encode(encoding)
            decoded = str(base64.b64decode(raw), encoding)
            self.view.replace(edit, region, decoded)

    def enc(self):
        """View encoding, or the configured default when undefined."""
        encoding = self.view.encoding()
        if encoding == 'Undefined':
            return self.view.settings().get('default_encoding', 'UTF-8')
        return encoding
class ConvertToHexCommand(sublime_plugin.TextCommand):
    """Replace every non-empty selection with its hexadecimal byte dump."""

    def run(self, edit):
        for region in self.view.sel():
            if region.empty():
                continue
            raw = self.view.substr(region).encode(self.enc())
            dumped = str(binascii.hexlify(raw), 'ascii')
            self.view.replace(edit, region, dumped)

    def enc(self):
        """View encoding, or the configured default when undefined."""
        encoding = self.view.encoding()
        if encoding == 'Undefined':
            return self.view.settings().get('default_encoding', 'UTF-8')
        return encoding
class ConvertFromHexCommand(sublime_plugin.TextCommand):
    """Decode a hexadecimal byte dump selection back into text."""

    def run(self, edit):
        for region in self.view.sel():
            if region.empty():
                continue
            raw = self.view.substr(region).encode(self.enc())
            restored = str(binascii.unhexlify(raw), 'ascii')
            self.view.replace(edit, region, restored)

    def enc(self):
        """View encoding, or the configured default when undefined."""
        encoding = self.view.encoding()
        if encoding == 'Undefined':
            return self.view.settings().get('default_encoding', 'UTF-8')
        return encoding
class ConvertHexToRgbCommand(sublime_plugin.TextCommand):
    """Convert a hex colour selection (#rrggbb, #rgb or #r) to rgb(...)."""

    def run(self, edit):
        for region in self.view.sel():
            if not region.empty():
                text = self.view.substr(region)
                self.view.replace(edit, region, self.hexToRgb(text))

    def enc(self):
        """View encoding, or the configured default when undefined."""
        if self.view.encoding() == 'Undefined':
            return self.view.settings().get('default_encoding', 'UTF-8')
        else:
            return self.view.encoding()

    def hexToRgb(self, value):
        """Translate '#rrggbb', '#rgb' or '#r' into an 'rgb(r,g,b)' string.

        Short forms repeat each nibble (e.g. #a -> aa,aa,aa). Raises
        ValueError for unsupported lengths; the original code crashed with
        an opaque UnboundLocalError instead.
        """
        value = value.lstrip('#')
        lv = len(value)
        if lv == 6:
            rgb = tuple(str(int(value[i:i + lv // 3], 16))
                        for i in range(0, lv, lv // 3))
        elif lv == 3:
            rgb = tuple(str(int(value[i:i + 1], 16) * 17) for i in range(0, 3))
        elif lv == 1:
            v = str(int(value, 16) * 17)
            rgb = (v, v, v)
        else:
            raise ValueError('unsupported hex colour length: %d' % lv)
        return 'rgb(' + ','.join(rgb) + ')'
class ConvertRgbToHexCommand(sublime_plugin.TextCommand):
    """Convert an rgb()/rgba() selection to a #rrggbb hex colour; any alpha
    component is dropped."""

    def run(self, edit):
        rgb_re = re.compile(r'^rgb[a]?\((\s*\d+\s*),(\s*\d+\s*),(\s*\d+\s*),?(\s*(0?.?\d)+\s*)?\)$')
        for region in self.view.sel():
            if region.empty():
                continue
            match = rgb_re.match(self.view.substr(region))
            if match is not None:
                self.view.replace(edit, region, self.rgbToHex(match))

    def enc(self):
        """View encoding, or the configured default when undefined."""
        encoding = self.view.encoding()
        if encoding == 'Undefined':
            return self.view.settings().get('default_encoding', 'UTF-8')
        return encoding

    def rgbToHex(self, rgb_match):
        """Format the three captured decimal components as '#rrggbb'."""
        # Parse each component as base-10, render as two hex digits.
        parts = [hex(int(rgb_match.group(n), 10))[2:].zfill(2)
                 for n in range(1, 4)]
        return '#%s%s%s' % tuple(parts)
class ConvertSingleQuotesToDoubleCommand(sublime_plugin.TextCommand):
    """Replace every single quote in the selection with a double quote."""

    def run(self, edit):
        for region in self.view.sel():
            if region.empty():
                continue
            swapped = self.view.substr(region).replace("'", '"')
            self.view.replace(edit, region, swapped)
class ConvertDoubleQuotesToSingleCommand(sublime_plugin.TextCommand):
    """Replace every double quote in the selection with a single quote."""

    def run(self, edit):
        for region in self.view.sel():
            if region.empty():
                continue
            swapped = self.view.substr(region).replace('"', "'")
            self.view.replace(edit, region, swapped)
class UrlDecodeCommand(sublime_plugin.TextCommand):
    """Percent-decode the selected text (urllib.parse.unquote)."""

    def run(self, edit):
        for region in self.view.sel():
            if region.empty():
                continue
            decoded = urllib.parse.unquote(self.view.substr(region))
            self.view.replace(edit, region, decoded)
class UrlEncodeCommand(sublime_plugin.TextCommand):
    """Percent-encode the selected text (urllib.parse.quote)."""

    def run(self, edit):
        for region in self.view.sel():
            if region.empty():
                continue
            encoded = urllib.parse.quote(self.view.substr(region))
            self.view.replace(edit, region, encoded)
class ConvertMd5Command(ConvertSelection):
    """Replace each selection with the hex MD5 digest of its bytes."""

    def convert(self, text):
        digest = md5(text)
        return digest.hexdigest()
class ConvertSha1Command(ConvertSelection):
    """Replace each selection with the hex SHA-1 digest of its bytes."""

    def convert(self, text):
        digest = sha1(text)
        return digest.hexdigest()
class ConvertSha224Command(ConvertSelection):
    """Replace each selection with the hex SHA-224 digest of its bytes."""

    def convert(self, text):
        digest = sha224(text)
        return digest.hexdigest()
class ConvertSha256Command(ConvertSelection):
    """Replace each selection with the hex SHA-256 digest of its bytes."""

    def convert(self, text):
        digest = sha256(text)
        return digest.hexdigest()
class ConvertSha384Command(ConvertSelection):
    """Replace each selection with the hex SHA-384 digest of its bytes."""

    def convert(self, text):
        digest = sha384(text)
        return digest.hexdigest()
class ConvertSha512Command(ConvertSelection):
    """Replace each selection with the hex SHA-512 digest of its bytes."""

    def convert(self, text):
        digest = sha512(text)
        return digest.hexdigest()
class ConvertTimeFormatCommand(sublime_plugin.TextCommand):
    """Toggle each selection between a Unix timestamp and a local
    'YYYY-MM-DD HH:MM:SS' date string."""

    def run(self, edit):
        for region in self.view.sel():
            if not region.empty():
                text = self.view.substr(region)
                # All-digit selections are treated as epoch seconds.
                if re.match('^([0-9]+)$', text):
                    result = self.fromUnix(text)
                else:
                    result = self.toUnix(text)
                if result:
                    self.view.replace(edit, region, result)
                else:
                    sublime.status_message('Convert error.')

    def fromUnix(self, timestamp):
        """Render epoch seconds as a local 'YYYY-MM-DD HH:MM:SS' string."""
        sublime.status_message('Convert from epoch to human readable date.')
        timestamp = float(timestamp)
        stamp = datetime.fromtimestamp(timestamp)
        return stamp.strftime("%Y-%m-%d %H:%M:%S")

    def toUnix(self, timestr):
        """Parse a 'YYYY-MM-DD HH:MM:SS' string into epoch seconds, or
        return False when parsing fails."""
        sublime.status_message('Convert from human readable date to epoch.')
        try:
            datetime_to_convert = datetime.strptime(timestr, "%Y-%m-%d %H:%M:%S")
            return '%d' % (time.mktime(datetime_to_convert.timetuple()))
        except (ValueError, OverflowError):
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. strptime raises ValueError;
            # mktime raises OverflowError/ValueError for out-of-range dates.
            return False
class InsertTimestampCommand(sublime_plugin.TextCommand):
    """Insert the current local time as 'YYYY-MM-DD HH:MM' at every caret."""

    def run(self, edit):
        for region in self.view.sel():
            stamp = datetime.now().strftime("%Y-%m-%d %H:%M")
            self.view.insert(edit, region.begin(), stamp)
class GeneratePasswordCommand(sublime_plugin.TextCommand):
    # Alphabet omits look-alike characters (0/O, 1/l/I/j).
    chars = "23456789abcdefghijkmnpqrstuvwxyzABCDEFGHKMNPQRSTUVWXYZ"
    def run(self, edit, length=16):
        # NOTE(review): random.sample draws WITHOUT replacement (never a
        # repeated character) and `random` is not a CSPRNG — for real
        # credentials this should use the `secrets` module.
        length = int(length)
        self.view.insert(edit, self.view.sel()[0].begin(), ''.join(sample(self.chars, length)))
class GeneratePasswordSpecSymbolsCommand(sublime_plugin.TextCommand):
    # Variant alphabet that also includes punctuation characters.
    chars = "0123456789abcdefghijkmnpqrstuvwxyzABCDEFGHKMNPQRSTUVWXYZ%*)?@#$~"
    def run(self, edit, length=16):
        # NOTE(review): random.sample draws WITHOUT replacement and `random`
        # is not a CSPRNG — prefer the `secrets` module for real passwords.
        length = int(length)
        self.view.insert(edit, self.view.sel()[0].begin(), ''.join(sample(self.chars, length)))
class DecodeHeidiSqlCommand(sublime_plugin.TextCommand):
    """Decode a HeidiSQL-obfuscated password selection in place."""
    # The decoded text is stripped of NUL bytes; otherwise Sublime would
    # insert a visible control character.

    def run(self, edit):
        for region in self.view.sel():
            if region.empty():
                continue
            text = self.view.substr(region)
            if text[0].isdigit():
                text = self.decodeHeidi(text)
            self.view.replace(edit, region, text)

    def decodeHeidi(self, hex_in):
        """Undo HeidiSQL's encoding: hex byte pairs shifted by the value of
        the trailing digit."""
        shift = int(hex_in[-1])
        pairs = [int(hex_in[pos:pos + 2], 16)
                 for pos in range(0, len(hex_in), 2)]
        return ''.join(chr(value - shift) for value in pairs).strip('\x00')
class StringUtilitiesExtIpCommand(sublime_plugin.TextCommand):
    """Insert the machine's external IP address at every caret.

    Performs a blocking HTTP request to a third-party echo service.
    """

    def run(self, edit):
        url = "http://api.long.ge/sublimetext/ip.php"
        request = urllib.request.Request(url)
        response = urllib.request.urlopen(request)
        # BUGFIX: read() drains the response stream, so with multiple carets
        # every caret after the first used to receive an empty string. Read
        # once, insert everywhere.
        ip_text = response.read().decode(self.enc())
        for region in self.view.sel():
            self.view.insert(edit, region.begin(), ip_text)

    def enc(self):
        """View encoding, or the configured default when undefined."""
        if self.view.encoding() == 'Undefined':
            return self.view.settings().get('default_encoding', 'UTF-8')
        else:
            return self.view.encoding()
class StringUtilitiesIntIpCommand(sublime_plugin.TextCommand):
    """Insert the machine's LAN IP address at every caret."""

    def run(self, edit):
        # Connecting a UDP socket reveals the outbound interface address;
        # no packets are actually sent.
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        probe.connect(('google.com', 0))
        local_ip = probe.getsockname()[0]
        probe.close()
        for region in self.view.sel():
            self.view.insert(edit, region.begin(), local_ip)

    def enc(self):
        """View encoding, or the configured default when undefined."""
        encoding = self.view.encoding()
        if encoding == 'Undefined':
            return self.view.settings().get('default_encoding', 'UTF-8')
        return encoding
class StringUtilitiesDecodeJsonCommand(sublime_plugin.TextCommand):
    """Pretty-print the selected JSON (4-space indent, sorted keys)."""

    # Accumulator used by the legacy recursivePrint() helper.
    output = ""
    i = 0

    def run(self, edit):
        for region in self.view.sel():
            self.output = ""
            if not region.empty():
                text = str(self.view.substr(region).encode(self.enc()), 'utf8')
                # BUGFIX/cleanup: json.loads(..., encoding=...) was deprecated
                # since 3.1 and removed in Python 3.9; the argument is
                # ignored for str input anyway.
                data = json.loads(text)
                output = json.dumps(data, indent=4, sort_keys=True)
                self.view.replace(edit, region, output)

    def enc(self):
        """View encoding, or the configured default when undefined."""
        if self.view.encoding() == 'Undefined':
            return self.view.settings().get('default_encoding', 'UTF-8')
        else:
            return self.view.encoding()

    def recursivePrint(self, src, dpth=0, key=''):
        """Recursively append a printable dump of *src* to self.output.

        Legacy helper kept for compatibility; run() no longer uses it.
        """
        tabs = lambda n: '\t' * n * 1  # indentation per depth level
        brace = lambda s, n: '%s%s%s' % ('[' * n, s, ']' * n)
        if isinstance(src, dict):
            for key, value in src.items():
                if isinstance(value, dict) or (isinstance(value, list)):
                    self.output += tabs(dpth) + brace(key, dpth) + "\n"
                self.recursivePrint(value, dpth + 1, key)
        elif (isinstance(src, list)):
            self.i = 0
            for litem in src:
                self.recursivePrint(litem, dpth + 1)
        else:
            if key:
                self.output += tabs(dpth) + '[%s] => %s' % (key, src) + "\n"
            else:
                self.i = self.i + 1
                self.output += tabs(dpth) + str(self.i) + ' => %s' % src + "\n"
class StringUtilitiesTestCommand(sublime_plugin.TextCommand):
    """Insert the external IP address at every caret (debug command)."""

    def run(self, edit):
        # BUGFIX: the original called the Python 2 `urllib2` module (a
        # NameError on ST3's Python 3) and then .encode()d what urlopen
        # already returns as bytes. Decode once instead.
        from urllib.request import urlopen
        ext_ip = urlopen('http://api.long.ge/sublimetext/ip.php').read().decode(self.enc())
        for region in self.view.sel():
            self.view.insert(edit, region.begin(), ext_ip)

    def enc(self):
        """View encoding, or the configured default when undefined."""
        if self.view.encoding() == 'Undefined':
            return self.view.settings().get('default_encoding', 'UTF-8')
        else:
            return self.view.encoding()
class PhpObjectToArrayCommand(sublime_plugin.TextCommand):
    """
    Converts PHP object access into PHP array access,
    e.g. $obj->variable becomes $obj['variable'].
    """

    def run(self, edit):
        for region in self.view.sel():
            if not region.empty():
                source_text = self.view.substr(region)
                if "->" not in source_text:
                    # BUGFIX: was `pass`, which fell through to the split
                    # below and crashed with IndexError on fragments[1].
                    continue
                # Rewrite every arrow access, not only the first one
                # (the original silently dropped $a->b->c's third fragment).
                head, *props = source_text.split("->")
                result = head + ''.join("['{}']".format(p) for p in props)
                self.view.replace(edit, region, result)
| {
"repo_name": "akalongman/sublimetext-stringutilities",
"path": "stringutilities.py",
"copies": "1",
"size": "21979",
"license": "mit",
"hash": -6603776844865107000,
"line_mean": 36.3792517007,
"line_max": 137,
"alpha_frac": 0.5859684244,
"autogenerated": false,
"ratio": 3.743655254641458,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9814180207265962,
"avg_score": 0.0030886943550991366,
"num_lines": 588
} |
__author__ = 'awhite'
from datetime import datetime
from pylatex import Document, Section, Subsection, \
    Table, Package, Enumerate
# Interactive LaTeX document builder state, driven by the REPL in main().
document = Document()
# Stack of (container, cursor) pairs; \enter pushes, \exit pops.
stack = []
document.packages.append(Package('geometry', options=['tmargin=1cm', "lmargin=2.4cm"]))
stack.append((document, 0))
document.append("hello")
# Index of the first child shown when listing the current container.
context = 0
# Number of children listed at a time.
maxlist = 3
# User-defined aliases created with \alias: name -> element.
symbols = dict()
# Labels assigned with \label: element -> label text.
labels = dict()
settings=dict()
# When true, append_to_thing() inserts a paragraph break before each append.
settings['auto-para']=False
def get_qualified_name(item):
    """Return a one-line, tab-separated description of *item* for listings.

    The first field names the element type (or is the raw string itself);
    the second field is the user-assigned label, when one exists.
    """
    # NOTE(review): `global symbols` is unnecessary here — symbols is never
    # assigned in this function.
    global symbols
    p = ""
    writething = ""
    l = type(item)
    # Plain type equality (not isinstance): subclasses of these pylatex
    # types fall through with an empty description.
    if l == str:
        p = item
    if l == Document:
        p = "Document"
    if l == Enumerate:
        p = "List"
    if l == Table:
        p = "Table"
    if l == Section:
        p = "Section:" + str(item)
    if l == Subsection:
        p = 'Subsection:' + str(item)
    if item in labels:
        writething=labels[item]
    return "{0}\t{1}".format(p, writething)
def printwithin(l, i, j):
    """Print entries l[i:i+j], one per line, prefixed with their index."""
    total = len(l)
    print("Current element has {0} Elements\n".format(total))
    for idx in range(i, i + j):
        if idx >= total:
            continue
        print("{0}:{1}".format(idx, get_qualified_name(l[idx])))
def append_to_thing(thing, item):
    """Append *item* to container *thing*.

    Enumerate containers use pylatex's add_item(); everything else gets a
    plain append, optionally preceded by an empty element (a paragraph
    break) when the 'auto-para' setting is on.
    """
    if type(thing) == Enumerate:
        thing.add_item(item)
    else:
        #Break the paragraph
        if settings['auto-para']:
            thing.append("")
        thing.append(item)
def append_mode(i):
    """Read lines from stdin and append each one to container *i* until a
    line contains a \\quit or \\exit token; that line itself is discarded."""
    running = True
    while running:
        line = input("")
        for token in line.split(" "):
            if token in ("\\quit", "\\exit"):
                running = False
        if running:
            append_to_thing(i, line)
#todo rewrite to be more functional, so that things can be interpreted in a more meaningingful manner, without as much work.
def main():
    """REPL that edits the module-level pylatex `document`.

    The user navigates a tree of containers with backslash commands
    (\\enter/\\exit), appends text or new sub-containers, and renders the
    result with \\generate <name>.
    """
    global context
    i, context = stack[0]
    quityet = False
    while not quityet:
        printwithin(i, context, maxlist)
        prompt = ""
        # BUGFIX: was `type(i) == type(Table)`, which compares against the
        # metaclass and could never match a Table instance.
        if type(i) == Table:
            prompt = "Table>"
        if type(i) == Document:
            prompt = "Document>"
        if type(i) == Subsection:
            prompt = "Subsection>"
        if type(i) == Section:
            prompt = "Section>"
        if type(i) == Enumerate:
            prompt = "List>"
        line = input(prompt)
        g = line.split(" ")
        if len(g) == 1:
            # Commands that take no argument.
            if g[0] == '\\append':
                v = input("line")
                append_to_thing(i, v)
                # Keep the cursor tracking the end of the listing.
                if len(i) - maxlist > context:
                    context += 1
            if g[0] == '\\list':
                v = Enumerate()
                append_to_thing(i, v)
                stack.append((i, context))
                context = 0
                i = v
            if g[0] == '\\append-mode':
                append_mode(i)
            if g[0] == '\\date':
                append_to_thing(i, str(datetime.now()))
            if g[0] == '\\table':
                v = Table('rc|cl')
                append_to_thing(i, v)
                stack.append((i, context))
                context = 0
                i = v
            if g[0] == '\\para':
                append_to_thing(i, "")
            if g[0] == '\\para-auto':
                # BUGFIX: was `settings['auto-para'] = settings['auto-para']`,
                # a self-assignment that made this toggle command a no-op.
                settings['auto-para'] = not settings['auto-para']
        else:
            # Commands with arguments.
            if g[0] == "\\enter":
                v = i[int(g[1])]
                if type(v) == str:
                    print("It seems unwise to attempt that with a raw string.")
                else:
                    stack.append((i, context))
                    context = 0
                    i = v
            if g[0] == '\\exit':
                i, context = stack.pop()
            if g[0] == '\\generate':
                document.generate_pdf(g[1])
            if g[0] == '\\cursor':
                context = int(g[1])
            if g[0] == "\\delete" or g[0] == "\\remove":
                i.remove(i[int(g[1])])
            if g[0] == '\\append':
                v = int(g[1])
                if type(i[v]) == str:
                    i[v] += input()
            if g[0] == '\\alias':
                symbols[g[1]] = i[int(g[2])]
            if g[0] == '\\addrow':
                # Append the remaining arguments as a new table row.
                i.add_row(g[1::])
            if g[0] == '\\label':
                labels[i[int(g[1])]] = g[2]


main()
| {
"repo_name": "jaked122/minuteman",
"path": "main.py",
"copies": "1",
"size": "4247",
"license": "mit",
"hash": -2086963765417656000,
"line_mean": 26.9407894737,
"line_max": 124,
"alpha_frac": 0.4452554745,
"autogenerated": false,
"ratio": 3.6707000864304233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46159555609304237,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import random
import itertools
import scipy.misc
# import matplotlib.pyplot as plt
class gameOb():
    """One object on the grid: a position plus rendering and reward info."""

    def __init__(self, coordinates, size, intensity, channel, reward, name):
        # Grid position, unpacked from the (x, y) pair.
        self.x, self.y = coordinates[0], coordinates[1]
        # Rendering attributes: footprint, brightness, RGB channel index.
        self.size = size
        self.intensity = intensity
        self.channel = channel
        # Reward granted on contact (None for the hero) and object kind.
        self.reward = reward
        self.name = name
class gameEnv():
    """Square gridworld with a hero, goal squares (+1) and fire squares (-1).

    Observations are rendered as 84x84x3 images; the action space is the
    four cardinal moves.  When ``partial`` is True only a 3x3 window around
    the hero is rendered (partial observability).
    """

    def __init__(self, partial, size):
        self.sizeX = size
        self.sizeY = size
        self.actions = 4
        self.objects = []
        self.partial = partial
        a = self.reset()
        # plt.imshow(a, interpolation="nearest")

    def reset(self):
        """Re-populate the grid and return the initial observation."""
        self.objects = []
        # NOTE: creation order matters — each newPosition() consumes a draw
        # from numpy's global RNG, so the sequence is kept as before:
        # hero, then goal/fire/goal/fire/goal/goal.
        hero = gameOb(self.newPosition(), 1, 1, 2, None, 'hero')
        self.objects.append(hero)
        for channel, reward, name in ((1, 1, 'goal'), (0, -1, 'fire'),
                                      (1, 1, 'goal'), (0, -1, 'fire'),
                                      (1, 1, 'goal'), (1, 1, 'goal')):
            self.objects.append(
                gameOb(self.newPosition(), 1, 1, channel, reward, name))
        state = self.renderEnv()
        self.state = state
        return state

    def moveChar(self, direction):
        """Move the hero one cell (0 - up, 1 - down, 2 - left, 3 - right).

        Returns a movement penalty; currently always 0.0, including wall
        bumps (the bump branch is kept as a hook for a nonzero penalty).
        """
        hero = self.objects[0]
        heroX = hero.x
        heroY = hero.y
        penalize = 0.
        if direction == 0 and hero.y >= 1:
            hero.y -= 1
        if direction == 1 and hero.y <= self.sizeY - 2:
            hero.y += 1
        if direction == 2 and hero.x >= 1:
            hero.x -= 1
        if direction == 3 and hero.x <= self.sizeX - 2:
            hero.x += 1
        if hero.x == heroX and hero.y == heroY:
            penalize = 0.0
        self.objects[0] = hero
        return penalize

    def newPosition(self):
        """Return a uniformly random (x, y) cell not occupied by any object."""
        iterables = [range(self.sizeX), range(self.sizeY)]
        points = []
        for t in itertools.product(*iterables):
            points.append(t)
        currentPositions = []
        for objectA in self.objects:
            if (objectA.x, objectA.y) not in currentPositions:
                currentPositions.append((objectA.x, objectA.y))
        for pos in currentPositions:
            points.remove(pos)
        location = np.random.choice(range(len(points)), replace=False)
        return points[location]

    def checkGoal(self):
        """Resolve a collision between the hero and any other object.

        Returns (reward, done).  ``done`` is always False: episodes never
        terminate, a consumed object is respawned at a fresh cell.
        """
        others = []
        for obj in self.objects:
            if obj.name == 'hero':
                hero = obj
            else:
                others.append(obj)
        for other in others:
            if hero.x == other.x and hero.y == other.y:
                self.objects.remove(other)
                # Respawn an object of the same kind elsewhere.
                if other.reward == 1:
                    self.objects.append(gameOb(self.newPosition(), 1, 1, 1, 1, 'goal'))
                else:
                    self.objects.append(gameOb(self.newPosition(), 1, 1, 0, -1, 'fire'))
                return other.reward, False
        # No collision this step.  (The original tracked a dead `ended`
        # flag here; the fall-through is always reward 0, not done.)
        return 0.0, False

    def renderEnv(self):
        """Draw the grid (with a 1-cell white border) and upscale to 84x84x3."""
        a = np.ones([self.sizeY + 2, self.sizeX + 2, 3])
        a[1:-1, 1:-1, :] = 0
        hero = None
        for item in self.objects:
            a[item.y + 1:item.y + item.size + 1,
              item.x + 1:item.x + item.size + 1, item.channel] = item.intensity
            if item.name == 'hero':
                hero = item
        if self.partial == True:
            # Crop the 3x3 window centred on the hero (the +2 border makes
            # hero.y .. hero.y+3 the right slice without clamping).
            a = a[hero.y:hero.y + 3, hero.x:hero.x + 3, :]
        # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3;
        # running on a modern SciPy requires porting to PIL/numpy.
        b = scipy.misc.imresize(a[:, :, 0], [84, 84, 1], interp='nearest')
        c = scipy.misc.imresize(a[:, :, 1], [84, 84, 1], interp='nearest')
        d = scipy.misc.imresize(a[:, :, 2], [84, 84, 1], interp='nearest')
        a = np.stack([b, c, d], axis=2)
        return a

    def step(self, action):
        """Apply *action* and return (observation, reward, done).

        BUG FIX: both branches of the reward-is-None check previously
        returned the identical expression, and the debug branch crashed on
        ``reward + penalty`` (None + float).  checkGoal() never actually
        returns None, but the defensive branch is now safe.
        """
        penalty = self.moveChar(action)
        reward, done = self.checkGoal()
        state = self.renderEnv()
        if reward is None:
            print(done)
            print(reward)
            print(penalty)
            return state, penalty, done
        return state, (reward + penalty), done
def processState(states):
    """
    Flatten an 84x84x3 game state into a 1-D vector.

    :param states: game state, an 84x84x3 array
    :return: numpy array of shape (21168,) — the previous docstring's
             "(21168,1)" was inaccurate; np.reshape(..., [21168]) is 1-D
    """
    return np.reshape(states,[21168]) | {
"repo_name": "gourie/training_RL",
"path": "gridworld.py",
"copies": "1",
"size": "4761",
"license": "bsd-3-clause",
"hash": 397467847107681000,
"line_mean": 33.2589928058,
"line_max": 114,
"alpha_frac": 0.5345515648,
"autogenerated": false,
"ratio": 3.482809070958303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9509152436460229,
"avg_score": 0.0016416398596145762,
"num_lines": 139
} |
__author__ = 'Axion'
def _check_duplicates_in_path():
"""Check the $PATH environmental variable for duplicates."""
import os
path = os.environ['PATH']
path_list = [piece for piece in path.split(":")]
print('duplicates: {} items'.format(len(path_list)-len(set(path_list))))
path_count = {part: 0 for part in set(path_list)}
# Build new path with only unique path parts
new_path = ''
for i in range(len(path_list)):
if not path_count[path_list[i]]:
new_path += ':{}'.format(path_list[i])
path_count[path_list[i]] += 1
for v in path_count.values():
assert v == 1
print(new_path)
# This is the current (07/04/14) default path, kept as a reference sample of
# a $PATH containing duplicates.  NOTE(review): the literal below is a bare
# string expression — it is evaluated and discarded, never used by the code.
"/Library/Frameworks/Python.framework/Versions/2.7/bin:/Library/Frameworks/Python.framework/Versions/3.4/bin:/opt/local/bin:/opt/local/sbin:/usr/local/bin:/usr/local/sbin:/Library/Frameworks/Python.framework/Versions/2.7/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/local/git/bin"
if __name__ == '__main__':
    _check_duplicates_in_path() | {
"repo_name": "devinbarry/yellow-worktracker",
"path": "environ_path.py",
"copies": "1",
"size": "1075",
"license": "apache-2.0",
"hash": -3699288535244386300,
"line_mean": 30.6470588235,
"line_max": 293,
"alpha_frac": 0.6344186047,
"autogenerated": false,
"ratio": 3.287461773700306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4421880378400306,
"avg_score": null,
"num_lines": null
} |
# Package metadata for kaggler.
__author__ = "Aymen SHABOU, Nourhene MAALEL"
__license__ = "BSD"
# BUG FIX: the maintainer name was misspelled "Auymen SHABOU".
__maintainer__ = "Aymen SHABOU"
__email__ = "aymen.shabou@gmail.com"
__status__ = "Dev"
from setuptools import setup
from setuptools.command.test import test as TestCommand
import io
import os
import sys
import kaggler
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
    """Concatenate the contents of *filenames* into a single string.

    :param filenames: paths of the text files to read.
    :param encoding: text encoding to use (default ``'utf-8'``).
    :param sep: separator placed between file contents (default ``'\\n'``).
    :return: the joined file contents.
    """
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n')
    contents = []
    for name in filenames:
        with io.open(name, encoding=encoding) as handle:
            contents.append(handle.read())
    return sep.join(contents)
long_description = read('README.md', 'CHANGES.txt')
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates test discovery to pytest."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # No extra CLI args; let pytest use its own discovery.
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Imported here because pytest is only required when tests are run.
        import pytest
        errcode = pytest.main(self.test_args)
        sys.exit(errcode)
setup(
    name='kaggler',
    version=kaggler.__version__,
    url='https://github.com/aymen82/kaggler-competitions-scripts.git',
    license='BSD 3',
    author='Aymen SHABOU',
    tests_require=['pytest'],
    install_requires=[],
    cmdclass={'test': PyTest},
    author_email='aymen.shabou@gmail.com',
    description=' Python framework for kaggle competitions',
    long_description=long_description,
    packages=['kaggler'],
    include_package_data=True,
    platforms='any',
    test_suite='kaggler.test.test_kaggler',
    classifiers=[
        'Programming Language :: Python',
        # BUG FIX: 'Development Status :: 1 - Beta' is not a valid trove
        # classifier; beta status is '4 - Beta'.
        'Development Status :: 4 - Beta',
        'Natural Language :: English',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        # BUG FIX: 'License :: BSD' is not a valid trove classifier; the
        # registered form is 'License :: OSI Approved :: BSD License'.
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
    ],
    extras_require={
        'testing': ['pytest'],
    }
)
| {
"repo_name": "aymen82/kaggler-competitions-scripts",
"path": "setup.py",
"copies": "1",
"size": "1998",
"license": "bsd-3-clause",
"hash": -8572356153580306000,
"line_mean": 26.3698630137,
"line_max": 79,
"alpha_frac": 0.6311311311,
"autogenerated": false,
"ratio": 3.6526508226691043,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47837819537691045,
"avg_score": null,
"num_lines": null
} |
# Copyright (c) 2009-2012 Advanced Micro Devices, Inc.
# Copyright (c) 2012-2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Power, Joel Hestness
import optparse
import os
import sys
from os.path import join as joinpath
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../../gem5/configs/common')
addToPath('../../gem5/configs/ruby')
addToPath('../../gem5/configs/topologies')
addToPath('gpu_protocol')
import GPUConfig
import GPUMemConfig
import Options
import Ruby
import Simulation
from FSConfig import *
from SysPaths import *
from Benchmarks import *
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addFSOptions(parser)
GPUConfig.addGPUOptions(parser)
GPUMemConfig.addMemCtrlOptions(parser)
#
# Add the ruby specific and protocol specific options
#
Ruby.define_options(parser)
(options, args) = parser.parse_args()
#system options
options.ruby = True
#options.cpu_type = "detailed" #for an OOO core, timing for an inorder core
options.num_cpus = 4
options.cpu_clock = '1.2GHz'
options.l1d_size = "32kB"
options.l1d_assoc = 4
options.l1i_size = "32kB"
options.l1i_assoc = 4
options.l2_size = "1MB"
###########################################################################
#GPU configs
options.gpgpusim_config = "gpu_soc.config"
#We pin graphics memory so graphics accesses should not page fault.
options.access_host_pagetable = True
options.kernel_stats = True
#should be set by the gpgpusim config file anyway
options.clusters = 6
options.cores_per_cluster = 1
options.gpu_core_clock = "425MHz"
options.ctas_per_shader = 8
options.gpu_warp_size = 32
options.gpu_threads_per_core = 1536
options.sc_l1_size = "16kB"
options.sc_l1_assoc = 4
options.sc_l1_buf_depth = 24
options.sc_tl1_size = "16kB"
options.sc_tl1_assoc = 4
options.sc_tl1_buf_depth = 24
options.gpu_l1_pagewalkers = 12
options.gpu_tlb_entries = 8
options.gpu_tlb_assoc = 8
options.gpu_ttlb_entries = 8
options.gpu_ttlb_assoc = 8
options.gpu_num_l2caches = 1
options.sc_l2_size = "256kB"
options.sc_l2_assoc = 8
#options.gpu_l2_resource_stalls = ? default: False
options.pwc_size = "1kB"
options.pwc_assoc = 4
options.pwc_policy = "LRU"
options.flush_kernel_end = True
#gpu memory
options.shMemDelay = 1
#gpu_mem_* options are used in split mode ignoring here
#System memory conifg tbd
options.mem_type = "RubyLPDDR3_1600_x32"
options.total_mem_size = "2112MB"
options.num_dev_dirs = 0
options.num_dirs = 2
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
if buildEnv['TARGET_ISA'] != "arm":
fatal("gem5-gpu : this config works with an arm system!")
#
# CPU type configuration
#
if options.cpu_type != "timing" and options.cpu_type != "detailed":
print "Warning: gem5-gpu only works with timing and detailed CPUs. Defaulting to timing"
options.cpu_type = "timing"
print "Running Ruby with %s CPU model" % options.cpu_type
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
#
# Memory space configuration
#
(cpu_mem_range, gpu_mem_range) = GPUConfig.configureMemorySpaces(options)
#
# Setup benchmark to be run
#
if options.benchmark:
try:
bm = Benchmarks[options.benchmark]
except KeyError:
print "Error benchmark %s has not been defined." % options.benchmark
print "Valid benchmarks are: %s" % DefinedBenchmarks
sys.exit(1)
else:
bm = [SysConfig(disk=options.disk_image)]
bm[0].memsize = '%dB' % cpu_mem_range.size()
# Hard code the cache block width to 128B for now
# TODO: Remove this if/when block size can be different than 128B
if options.cacheline_size != 128:
print "Warning: Only block size currently supported is 128B. Defaulting to 128."
options.cacheline_size = 128
#allow host page table accesses to allow GPU to access the timing TLB.
#We pin graphics memory so graphics accesses should not page fault.
options.access_host_pagetable = True
#
# Instantiate system
#
system = makeArmSystem(test_mem_mode, options.machine_type, bm[0],
options.dtb_filename,
bare_metal=False, ruby=True)
if options.enable_context_switch_stats_dump:
    # BUG FIX: `test_sys` is not defined anywhere in this script (the name
    # was copied from gem5's fs.py); the system object here is `system`,
    # so the original line raised NameError whenever the option was set.
    system.enable_context_switch_stats_dump = True
# Set the cache line size for the entire system
system.cache_line_size = options.cacheline_size
# Create a top-level voltage domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
# Create a CPU volatage domain
system.cpu_voltage_domain = VoltageDomain()
# Create a source clock for the CPUs and set the clock peroid
system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
voltage_domain = system.cpu_voltage_domain)
if options.kernel is not None:
system.kernel = binary(options.kernel)
if options.script is not None:
system.readfile = options.script
if options.lpae:
system.have_lpae = True
# Assign all the CPUs to the same clock domain
system.cpu = [CPUClass(cpu_id = i, clk_domain = system.cpu_clk_domain)
for i in xrange(options.num_cpus)]
Simulation.setWorkCountOptions(system, options)
#
# Create the GPU
#
system.gpu = GPUConfig.createGPU(options, gpu_mem_range)
# Create the appropriate memory controllers and connect them to the
# PIO bus
system.mem_ctrls = [SimpleMemory(range = r) for r in system.mem_ranges]
for i in xrange(len(system.mem_ctrls)):
system.mem_ctrls[i].port = system.iobus.master
#
# Setup Ruby
#
system.ruby_clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
Ruby.create_system(options, system, system.iobus, system._dma_ports)
system.gpu.ruby = system.ruby
system.ruby.clk_domain = system.ruby_clk_domain
#
# Connect CPU ports
#
# Wire each CPU's instruction/data ports and TLB walkers into Ruby.
for (i, cpu) in enumerate(system.cpu):
    cpu.clk_domain = system.cpu_clk_domain
    cpu.createThreads()
    cpu.createInterruptController()
    #
    # Tie the cpu ports to the correct ruby system ports
    #
    cpu.icache_port = system.ruby._cpu_ports[i].slave
    cpu.dcache_port = system.ruby._cpu_ports[i].slave
    if buildEnv['TARGET_ISA'] == "arm":
        cpu.itb.walker.port = system.ruby._cpu_ports[i].slave
        cpu.dtb.walker.port = system.ruby._cpu_ports[i].slave
    else:
        # BUG FIX: the message previously said "non-x86" although the guard
        # tests for ARM; this config only wires TLB walkers for ARM.
        # (Also dropped the unused local `ruby_port`.)
        fatal("Not sure how to connect TLB walker ports in non-ARM system!")
    system.ruby._cpu_ports[i].access_phys_mem = True
#
# Connect GPU ports
#
GPUConfig.connectGPUPorts(system.gpu, system.ruby, options)
GPUMemConfig.setDRAMMemoryControlOptions(system, options)
#
# Finalize setup and run
#
root = Root(full_system = True, system = system)
if options.timesync:
root.time_sync_enable = True
if options.frame_capture:
VncServer.frame_capture = True
#m5.disableAllListeners()
Simulation.run(options, root, system, FutureClass)
| {
"repo_name": "ayoubg/gem5-graphics",
"path": "gem5-gpu/configs/soc_arm.py",
"copies": "1",
"size": "8668",
"license": "bsd-3-clause",
"hash": -6888815840372322000,
"line_mean": 29.4140350877,
"line_max": 92,
"alpha_frac": 0.7249653899,
"autogenerated": false,
"ratio": 3.372762645914397,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9571266572395786,
"avg_score": 0.005292292683722205,
"num_lines": 285
} |
__author__ = 'azad'
def ComputeResult(categories, booleanAns):
    """Aggregate per-category answer correctness.

    :param categories: category name for each answered question.
    :param booleanAns: 1/0 (or True/False) correctness flag per question.
    :return: list of ``(category, percent_correct, bootstrap_color)``
             tuples; color is 'success' above 50% and 'danger' otherwise.
             Order follows set iteration over the categories, as before.
    """
    # Materialize the pairing once: the old code re-iterated the raw zip()
    # per category, which silently yields empty results on Python 3 where
    # zip() is a one-shot iterator.  (Also removed leftover debug prints
    # and a dead `color = []` initializer.)
    assoc_answer = list(zip(categories, booleanAns))
    result = []
    for category in set(categories):
        answers = [ans for cat, ans in assoc_answer if cat == category]
        total = len(answers)
        total_correct = len([a for a in answers if a == 1])
        percent = total_correct * 100.0 / float(total)
        color = 'success' if percent > 50 else 'danger'
        result.append((category, percent, color))
    return result
from app.models import *
class ResultProcessor():
    """Scores a user's submitted answers against the stored answer key."""

    def __init__(self, userId):
        # The user whose answers are being processed.
        self.userID = userId

    def get_user_answer(self, answer_list):
        """Compare submitted answers with the database's correct answers.

        :param answer_list: iterable of (question_id, answer) pairs.
        :return: the per-category percentage summary from ComputeResult().
        """
        self.answer_list = answer_list
        self.ids = [pair[0] for pair in answer_list]
        self.user_ans = [int(pair[1]) for pair in answer_list]
        # One Question lookup per id for the key, and one more for the
        # category, mirroring the original query pattern.
        self.correct_ans = [
            Question.query.filter_by(questionID=qid).first().correctAnswer
            for qid in self.ids
        ]
        self.categoriesID = [
            Question.query.filter_by(questionID=qid).first().categoryID
            for qid in self.ids
        ]
        self.cmpans = [
            correct == given
            for correct, given in zip(self.correct_ans, self.user_ans)
        ]
        self.categoriesname = [
            Category.query.filter_by(ID=cid).first().category
            for cid in self.categoriesID
        ]
        return ComputeResult(self.categoriesname, self.cmpans)

    def update_database(self):
        # NOTE(review): filters by userID but repeats the lookup once per
        # question id without using it — looks unfinished; left as-is.
        self.solve_ans = [
            SolveProblems.query.filter_by(userID=self.userID).first().correctAnswer
            for x in self.ids
        ]
| {
"repo_name": "saifulazad/myApp",
"path": "app/ResultProcessor.py",
"copies": "1",
"size": "1780",
"license": "bsd-3-clause",
"hash": 7919570361682186000,
"line_mean": 29.1694915254,
"line_max": 118,
"alpha_frac": 0.6039325843,
"autogenerated": false,
"ratio": 3.4165067178502877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45204393021502876,
"avg_score": null,
"num_lines": null
} |
__author__ = 'azad'
import os
from flask import Flask, request, redirect, url_for
from werkzeug import secure_filename
UPLOAD_FOLDER = '/home/azad/myApp/app/static/img'
ALLOWED_EXTENSIONS = set(['png', 'jpeg', 'jpg'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True if *filename* has an allowed image extension.

    BUG FIX: the comparison is now case-insensitive — previously
    'photo.PNG' was rejected because ALLOWED_EXTENSIONS is lowercase.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route("/", methods=['GET', 'POST'])
def index():
    """Show the upload form plus a directory listing; accept POSTed images.

    On POST, saves an allowed file into UPLOAD_FOLDER and redirects back
    here (Post/Redirect/Get).  A POST without a usable file falls through
    to the listing page, as before.
    """
    if request.method == 'POST':
        # BUG FIX: request.files['file'] raised KeyError (HTTP 500) when
        # the form was submitted without a file part; .get() yields None.
        # Also removed a leftover Python-2-style debug `print file`.
        file = request.files.get('file')
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return redirect(url_for('index'))
    return """
    <!doctype html>
    <title>Upload new File</title>
    <h1>Upload new File</h1>
    <form action="" method=post enctype=multipart/form-data>
    <p><input type=file name=file>
         <input type=submit value=Upload>
    </form>
    <p>%s</p>
    """ % "<br>".join(os.listdir(app.config['UPLOAD_FOLDER'],))
if __name__ == "__main__":
    # Listen on all interfaces; debug=True is unsafe outside development.
    app.run(host='0.0.0.0', port=5001, debug=True) | {
"repo_name": "saifulazad/myApp",
"path": "app/uploa.py",
"copies": "1",
"size": "1184",
"license": "bsd-3-clause",
"hash": 5497421564379432000,
"line_mean": 31.027027027,
"line_max": 74,
"alpha_frac": 0.6148648649,
"autogenerated": false,
"ratio": 3.3925501432664755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9484828309970021,
"avg_score": 0.0045173396392908585,
"num_lines": 37
} |
__author__ = 'azenk'
from math import log
class Controller(object):
"""
This class models a raid controller or hba
"""
def __init__(self):
# Enclosure dict, keys are enclosure ids
self._enclosures = dict()
# Array Dict, keys are array_id
self._arrays = dict()
def enclosures(self):
"""
Iterator over all enclosures connected to this controller
"""
for enclosure_id, enclosure in iter(sorted(self._enclosures.iteritems())):
yield enclosure
def arrays(self):
for id, array in iter(sorted(self._arrays.iteritems())):
yield array
def drives(self):
"""
Iterator over all drives connected to this controller
"""
for enclosure in self.enclosures():
for drive in enclosure.drives():
yield drive
def create_array(self,diskarray):
"""
Creates an array on the controller. This method should be overridden by each controller type
:param diskarray an instance of the DiskArray object that the controller will attempt to create
"""
pass
def create_global_hotspare(self,drive):
"""
Makes the specified drive into a global hotspare
"""
pass
class DiskArray(object):
"""
This class models a storage array. A collection of disks
combined using some sort of raid technique.
"""
def __init__(self):
self._array_id = None
self._drives = []
self._raid_level = None
@property
def raid_level(self):
return self._raid_level
@raid_level.setter
def raid_level(self, value):
self._raid_level = value
@property
def array_id(self):
return self._array_id
@array_id.setter
def array_id(self, value):
self._array_id = value
def add_drive(self, drive):
self._drives.append(drive)
@property
def drive_count(self):
return len(self._drives)
def drives(self):
for drive in self._drives:
yield drive
def __str__(self):
disks = map(lambda x: "{0}:{1}".format(x.enclosure.enclosure_id,x.slot_number),self._drives)
return "Array {0} {1}".format(self.array_id,disks)
class Enclosure(object):
"""
This class models a disk enclosure. Typically this actually
corresponds with one backplane.
"""
def __init__(self,enclosure_id=0):
self.enclosure_id = enclosure_id
# drives, keys are slot ids
self._drives = dict()
self._slots = None
@property
def enclosure_id(self):
return self._enclosure_id
@enclosure_id.setter
def enclosure_id(self, value):
if isinstance(value,basestring):
value = int(value)
self._enclosure_id = value
@property
def slots(self):
return self._slots
@slots.setter
def slots(self, value):
#print("Setting number of slots {0}".format(value))
self._slots = value
def add_drive(self,drive):
slot_id = drive.slot_number
self._drives[slot_id] = drive
def drive(self, slot_id):
if self._drives.has_key(slot_id):
return self._drives[slot_id]
else:
return None
def drives(self):
"""
Iterates over the drives that are connected to
this enclosure
"""
for slot_number,drive in iter(sorted(self._drives.iteritems())):
yield drive
def __str__(self):
return "Enclosure {0}: {1} drives {2} slots".format(self.enclosure_id,len(self._drives),self.slots)
class Drive(object):
"""
This class models a drive.
"""
def __init__(self,enclosure=None):
self._enclosure = enclosure
self._serial_number = None
self._manufacturer = None
self._model_number = None
self._wwn = None
self._predictive_failure_count = None
self._other_errors = None
self._media_errors = None
self._raw_size_bytes = None
self._coerced_size_bytes = None
self._temperature_c = None
@property
def raw_size(self):
return self._raw_size_bytes
@raw_size.setter
def raw_size(self, value):
self._raw_size_bytes = value
@property
def coerced_size(self):
return self._coerced_size_bytes
@coerced_size.setter
def coerced_size(self, value):
self._coerced_size_bytes = value
@property
def temperature(self):
return self._temperature_c
@temperature.setter
def temperature(self, value):
self._temperature_c = value
@property
def other_errors(self):
return self._other_errors
@other_errors.setter
def other_errors(self, value):
self._other_errors = value
@property
def media_errors(self):
return self._media_errors
@media_errors.setter
def media_errors(self, value):
self._media_errors = value
@property
def slot_number(self):
"""
The number of the slot in the enclosure that this disk occupies
"""
return self._slot_number
@slot_number.setter
def slot_number(self, value):
if isinstance(value,basestring):
value = int(value)
self._slot_number = value
@property
def serial_number(self):
return self._serial_number
@serial_number.setter
def serial_number(self, value):
self._serial_number = value
@property
def manufacturer(self):
return self._manufacturer
@manufacturer.setter
def manufacturer(self, value):
self._manufacturer = value
@property
def wwn(self):
return self._wwn
@wwn.setter
def wwn(self, value):
self._wwn = value
@property
def model_number(self):
return self._model_number
@model_number.setter
def model_number(self, value):
self._model_number = value
@property
def status(self):
try:
return self._status
except:
return "Unknown"
@property
def spunup(self):
try:
return self._spunup
except:
return None
@spunup.setter
def spunup(self, value):
self._spunup = value
@status.setter
def status(self, value):
self._status = value
@property
def enclosure(self):
return self._enclosure
@property
def predictive_failure_count(self):
return self._predictive_failure_count
@predictive_failure_count.setter
def predictive_failure_count(self, value):
self._predictive_failure_count = value
@property
def health(self):
"""
:return: A weighted drive health score
"""
if self.status not in ["Online", "Unconfigured(good)", "Hotspare"]:
if self.status in ["Unconfigured(bad)", "Rebuild"]:
health_score = 50.0
else:
health_score = 0
else:
health_score = 100
if self.media_errors is not None:
health_score += -15 * log(self.media_errors + 1)
#if self.other_errors is not None:
# Supposedly these errors are not disk related
#health_score += -15 * log(self.other_errors + 1)
if self.predictive_failure_count is not None:
health_score += -60 * log(self.predictive_failure_count + 1)
return max(0.0,health_score)
def __str__(self):
return ("Drive {drive.manufacturer} {drive.model_number} {drive.serial_number} " +
"{drive.enclosure.enclosure_id: >3}:{drive.slot_number: <3} " +
"{drive.health: >05.2f} {drive.status:10} {drive.raw_size: >012d} " +
"me:{drive.media_errors:4} oe:{drive.other_errors:4} pfc:{drive.predictive_failure_count:4} " +
"Spun up? {drive.spunup}").format(drive=self)
| {
"repo_name": "azenk/linuxdisktools",
"path": "disktools/base.py",
"copies": "1",
"size": "8018",
"license": "cc0-1.0",
"hash": -4261677775385076000,
"line_mean": 25.2026143791,
"line_max": 119,
"alpha_frac": 0.5824395111,
"autogenerated": false,
"ratio": 3.94392523364486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.502636474474486,
"avg_score": null,
"num_lines": null
} |
from django.conf.urls import url
from django.conf.urls import handler404, handler500
from . import views
from jaspar import settings
from django.views.static import serve
# Portal URL routes.  Order matters: specific literal paths must precede
# the broad regex captures (e.g. matrix/<id>) declared further down.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^search/?$', views.search, name='search'),
    url(r'^docs/$', views.documentation, name='documentation'),
    url(r'^tools/$', views.tools, name='tools'),
    url(r'^contact-us/?$', views.contact_us, name='contact_us'),
    url(r'^about/$', views.about, name='about'),
    url(r'^faq/$', views.faq, name='faq'),
    url(r'^changelog/$', views.changelog, name='changelog'),
    # API documentation page
    url(r'^api/$', views.api_documentation, name='api_documentation'),
    url(r'^inference/?$', views.profile_inference, name='profile_inference'),
    url(r'^align/?$', views.matrix_align, name='matrix_align'),
    url(r'^analysis/?$', views.analysis, name='analysis'),
    url(r'^profile-versions/?$', views.profile_versions, name='profile_versions'),
    url(r'^sites/(?P<matrix_id>.+)/$', views.html_binding_sites, name='html_binding_sites'),
    url(r'^matrix/(?P<matrix_id>[\w.]+)/$', views.matrix_detail, name='matrix_detail'),
    url(r'^matrix/(?P<base_id>\w+)/versions/$', views.matrix_versions, name='matrix_versions'),
    url(r'^matrix/(?P<matrix_id>[\w.]+)/svg/$', views.svg_logo, name='svg_logo'),
    url(r'^collection/(?P<collection>\w+)/$', views.browse_collection, name='browse_collection'),
    url(r'^cart/$', views.view_cart, name='view_cart'),
    url(r'^cart/empty$', views.empty_cart, name='empty_cart'),
    url(r'^matrix-clusters/$', views.matrix_clustering, name='matrix_clustering'),
    url(r'^matrix-clusters/(?P<tax_group>\w+)/$', views.radial_tree, name='radial_tree'),
    url(r'^genome-tracks/$', views.genome_tracks, name='genome_tracks'),
    # Legacy URL redirection for the old CGI endpoints.
    # NOTE(review): the next two patterns share the name 'url_redirection';
    # reverse('url_redirection') resolves only the last one — confirm that
    # named reversal is never needed for the first.
    url(r'^cgi-bin/jaspar_db.pl?$', views.url_redirection, name='url_redirection'),
    url(r'^html/DOWNLOAD/?$', views.url_redirection, name='url_redirection'),
    #url(r'^news/(?P<slug>[\w-]+)/$', views.news_and_updates, name='news_and_updates'),
    url(r'^blog/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<slug>[\w-]+)$', views.post_details, name='post_details'),
    url(r'^blog/$', views.post_list, name='post_list'),
    url(r'^tour/$', views.tour_video, name='tour_video'),
    url(r'^downloads/$', views.download_data, name='download_data'),
    #enable this url to create zip/txt files for downloads page
    #url(r'^downloads-internal/$', views.internal_download_data, name='internal_download_data'),
    url(r'^temp/(?P<path>.*)$', serve, {'document_root': settings.TEMP_DIR}),
    url(r'^download/(?P<path>.*)$', serve, {'document_root': settings.DOWNLOAD_DIR, 'show_indexes': True,}),
]
# Dotted-path overrides for Django's default 404/500 handlers.
handler404 = 'portal.views.page_not_found'
handler500 = 'portal.views.server_error'
| {
"repo_name": "asntech/jaspar",
"path": "portal/urls.py",
"copies": "1",
"size": "2967",
"license": "bsd-3-clause",
"hash": -3478099893597289000,
"line_mean": 42.6323529412,
"line_max": 134,
"alpha_frac": 0.6423997304,
"autogenerated": false,
"ratio": 3.2320261437908497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.437442587419085,
"avg_score": null,
"num_lines": null
} |
from django.contrib import admin
from .models import Matrix, MatrixAnnotation, MatrixData, Tffm, Post
# Django admin configuration: each ModelAdmin tunes the change-list page
# (columns, search fields, sidebar filters) for one portal model.
class MatrixAdmin(admin.ModelAdmin):
    list_display = ('base_id', 'id','name','collection','version',)
    search_fields = ['collection', 'id', 'name', 'base_id']
    list_filter = ('collection',)
admin.site.register(Matrix, MatrixAdmin)
class MatrixAnnotationAdmin(admin.ModelAdmin):
    list_display = ('matrix_id', 'tag','val',)
    search_fields = ['tag', 'val']
    list_filter = ('tag',)
# Registration deliberately disabled; the class is kept for reference.
#admin.site.register(MatrixAnnotation, MatrixAnnotationAdmin)
class MatrixDataAdmin(admin.ModelAdmin):
    list_display = ('matrix_id', 'row','col','val',)
    search_fields = ['row', 'col']
# Registration deliberately disabled; the class is kept for reference.
#admin.site.register(MatrixData, MatrixDataAdmin)
class TffmAdmin(admin.ModelAdmin):
    list_display = ('base_id', 'name','matrix_base_id','matrix_version',)
    search_fields = ['matrix_base_id', 'base_id','name']
admin.site.register(Tffm, TffmAdmin)
class NewsAndUpdateAdmin(admin.ModelAdmin):
    list_display = ('title', 'author','category','date')
    search_fields = ['title', 'author','category']
    list_filter = ('category','author',)
admin.site.register(Post, NewsAndUpdateAdmin) | {
"repo_name": "asntech/jaspar",
"path": "portal/admin.py",
"copies": "1",
"size": "1254",
"license": "bsd-3-clause",
"hash": 5996565469843459000,
"line_mean": 26.2826086957,
"line_max": 70,
"alpha_frac": 0.7073365231,
"autogenerated": false,
"ratio": 3.142857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4350193665957143,
"avg_score": null,
"num_lines": null
} |
"""jaspar URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
# Core Django helpers, admin site, and project settings.
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
# DRF auto-generated documentation and schema helpers.
from rest_framework.documentation import include_docs_urls
from rest_framework.schemas import get_schema_view
# from rest_framework_swagger.renderers import OpenAPIRenderer, SwaggerUIRenderer
# schema_view = get_schema_view(
# title='JASPAR API',
# #renderer_classes=[OpenAPIRenderer, SwaggerUIRenderer]
# )
from rest_framework_swagger.views import get_swagger_view
# Browsable Swagger UI for the live REST API.
schema_view = get_swagger_view(title='JASPAR REST Live API')
from django.contrib.sitemaps.views import sitemap
from .sitemaps import StaticViewSitemap
# Sitemap registry: only the static portal pages are indexed.
sitemaps = {
    'static': StaticViewSitemap,
}
# Branding shown in the Django admin header.
admin.site.site_header = 'JASPAR Admin'
# NOTE: pattern order matters -- Django uses the first match.
urlpatterns = [
    url(r'^', include('portal.urls')),
    url(r'^admin/', admin.site.urls),
    # Versioned REST API; v1 is the only published version.
    url(r'^api/v1/', include('restapi.v1.urls', namespace='v1')),
    #url(r'^api/current/', include('restapi.v1.urls', namespace='current')),
    url(r'^$', schema_view),
    #url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^api/v1/doc/', include_docs_urls(title='JASPAR RESTful API')),
    url(r'^api/v1/live/', schema_view),
    url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
]
# Mount the debug toolbar only in development builds.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
| {
"repo_name": "asntech/jaspar",
"path": "jaspar/urls.py",
"copies": "1",
"size": "2251",
"license": "bsd-3-clause",
"hash": 2784801447216899000,
"line_mean": 30.2638888889,
"line_max": 84,
"alpha_frac": 0.6974677921,
"autogenerated": false,
"ratio": 3.3397626112759644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4537230403375964,
"avg_score": null,
"num_lines": null
} |
__author__ = 'azo0502'
# import Raspberry Pi GPIO support into Python environment
import RPi.GPIO as GPIO
# import a sleep function from time module
from time import sleep
led = 18 # GPIO number where the led is connected
# Tell the GPIO module to use GPIO numbering used by processor
GPIO.setmode(GPIO.BCM)
# Set GPIO no 18 to output mode
GPIO.setup(led, GPIO.OUT)
# Second section of the script: a minimal WSGI web server that toggles the LED.
__author__ = 'azo0502'
# make_server is used to create this simple python webserver
from wsgiref.simple_server import make_server
# Function that is ran when a http request comes in
def simple_app(env, start_response):
    """WSGI handler: switch the LED on (/on) or off (/off), answer 200.

    :param env: WSGI environ dict; only PATH_INFO is inspected.
    :param start_response: WSGI callable used to send status and headers.
    :return: the response body as a one-element iterable of strings.
    """
    # set some http headers that are sent to the browser
    status = '200 OK'
    headers = [('Content-type', 'text/plain')]
    start_response(status, headers)
    # What did the user ask for?
    if env["PATH_INFO"] == "/on":
        print("user asked for /on")
        # NOTE(review): False drives the pin low for "on" -- the LED appears
        # to be wired active-low; confirm against the circuit.
        GPIO.output(led, False)
    elif env["PATH_INFO"] == "/off":
        print("user asked for /off")
        GPIO.output(led, True)
    else:
        print("user asked for something else")
    # Bug fixed: WSGI requires an iterable of body chunks; returning the bare
    # string made the server stream it one character per chunk.
    return ["Hello world!"]
# Create a small python server
httpd = make_server("", 8000, simple_app)
print "Serving on port 8000..."
print "You can open this in the browser http://192.168.1.xxx:8000 where xxx is your rpi ip aadress"
print "Or if you run this server on your own computer then http://localhost:8000"
# Blocks forever handling requests; stop with Ctrl-C.
httpd.serve_forever()
"repo_name": "TonyMcTony/raspberry_pi_led",
"path": "server_led.py",
"copies": "1",
"size": "1400",
"license": "mit",
"hash": 1922082169256376000,
"line_mean": 30.8409090909,
"line_max": 99,
"alpha_frac": 0.6864285714,
"autogenerated": false,
"ratio": 3.626943005181347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48133715765813473,
"avg_score": null,
"num_lines": null
} |
__author__ = 'azo0502'
# import Raspberry Pi GPIO support into Python environment
import RPi.GPIO as GPIO
# import a sleep function from time module
from time import sleep
red_led = 10  # GPIO number where the red led channel is connected
green_led = 22  # GPIO number where the green led channel is connected
blue_led = 27  # GPIO number where the blue led channel is connected
# Tell the GPIO module to use GPIO numbering used by processor
GPIO.setmode(GPIO.BCM)
# Configure all three colour channels as outputs.
# Bug fixed: the original called GPIO.setup(led, GPIO.OUT), but no name
# ``led`` exists in this script (left over from the single-LED version),
# so it raised NameError at startup and left every pin unconfigured.
GPIO.setup(red_led, GPIO.OUT)
GPIO.setup(green_led, GPIO.OUT)
GPIO.setup(blue_led, GPIO.OUT)
__author__ = 'azo0502'
# make_server is used to create this simple python webserver
from wsgiref.simple_server import make_server
# Function that is ran when a http request comes in
def simple_app(env, start_response):
    """WSGI handler: light the RGB LED in the colour named by the URL path.

    :param env: WSGI environ dict; only PATH_INFO is inspected.
    :param start_response: WSGI callable used to send status and headers.
    :return: the response body as a one-element iterable of strings.
    """
    # set some http headers that are sent to the browser
    status = '200 OK'
    headers = [('Content-type', 'text/plain')]
    start_response(status, headers)
    path = env["PATH_INFO"]
    # Each endpoint maps to the set of channels it lights; compound colours
    # (cyan/yellow/magenta/white) mix two or three channels.
    colors = {
        "/red": (red_led,),
        "/green": (green_led,),
        "/blue": (blue_led,),
        "/cyan": (blue_led, green_led),
        "/yellow": (green_led, red_led),
        "/magenta": (blue_led, red_led),
        "/white": (green_led, red_led, blue_led),
    }
    if path in colors:
        print("user asked for %s light" % path.lstrip("/"))
        # Bug fixed: previously lit channels were never cleared, so colours
        # accumulated across requests (e.g. /red after /blue showed magenta)
        # and a pure colour could never be reached again.
        for channel in (red_led, green_led, blue_led):
            # NOTE(review): assumes True = on / False = off; confirm the
            # wiring polarity (the single-LED script used the opposite).
            GPIO.output(channel, False)
        for channel in colors[path]:
            GPIO.output(channel, True)
    else:
        print("user asked for something else")
    # Bug fixed: WSGI requires an iterable of body chunks, not a bare string.
    return ["Hello world!"]
# Create a small python server
httpd = make_server("", 8000, simple_app)
print "Serving on port 8000..."
print "You can open this in the browser http://192.168.1.xxx:8000 where xxx is your rpi ip aadress"
print "Or if you run this server on your own computer then http://localhost:8000"
# Blocks forever handling requests; stop with Ctrl-C.
httpd.serve_forever()
"repo_name": "TonyMcTony/raspberry_pi_led",
"path": "server_rgb_led.py",
"copies": "1",
"size": "2356",
"license": "mit",
"hash": -338882202333692700,
"line_mean": 34.7121212121,
"line_max": 99,
"alpha_frac": 0.6578947368,
"autogenerated": false,
"ratio": 3.658385093167702,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9803296386976773,
"avg_score": 0.0025966885981856974,
"num_lines": 66
} |
__author__ = 'azu'
import random
import numpy as np
from practices.config import SELECTED_FUNCTION, POPULATION_SIZE, NUMBER_OF_GENERATIONS, EPSILON, PRINTING_INTERVAL
from escom.pepo.utils import measure_time
from practices.second.particle_swarm_optimization.pso_fitness import function
from escom.pepo.config import NUMBER_OF_VARIABLES, VARIABLE_RANGE_START, VARIABLE_RANGE_END, VELOCITY_RANGE_START, \
VELOCITY_RANGE_END, \
logger
class Particle:
    """One member of the swarm: a position in search space, a velocity,
    and a memory of the best position this particle has personally found."""
    def __init__(self):
        # Random start position and velocity inside the configured ranges.
        self.position = generate_random_array(NUMBER_OF_VARIABLES, VARIABLE_RANGE_START, VARIABLE_RANGE_END)
        self.velocity = generate_random_array(NUMBER_OF_VARIABLES, VELOCITY_RANGE_START, VELOCITY_RANGE_END)
        self.fitness = evaluate(self.position)
        # "+ 0" forces a numpy copy so self.best does not alias self.position.
        self.best = self.position + 0
    def __str__(self):
        return "Position: " + str(self.position) + " Velocity: " + str(self.velocity) + " Fitness: " + str(
            self.fitness) + " Best: " + str(self.best)
    def move_position(self, best_of_all):
        """Advance this particle one PSO step toward *best_of_all*.

        :param best_of_all: best position found by the whole swarm (numpy array)
        """
        # NOTE(review): the position moves with the OLD velocity and the
        # velocity is then recomputed from the NEW position; canonical PSO
        # updates velocity first -- confirm this ordering is intentional.
        self.position += self.velocity
        # Velocity is pulled toward the personal best and the global best
        # with fresh per-component random weights (no inertia coefficient).
        self.velocity = self.velocity + [x * random.random() for x in (self.best - self.position)] + [
            x * random.random() for x in (best_of_all - self.position)]
        new_fitness = evaluate(self.position)
        if new_fitness < evaluate(self.best):
            self.best = self.position + 0
            # fitness is only refreshed on improvement, so it tracks the best
            # fitness seen rather than the current position's fitness.
            self.fitness = new_fitness
def generate_random_array(size, start, end):
    """Return a 1-D numpy array of *size* uniform samples from [start, end)."""
    samples = [start + (end - start) * random.random() for _ in range(size)]
    return np.array(samples)
def evaluate(position):
    """Score *position* with the benchmark function chosen by SELECTED_FUNCTION."""
    return function[SELECTED_FUNCTION](position)
def get_best_of_all(population, actual_best):
    """Return the best known position across the swarm's memories.

    Compares the best personal position of every particle against
    *actual_best* (the best position ever seen) and returns whichever
    has the lower fitness.

    :param population: list of Particle
    :param actual_best: numpy array, current best position ever
    :return: numpy array, updated best position
    """
    # min() with a key both finds the minimum and keeps the first tie,
    # matching the original fitness.index(min(fitness)) behaviour without
    # building an intermediate list or calling __getitem__ by hand.
    new_best = min(population, key=lambda p: evaluate(p.best)).best
    if evaluate(new_best) < evaluate(actual_best):  # a new "best position ever"
        return new_best
    return actual_best
def generate_population():
    """Create the initial swarm of POPULATION_SIZE random particles."""
    return [Particle() for _ in range(POPULATION_SIZE)]
@measure_time
def start_pso():
    """Run the Particle Swarm Optimization loop.

    Builds a random swarm, then iterates up to NUMBER_OF_GENERATIONS times
    moving every particle toward the global best.  Stops early once the
    best fitness is within EPSILON of zero.

    :return: the final population (list of Particle)
    """
    logger.info("*" * 80)
    logger.info("*" * 80)
    logger.info("Particle Swarm Optimization")
    logger.info("*" * 80)
    logger.info("Number of Generations: %s", NUMBER_OF_GENERATIONS)
    logger.info("Population Size: %s", POPULATION_SIZE)
    logger.info("Function: %s", SELECTED_FUNCTION)
    logger.info("Epsilon: %s", EPSILON)
    logger.info("*" * 80)
    logger.info("*" * 80)
    # Generate population[] of Particle()
    population = generate_population()
    # Get the best position in population
    best_of_all = get_best_of_all(population, population[0].best)
    logger.info("Best: %s Fitness: %s", best_of_all, evaluate(best_of_all))
    for y in range(NUMBER_OF_GENERATIONS):
        if abs(evaluate(best_of_all)) < EPSILON:  # Stop condition
            logger.info("*" * 80)
            logger.info("Best solution found at %s generation.", y)
            logger.info("Best of all: %s Fitness: %s", str(best_of_all), str(evaluate(best_of_all)))
            logger.info("*" * 80)
            break
        for particle in population:
            # Move each particle using its velocity
            particle.move_position(best_of_all)
            # Update best position ever in population's history
            best_of_all = get_best_of_all(population, best_of_all)
        if y % PRINTING_INTERVAL == 0:
            logger.debug("-" * 80)
            logger.debug("Generation: %s", y)
            logger.debug("Best of all: %s Fitness: %s", str(best_of_all), str(evaluate(best_of_all)))
            logger.debug("-" * 80)
    else:
        # Bug fixed: this summary used to run unconditionally, so the
        # "No better solution found" banner was also logged after a
        # successful early break.  The for/else clause only runs when the
        # loop was exhausted without breaking.
        logger.info("*" * 80)
        logger.info("No better solution found. ")
        logger.info("Best solution at %s generation", NUMBER_OF_GENERATIONS)
        logger.info("Best of all: %s Fitness: %s", str(best_of_all), str(evaluate(best_of_all)))
        logger.info("*" * 80)
    return population
# Script entry point: run the optimizer when executed directly.
if __name__ == '__main__':
    result = start_pso()
"repo_name": "pepo27/EvolutionaryComputing",
"path": "practices/second/particle_swarm_optimization/pso_evaluation.py",
"copies": "2",
"size": "4196",
"license": "apache-2.0",
"hash": 3317765030147564500,
"line_mean": 37.1545454545,
"line_max": 116,
"alpha_frac": 0.6391801716,
"autogenerated": false,
"ratio": 3.482157676348548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5121337847948548,
"avg_score": null,
"num_lines": null
} |
__author__ = 'b03418'
import sys
try:
    from functools import partial
except ImportError:  # python < 2.5
    class partial(object):  # Simple partial object to replace functools.partial
        """Bind positional and keyword arguments to *func* for later calls."""
        def __init__(self, func, *args, **keywords):
            self.func = func
            self.args = args
            self.keywords = keywords

        def __call__(self, *callargs, **callkeys):
            # Bug fixed: the original did ``keywords.copy(callkeys)`` (a
            # TypeError -- dict.copy takes no arguments) and then passed
            # ``**callkeys``, silently dropping the bound keywords.
            keywords = self.keywords.copy()
            keywords.update(callkeys)
            return self.func(*(self.args + callargs), **keywords)
class Decorator():
    # Placeholder left by the author; currently unused in this module.
    pass
# TODO: Decorate this one with @decorator annotation
def do_nothing(func, *args, **kwargs):
    """Pass-through wrapper: call *func* with its arguments and return the result.

    Bug fixed: the original called ``func(args, kwargs)``, handing the
    function a tuple and a dict as two positional arguments instead of
    unpacking them, and discarded the return value.
    """
    return func(*args, **kwargs)
#TODO: Decorate this one with @decorator annotation
def redirect_flow(old_flow=sys.stdout, new_flow=sys.stdout):
    """Return a caller that runs a function with sys.stdout sent to *new_flow*.

    Bugs fixed: the original rebound the *local* name ``old_flow`` (raising
    UnboundLocalError on entry and never touching the real stream) and
    returned ``decorator(call)`` where ``decorator`` was undefined.  This
    version swaps ``sys.stdout`` itself and always restores it.

    :param old_flow: kept for backward compatibility; the stream actually
        restored is whatever ``sys.stdout`` was when the call happens.
    :param new_flow: file-like object that receives stdout while running.
    :return: ``call(func, *args, **kwargs)`` which runs *func* redirected.
    """
    def call(func, *args, **kwargs):
        saved = sys.stdout
        sys.stdout = new_flow
        try:  # execute function
            result = func(*args, **kwargs)
        finally:  # always restore the original stream, even on error
            sys.stdout = saved
        return result
    return call
#TODO: Decorate this one with @decorator annotation
def redirect_stdout(new_stdout):
    """Shortcut for redirect_flow that only swaps stdout.

    Bug fixed: the original dropped the wrapper returned by
    ``redirect_flow``, so calling this function had no effect at all.
    """
    return redirect_flow(None, new_stdout)
#TODO: Decorate this one with @decorator annotation
def meonize(func, *args, **kwargs):
    """Memoize calls to *func*, caching results on the function object.

    Bugs fixed: ``getattr(func, "_meonize_dict_", dict)`` fell back to the
    ``dict`` *class* rather than an empty dict, the cache was never stored
    back on the function (so nothing persisted between calls), and keyword
    arguments were ignored both in the key and in the call.
    """
    cache = getattr(func, "_meonize_dict_", None)
    if cache is None:
        cache = {}
        func._meonize_dict_ = cache  # persist the cache across calls
    # Sorted kwargs make the key deterministic and hashable.
    key = (args, tuple(sorted(kwargs.items())))
    if key in cache:
        return cache[key]
    result = func(*args, **kwargs)
    cache[key] = result
    return result
#TODO: Decorate this one with @decorator annotation
def trace(f, *args, **kwargs):
    """Log the call being made, then run *f* and return its result.

    Bugs fixed: ``f.func_name`` is Python-2-only (``__name__`` works on
    both), and ``f(args, kwargs)`` passed the tuple/dict unexpanded and
    discarded the return value.
    """
    print("calling %s with arguments %s,%s" % (f.__name__, args, kwargs))
    return f(*args, **kwargs)
| {
"repo_name": "bossiernesto/python-snipplets",
"path": "decorator.py",
"copies": "1",
"size": "1669",
"license": "bsd-3-clause",
"hash": 4655643276479839000,
"line_mean": 24.6769230769,
"line_max": 83,
"alpha_frac": 0.6309167166,
"autogenerated": false,
"ratio": 3.684326710816777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4815243427416777,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bagrat'

from setuptools import setup, find_packages

# Third-party requirements: test-only and runtime.
tests_require = ['nose', 'coverage']
install_requires = ['pyflect']

# Trove classifiers advertised on PyPI.
classifiers = [
    'License :: OSI Approved :: MIT License',
    'Intended Audience :: Developers',
    'Natural Language :: English',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 3',
    'Operating System :: POSIX',
    'Operating System :: POSIX :: Linux',
    'Operating System :: MacOS',
    'Operating System :: Microsoft :: Windows',
    'Topic :: Software Development :: Libraries :: Python Modules',
]

# Keyword arguments handed to setuptools.setup().
config = {
    'name': 'pyerarchy',
    'version': '0.1',
    'description': 'Access file system using objects',
    'author': 'Bagrat Aznauryan',
    'author_email': 'bagrat@aznauryan.org',
    'url': 'git@github.com:n9code/pyerarchy.git',
    'download_url': 'git@github.com:n9code/pyerarchy.git',
    'license': 'MIT',
    'keywords': 'directory file object',
    'packages': find_packages(),
    'install_requires': install_requires,
    'tests_require': tests_require,
    'classifiers': classifiers,
}

setup(**config)
| {
"repo_name": "n9code/pyerarchy",
"path": "setup.py",
"copies": "1",
"size": "1252",
"license": "mit",
"hash": -8266065246034002000,
"line_mean": 32.8378378378,
"line_max": 78,
"alpha_frac": 0.5878594249,
"autogenerated": false,
"ratio": 4.051779935275081,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 37
} |
__author__ = 'bagrat'
def replace_version(content, from_version, to_version):
    """
    Replaces the value of the version specification value in the contents of
    ``content`` from ``from_version`` to ``to_version``.

    :param content: The string containing version specification
    :param from_version: The old version to be replaced
    :param to_version: The new version to be set
    :return: (result_content, number of occurrences)
    """
    frm_str = str(from_version)
    to_str = str(to_version)

    count = content.count(frm_str)
    # str.replace substitutes every occurrence by default; limiting it to
    # ``count`` (the total number of occurrences) was redundant.  Renamed
    # the misleading local ``result_setup_py`` -- this works on any text.
    result_content = content.replace(frm_str, to_str)

    return result_content, count
def update_files(from_version, to_version, files=None):
    """
    Rewrite every file in *files*, swapping *from_version* for *to_version*.

    See :func:`replace_version` for the substitution rules.

    :param from_version: The old version to be replaced.
    :param to_version: The new version to be set.
    :param files: A list of files to update; defaults to ``['setup.py']``.
    :return: dict mapping each filename to its occurrence count.
    """
    if not files:
        files = ['setup.py']

    counts = {}
    for filename in files:
        with open(filename, 'r') as source:
            original = source.read()

        updated, occurrences = replace_version(original, from_version, to_version)
        counts[filename] = occurrences

        with open(filename, 'w') as target:
            target.write(updated)

    return counts
class VersionRollback(object):
    # pylint: disable=too-few-public-methods
    """
    Undo helper: remembers an old/new version pair so the version bump can
    later be reverted by swapping them back in the affected files.
    """

    def __init__(self, old_version, new_version, files=None):
        super(VersionRollback, self).__init__()
        self._old_version = old_version
        self._new_version = new_version
        self._target_files = files

    def rollback(self):
        """Restore the old version string in every tracked file."""
        update_files(self._new_version, self._old_version, self._target_files)
| {
"repo_name": "n9code/pylease",
"path": "pylease/filemgmt.py",
"copies": "1",
"size": "1731",
"license": "mit",
"hash": -7185312423096885000,
"line_mean": 27.3770491803,
"line_max": 83,
"alpha_frac": 0.6331600231,
"autogenerated": false,
"ratio": 3.8127753303964758,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9944959553340348,
"avg_score": 0.000195160031225605,
"num_lines": 61
} |
__author__ = 'bajai'
import bestaplayer
import os
from time import sleep
class GameMaster:
def __init__(self, fichier, player):
self.bot_player = bestaplayer.BestaPlayer(fichier, player)
self.grid = self.bot_player.grille
self.fichier = fichier
def displayGrid(self):
"""
Implements function to display the current grid of play.
"""
os.system('clear')
for line in self.grid:
print ' '.join(line) + '\n'
def updateFileFromGrid(self):
"""
Implements function that update the file after a player give a token.
"""
with open(self.fichier, 'w') as fi:
for line in self.grid:
fi.write(line)
def playAShot(self, col, player):
"""
Implements function to play a turn.
:param col: the column player wanna play in.
"""
if self.grid[0][col] != '0':
return False
elif self.grid[5][col] == '0':
li = list(self.grid[5])
li[col] = str(player)
self.grid[5] = ''.join(li)
return True
else:
for x in xrange(6):
if self.grid[x][col] != '0':
li = list(self.grid[x - 1])
li[col] = str(player)
self.grid[x - 1] = ''.join(li)
return True
def checkWinDiagonalRightToLeft(self, player):
"""
Implements function to check if there's a winning antidiagonal.
:param player: which tokens to check.
:return:
"""
x = 3
flag = False
while x < 6:
count = 0
x_int = x
y_int = 6
while x_int >= 0:
if int(self.grid[x_int][y_int]) == player and not flag:
count = 1
flag = True
elif int(self.grid[x_int][y_int]) == player and flag:
count += 1
if count == 4:
return True
else:
count = 0
flag = False
x_int -= 1
y_int -= 1
x += 1
y = 5
flag = False
while y <= 3:
count = 0
x_int = 5
y_int = y
while y_int >= 3 and x_int >= 0:
if int(self.grid[x_int][y_int]) == player and not flag:
count = 1
flag = True
elif int(self.grid[x_int][y_int]) == player and flag:
count += 1
if count == 4:
return True
else:
count = 0
flage = False
x_int -= 1
y_int -= 1
y -= 1
return False
def checkWinDiagonalLeftToRight(self, player):
"""
Implements function to check there is a / winning diagonal.
:param player: which token to check.
:return:
"""
x = 3
flag = False
while x < 6:
count = 0
x_int = x
y_int = 0
while x_int >= 0:
if int(self.grid[x_int][y_int]) == player and not flag:
count = 1
flag = True
elif int(self.grid[x_int][y_int]) == player and flag:
count += 1
if count == 4:
return True
else:
count = 0
flag = False
x_int -= 1
y_int += 1
x += 1
y = 1
flag = False
while y <= 3:
count = 0
x_int = 5
y_int = y
while y_int <= 6 and x_int >= 0:
if int(self.grid[x_int][y_int]) == player and not flag:
count = 1
flag = True
elif int(self.grid[x_int][y_int]) == player and flag:
count += 1
if count == 4:
return True
else:
count = 0
flage = False
x_int -= 1
y_int += 1
y += 1
return False
def resetGridFile(self):
"""
Implements function to reset the file containing grid.
:return:
"""
with open(self.fichier, 'w') as fi:
for x in xrange(6):
fi.write('0000000\n')
def checkWinDiagonals(self, player):
"""
Implements function to check if there's a winning diagonal.
:param player: which token to check
:return:
"""
return self.checkWinDiagonalLeftToRight(player) or self.checkWinDiagonalRightToLeft(player)
def changeColumnInLines(self):
"""
Implements function to transform columns in lines to make tests eaiser.
:return: a reverse matrice
"""
column = []
for y in xrange(7):
col = ''
for x in xrange(6):
col += self.grid[x][y]
column.append(col)
return column
def checkWinColumns(self, player):
"""
Implements function to check if there is a winning column.
:param player: which token to check.
:return: true or false
"""
column = self.changeColumnInLines()
flag = False
for line in column:
count = 0
for car in line:
if int(car) == player and not flag:
count = 1
flag = True
elif int(car) == player and flag:
count += 1
if count == 4:
return True
else:
flag = False
count = 0
return False
def checkWinLines(self, player):
"""
Implements function to check if there is a winning line.
:param player: which token to check.
:return: true or false
"""
flag = False
for line in self.grid:
count = 0
for car in line[:len(line) - 1]:
if int(car) == player and not flag:
count = 1
flag = True
elif int(car) == player and flag:
count += 1
if count == 4:
return True
else:
flag = False
count = 0
return False
def someoneWinGame(self, player):
"""
Implement function to check if someone win.
:param player: token to check
:return: True or False
"""
return self.checkWinColumns(player) or self.checkWinDiagonals(player) or self.checkWinLines(player)
def gridFull(self):
"""
Implement function to check if the grid is full.
:return: True or False
"""
for line in self.grid:
for car in line:
if car == '0':
return False
return True
def playTheGame(self):
"""
Implements main function of the class
"""
self.displayGrid()
player = True
self.bot_player.players = 2, 1
while not (self.someoneWinGame(1) or self.someoneWinGame(2)) or self.gridFull():
if player:
col = input('Choose a column to play : ')
while col > 7 or col < 1:
col = input('Choose a column (number between 1 and 7')
col -= 1
while not self.playAShot(col, 1):
col = input('This column is full, choose another one : ')
player = False
else:
col = self.bot_player.decideColumn()
self.playAShot(col, 2)
sleep(1)
player = True
print 'Bot has played'
self.updateFileFromGrid()
self.displayGrid()
if self.someoneWinGame(1):
print 'You win !'
elif self.someoneWinGame(2):
print 'You loose ...'
else:
print 'No winner, no looser !'
self.resetGridFile()
# Script entry point: start an interactive game against the bot using the
# grid persisted in 'grille.txt'.
gm = GameMaster('grille.txt', (2, 1))
gm.playTheGame()
| {
"repo_name": "KeserOner/puissance4",
"path": "gamemaster.py",
"copies": "1",
"size": "8478",
"license": "mit",
"hash": -5057292824000454000,
"line_mean": 28.3356401384,
"line_max": 107,
"alpha_frac": 0.4407879217,
"autogenerated": false,
"ratio": 4.511974454497073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5452762376197073,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bakalor'
__maintainer__ = "Igor Bakalo"
__email__ = "bigorigor.ua@gmail.com"
__status__ = "Development"
from defusedxml import ElementTree as ET
import re
from dojo.models import Finding
class SpotbugsXMLParser(object):
    """Parse a SpotBugs XML report into DefectDojo Finding objects.

    Mitigation text is taken from the report's <BugPattern> catalogue and
    findings are de-duplicated by the SpotBugs instance hash; the parsed
    findings end up in ``self.items``.
    """

    def __init__(self, filename, test):
        severity_by_priority = {
            '1': 'High',
            '2': 'Medium',
            '3': 'Low'
        }

        root = ET.parse(filename).getroot()

        # Per-bug-type mitigation text, stripped of its inline HTML markup.
        mitigations = dict()
        for pattern in root.findall('BugPattern'):
            details_text = ET.tostring(pattern.find('Details'), method='text').decode('utf-8')
            mitigations[pattern.get('type')] = re.sub(r'<[b-z/]*?>|<a|</a>|href=', '', details_text)

        dupes = dict()
        for bug in root.findall('BugInstance'):
            instance_hash = bug.get('instanceHash')
            if instance_hash in dupes:
                # Already recorded this exact bug instance; keep the first.
                continue

            full_description = ''.join(bug.itertext())
            severity = severity_by_priority[bug.get('priority')]

            dupes[instance_hash] = Finding(
                title=bug.find('ShortMessage').text,
                cwe=bug.get('cweid', default=0),
                severity=severity,
                description=full_description,
                mitigation=mitigations[bug.get('type')],
                impact='N/A',
                references='N/A',
                test=test,
                active=False,
                verified=False,
                numerical_severity=Finding.get_numerical_severity(severity),
                static_finding=True
            )

        self.items = list(dupes.values())
| {
"repo_name": "rackerlabs/django-DefectDojo",
"path": "dojo/tools/spotbugs/parser.py",
"copies": "1",
"size": "1965",
"license": "bsd-3-clause",
"hash": -126224618506014770,
"line_mean": 29.2307692308,
"line_max": 136,
"alpha_frac": 0.4900763359,
"autogenerated": false,
"ratio": 4.1543340380549685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5144410373954968,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.